diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/UnzipFile.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java
similarity index 73%
rename from other/java/examples/src/main/java/com/seaweedfs/examples/UnzipFile.java
rename to other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java
index 0529a5c73..d2eb94135 100644
--- a/other/java/examples/src/main/java/com/seaweedfs/examples/UnzipFile.java
+++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java
@@ -1,8 +1,7 @@
package com.seaweedfs.examples;
-import seaweed.hdfs.SeaweedInputStream;
import seaweedfs.client.FilerClient;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.SeaweedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
@@ -10,12 +9,11 @@ import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
-public class UnzipFile {
+public class ExampleReadFile {
public static void main(String[] args) throws IOException {
- FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
- FilerClient filerClient = new FilerClient(filerGrpcClient);
+ FilerClient filerClient = new FilerClient("localhost", 18888);
long startTime = System.currentTimeMillis();
parseZip("/Users/chris/tmp/test.zip");
@@ -25,11 +23,7 @@ public class UnzipFile {
long localProcessTime = startTime2 - startTime;
SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
- filerGrpcClient,
- new org.apache.hadoop.fs.FileSystem.Statistics(""),
- "/",
- filerClient.lookupEntry("/", "test.zip")
- );
+ filerClient, "/test.zip");
parseZip(seaweedInputStream);
long swProcessTime = System.currentTimeMillis() - startTime2;
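
Note: the rename tracks the API simplification in the 1.6.x client — a single `FilerClient(host, grpcPort)` replaces the separate `FilerGrpcClient`, and `SeaweedInputStream` is now opened by path, with no Hadoop `Statistics` object or pre-looked-up entry. A minimal read sketch against the new API (class name and buffer size are illustrative, not part of this patch):

```java
import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;

import java.io.IOException;
import java.io.InputStream;

public class ReadSketch {
    public static void main(String[] args) throws IOException {
        // 18888 is the filer's gRPC port (HTTP port 8888 + 10000, see SeaweedFileSystemStore below)
        FilerClient filerClient = new FilerClient("localhost", 18888);
        try (InputStream in = new SeaweedInputStream(filerClient, "/test.zip")) {
            byte[] buffer = new byte[16 * 1024];
            int n;
            while ((n = in.read(buffer)) != -1) {
                // process n bytes from buffer
            }
        }
    }
}
```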
diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/WatchFiles.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWatchFileChanges.java
similarity index 98%
rename from other/java/examples/src/main/java/com/seaweedfs/examples/WatchFiles.java
rename to other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWatchFileChanges.java
index e489cb3b1..72c572d31 100644
--- a/other/java/examples/src/main/java/com/seaweedfs/examples/WatchFiles.java
+++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWatchFileChanges.java
@@ -7,7 +7,7 @@ import java.io.IOException;
import java.util.Date;
import java.util.Iterator;
-public class WatchFiles {
+public class ExampleWatchFileChanges {
public static void main(String[] args) throws IOException {
diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java
new file mode 100644
index 000000000..26b74028f
--- /dev/null
+++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java
@@ -0,0 +1,47 @@
+package com.seaweedfs.examples;
+
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.SeaweedInputStream;
+import seaweedfs.client.SeaweedOutputStream;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+public class ExampleWriteFile {
+
+ public static void main(String[] args) throws IOException {
+
+ FilerClient filerClient = new FilerClient("localhost", 18888);
+
+ SeaweedInputStream seaweedInputStream = new SeaweedInputStream(filerClient, "/test.zip");
+ unZipFiles(filerClient, seaweedInputStream);
+
+ }
+
+ public static void unZipFiles(FilerClient filerClient, InputStream is) throws IOException {
+ ZipInputStream zin = new ZipInputStream(is);
+ ZipEntry ze;
+ while ((ze = zin.getNextEntry()) != null) {
+
+ String filename = ze.getName();
+ if (filename.indexOf("/") >= 0) {
+ filename = filename.substring(filename.lastIndexOf("/") + 1);
+ }
+ if (filename.length()==0) {
+ continue;
+ }
+
+ SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/"+filename);
+ byte[] bytesIn = new byte[16 * 1024];
+ int read = 0;
+ while ((read = zin.read(bytesIn))!=-1) {
+ seaweedOutputStream.write(bytesIn,0,read);
+ }
+ seaweedOutputStream.close();
+
+ System.out.println(ze.getName());
+ }
+ }
+}
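
Note: the example writes each zip entry through a fresh `SeaweedOutputStream` and closes it before moving on; `close()` flushes the buffered chunks and writes the entry metadata (see the `SeaweedOutputStream.close()` removed from hdfs3 later in this patch). A hedged single-file write sketch using the same constructor — the target path and payload are illustrative:

```java
import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedOutputStream;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public class WriteSketch {
    public static void main(String[] args) throws IOException {
        FilerClient filerClient = new FilerClient("localhost", 18888);
        // try-with-resources guarantees close(), which flushes buffered data
        try (OutputStream out = new SeaweedOutputStream(filerClient, "/test/hello.txt")) {
            out.write("hello, seaweedfs".getBytes(StandardCharsets.UTF_8));
        }
    }
}
```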
diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/HdfsCopyFile.java b/other/java/examples/src/main/java/com/seaweedfs/examples/HdfsCopyFile.java
new file mode 100644
index 000000000..006c581c9
--- /dev/null
+++ b/other/java/examples/src/main/java/com/seaweedfs/examples/HdfsCopyFile.java
@@ -0,0 +1,25 @@
+package com.seaweedfs.examples;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+
+import java.io.*;
+
+public class HdfsCopyFile {
+ public static void main(String[] args) throws IOException {
+ Configuration configuration = new Configuration();
+
+ configuration.set("fs.defaultFS", "seaweedfs://localhost:8888");
+ configuration.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
+
+ FileSystem fs = FileSystem.get(configuration);
+ String source = "/Users/chris/tmp/test.zip";
+ String destination = "/buckets/spark/test01.zip";
+ InputStream in = new BufferedInputStream(new FileInputStream(source));
+
+ OutputStream out = fs.create(new Path(destination));
+ IOUtils.copyBytes(in, out, 4096, true);
+ }
+}
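
Note: with `fs.defaultFS` and `fs.seaweedfs.impl` set, SeaweedFS is reachable through the stock Hadoop `FileSystem` API, so the reverse copy is symmetric. A sketch using only standard Hadoop calls (the local destination path is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.*;

public class HdfsReadFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "seaweedfs://localhost:8888");
        configuration.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");

        FileSystem fs = FileSystem.get(configuration);
        InputStream in = fs.open(new Path("/buckets/spark/test01.zip"));
        OutputStream out = new BufferedOutputStream(new FileOutputStream("/tmp/test01.zip"));
        IOUtils.copyBytes(in, out, 4096, true); // final 'true' closes both streams
    }
}
```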
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
index f7873a435..0680d86bb 100644
--- a/other/java/hdfs2/dependency-reduced-pom.xml
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -301,7 +301,7 @@
- <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+ <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
index bda0eba56..897477066 100644
--- a/other/java/hdfs2/pom.xml
+++ b/other/java/hdfs2/pom.xml
@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
- <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+ <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 84f11e846..25395db7a 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -26,6 +26,7 @@ public class SeaweedFileSystem extends FileSystem {
public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
+ public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access";
public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 14b32528e..f4e8c9349 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -18,27 +18,31 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE;
-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE;
+import static seaweed.hdfs.SeaweedFileSystem.*;
public class SeaweedFileSystemStore {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);
- private FilerGrpcClient filerGrpcClient;
private FilerClient filerClient;
private Configuration conf;
public SeaweedFileSystemStore(String host, int port, Configuration conf) {
int grpcPort = 10000 + port;
- filerGrpcClient = new FilerGrpcClient(host, grpcPort);
- filerClient = new FilerClient(filerGrpcClient);
+ filerClient = new FilerClient(host, grpcPort);
this.conf = conf;
+ String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
+ if (volumeServerAccessMode.equals("publicUrl")) {
+ filerClient.setAccessVolumeServerByPublicUrl();
+ } else if (volumeServerAccessMode.equals("filerProxy")) {
+ filerClient.setAccessVolumeServerByFilerProxy();
+ }
+
}
public void close() {
try {
- this.filerGrpcClient.shutdown();
+ this.filerClient.shutdown();
} catch (InterruptedException e) {
e.printStackTrace();
}
@@ -213,10 +217,10 @@ public class SeaweedFileSystemStore {
.clearGroupName()
.addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
);
- SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+ SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
}
- return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);
+ return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);
}
@@ -230,7 +234,7 @@ public class SeaweedFileSystemStore {
throw new FileNotFoundException("read non-exist file " + path);
}
- return new SeaweedInputStream(filerGrpcClient,
+ return new SeaweedHadoopInputStream(filerClient,
statistics,
path.toUri().getPath(),
entry);
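
Note: the new `fs.seaweed.volume.server.access` key selects how the HDFS client reaches volume servers — `direct` (the default), `publicUrl`, or `filerProxy` (route data through the filer, e.g. when volume servers are not directly reachable from the client). A configuration sketch; the keys and the three recognized values come from this patch, everything else is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

import java.io.IOException;

public class AccessModeSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "seaweedfs://localhost:8888");
        conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
        // one of: "direct" (default), "publicUrl", "filerProxy"
        conf.set("fs.seaweed.volume.server.access", "filerProxy");
        FileSystem fs = FileSystem.get(conf);
    }
}
```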
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
new file mode 100644
index 000000000..f26eae597
--- /dev/null
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
@@ -0,0 +1,150 @@
+package seaweed.hdfs;
+
+// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream
+
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedInputStream;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class SeaweedHadoopInputStream extends FSInputStream implements ByteBufferReadable {
+
+ private final SeaweedInputStream seaweedInputStream;
+ private final Statistics statistics;
+
+ public SeaweedHadoopInputStream(
+ final FilerClient filerClient,
+ final Statistics statistics,
+ final String path,
+ final FilerProto.Entry entry) throws IOException {
+ this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry);
+ this.statistics = statistics;
+ }
+
+ @Override
+ public int read() throws IOException {
+ return seaweedInputStream.read();
+ }
+
+ @Override
+ public int read(final byte[] b, final int off, final int len) throws IOException {
+ return seaweedInputStream.read(b, off, len);
+ }
+
+ // implement ByteBufferReadable
+ @Override
+ public synchronized int read(ByteBuffer buf) throws IOException {
+ int bytesRead = seaweedInputStream.read(buf);
+
+ if (bytesRead > 0) {
+ if (statistics != null) {
+ statistics.incrementBytesRead(bytesRead);
+ }
+ }
+
+ return bytesRead;
+ }
+
+ /**
+ * Seek to given position in stream.
+ *
+ * @param n position to seek to
+ * @throws IOException if there is an error
+ * @throws EOFException if attempting to seek past end of file
+ */
+ @Override
+ public synchronized void seek(long n) throws IOException {
+ seaweedInputStream.seek(n);
+ }
+
+ @Override
+ public synchronized long skip(long n) throws IOException {
+ return seaweedInputStream.skip(n);
+ }
+
+ /**
+ * Return the size of the remaining available bytes
+ * if the size is less than or equal to {@link Integer#MAX_VALUE},
+ * otherwise, return {@link Integer#MAX_VALUE}.
+ *
+ * This is to match the behavior of DFSInputStream.available(),
+ * which some clients may rely on (HBase write-ahead log reading in
+ * particular).
+ */
+ @Override
+ public synchronized int available() throws IOException {
+ return seaweedInputStream.available();
+ }
+
+ /**
+ * Returns the length of the file that this stream refers to. Note that the length returned is the length
+ * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
+ * they won't be reflected in the returned length.
+ *
+ * @return length of the file.
+ * @throws IOException if the stream is closed
+ */
+ public long length() throws IOException {
+ return seaweedInputStream.length();
+ }
+
+ /**
+ * Return the current offset from the start of the file
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public synchronized long getPos() throws IOException {
+ return seaweedInputStream.getPos();
+ }
+
+ /**
+ * Seeks a different copy of the data. Returns true if
+ * found a new source, false otherwise.
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public boolean seekToNewSource(long l) throws IOException {
+ return false;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ seaweedInputStream.close();
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ *
+ * @param readlimit ignored
+ */
+ @Override
+ public synchronized void mark(int readlimit) {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ */
+ @Override
+ public synchronized void reset() throws IOException {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Gets whether mark and reset are supported by {@code SeaweedHadoopInputStream}. Always returns false.
+ *
+ * @return always {@code false}
+ */
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+}
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
new file mode 100644
index 000000000..da5b56bbc
--- /dev/null
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
@@ -0,0 +1,16 @@
+package seaweed.hdfs;
+
+// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
+
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedOutputStream;
+
+public class SeaweedHadoopOutputStream extends SeaweedOutputStream {
+
+ public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
+ final long position, final int bufferSize, final String replication) {
+ super(filerClient, path, entry, position, bufferSize, replication);
+ }
+
+}
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index 20b52e20f..2b4a1a494 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -309,7 +309,7 @@
- <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+ <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index 85d8db859..49ff8f926 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
- <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+ <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 84f11e846..25395db7a 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -26,6 +26,7 @@ public class SeaweedFileSystem extends FileSystem {
public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
+ public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access";
public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 14b32528e..f4e8c9349 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -18,27 +18,31 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE;
-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE;
+import static seaweed.hdfs.SeaweedFileSystem.*;
public class SeaweedFileSystemStore {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);
- private FilerGrpcClient filerGrpcClient;
private FilerClient filerClient;
private Configuration conf;
public SeaweedFileSystemStore(String host, int port, Configuration conf) {
int grpcPort = 10000 + port;
- filerGrpcClient = new FilerGrpcClient(host, grpcPort);
- filerClient = new FilerClient(filerGrpcClient);
+ filerClient = new FilerClient(host, grpcPort);
this.conf = conf;
+ String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
+ if (volumeServerAccessMode.equals("publicUrl")) {
+ filerClient.setAccessVolumeServerByPublicUrl();
+ } else if (volumeServerAccessMode.equals("filerProxy")) {
+ filerClient.setAccessVolumeServerByFilerProxy();
+ }
+
}
public void close() {
try {
- this.filerGrpcClient.shutdown();
+ this.filerClient.shutdown();
} catch (InterruptedException e) {
e.printStackTrace();
}
@@ -213,10 +217,10 @@ public class SeaweedFileSystemStore {
.clearGroupName()
.addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
);
- SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+ SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
}
- return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);
+ return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);
}
@@ -230,7 +234,7 @@ public class SeaweedFileSystemStore {
throw new FileNotFoundException("read non-exist file " + path);
}
- return new SeaweedInputStream(filerGrpcClient,
+ return new SeaweedHadoopInputStream(filerClient,
statistics,
path.toUri().getPath(),
entry);
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
new file mode 100644
index 000000000..f26eae597
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
@@ -0,0 +1,150 @@
+package seaweed.hdfs;
+
+// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream
+
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedInputStream;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class SeaweedHadoopInputStream extends FSInputStream implements ByteBufferReadable {
+
+ private final SeaweedInputStream seaweedInputStream;
+ private final Statistics statistics;
+
+ public SeaweedHadoopInputStream(
+ final FilerClient filerClient,
+ final Statistics statistics,
+ final String path,
+ final FilerProto.Entry entry) throws IOException {
+ this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry);
+ this.statistics = statistics;
+ }
+
+ @Override
+ public int read() throws IOException {
+ return seaweedInputStream.read();
+ }
+
+ @Override
+ public int read(final byte[] b, final int off, final int len) throws IOException {
+ return seaweedInputStream.read(b, off, len);
+ }
+
+ // implement ByteBufferReadable
+ @Override
+ public synchronized int read(ByteBuffer buf) throws IOException {
+ int bytesRead = seaweedInputStream.read(buf);
+
+ if (bytesRead > 0) {
+ if (statistics != null) {
+ statistics.incrementBytesRead(bytesRead);
+ }
+ }
+
+ return bytesRead;
+ }
+
+ /**
+ * Seek to given position in stream.
+ *
+ * @param n position to seek to
+ * @throws IOException if there is an error
+ * @throws EOFException if attempting to seek past end of file
+ */
+ @Override
+ public synchronized void seek(long n) throws IOException {
+ seaweedInputStream.seek(n);
+ }
+
+ @Override
+ public synchronized long skip(long n) throws IOException {
+ return seaweedInputStream.skip(n);
+ }
+
+ /**
+ * Return the size of the remaining available bytes
+ * if the size is less than or equal to {@link Integer#MAX_VALUE},
+ * otherwise, return {@link Integer#MAX_VALUE}.
+ *
+ * This is to match the behavior of DFSInputStream.available(),
+ * which some clients may rely on (HBase write-ahead log reading in
+ * particular).
+ */
+ @Override
+ public synchronized int available() throws IOException {
+ return seaweedInputStream.available();
+ }
+
+ /**
+ * Returns the length of the file that this stream refers to. Note that the length returned is the length
+ * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
+ * they won't be reflected in the returned length.
+ *
+ * @return length of the file.
+ * @throws IOException if the stream is closed
+ */
+ public long length() throws IOException {
+ return seaweedInputStream.length();
+ }
+
+ /**
+ * Return the current offset from the start of the file
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public synchronized long getPos() throws IOException {
+ return seaweedInputStream.getPos();
+ }
+
+ /**
+ * Seeks a different copy of the data. Returns true if
+ * found a new source, false otherwise.
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public boolean seekToNewSource(long l) throws IOException {
+ return false;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ seaweedInputStream.close();
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ *
+ * @param readlimit ignored
+ */
+ @Override
+ public synchronized void mark(int readlimit) {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ */
+ @Override
+ public synchronized void reset() throws IOException {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Gets whether mark and reset are supported by {@code SeaweedHadoopInputStream}. Always returns false.
+ *
+ * @return always {@code false}
+ */
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
new file mode 100644
index 000000000..1740312fe
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
@@ -0,0 +1,64 @@
+package seaweed.hdfs;
+
+// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
+
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.fs.Syncable;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedOutputStream;
+
+import java.io.IOException;
+import java.util.Locale;
+
+public class SeaweedHadoopOutputStream extends SeaweedOutputStream implements Syncable, StreamCapabilities {
+
+ public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
+ final long position, final int bufferSize, final String replication) {
+ super(filerClient, path, entry, position, bufferSize, replication);
+ }
+
+ /**
+ * Similar to posix fsync, flush out the data in client's user buffer
+ * all the way to the disk device (but the disk may have it in its cache).
+ *
+ * @throws IOException if error occurs
+ */
+ @Override
+ public void hsync() throws IOException {
+ if (supportFlush) {
+ flushInternal();
+ }
+ }
+
+ /**
+ * Flush out the data in client's user buffer. After the return of
+ * this call, new readers will see the data.
+ *
+ * @throws IOException if any error occurs
+ */
+ @Override
+ public void hflush() throws IOException {
+ if (supportFlush) {
+ flushInternal();
+ }
+ }
+
+ /**
+ * Query the stream for a specific capability.
+ *
+ * @param capability string to query the stream support for.
+ * @return true for hsync and hflush.
+ */
+ @Override
+ public boolean hasCapability(String capability) {
+ switch (capability.toLowerCase(Locale.ENGLISH)) {
+ case StreamCapabilities.HSYNC:
+ case StreamCapabilities.HFLUSH:
+ return supportFlush;
+ default:
+ return false;
+ }
+ }
+
+}
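
Note: in hdfs3 the `Syncable`/`StreamCapabilities` surface moves into this Hadoop-specific subclass, so the shared `seaweedfs.client.SeaweedOutputStream` no longer depends on Hadoop; both `hsync()` and `hflush()` are gated on the inherited `supportFlush` flag. A hedged caller-side sketch — Hadoop 3's `FSDataOutputStream` forwards `hasCapability` to the wrapped stream, and the helper below is illustrative only:

```java
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.StreamCapabilities;

import java.io.IOException;

public class SyncSketch {
    // 'out' would come from SeaweedFileSystem.create(...); illustration only
    static void durableWrite(FSDataOutputStream out, byte[] data) throws IOException {
        out.write(data);
        if (out.hasCapability(StreamCapabilities.HSYNC)) {
            out.hsync(); // flush buffered chunks so new readers see the data
        } else {
            out.flush(); // best-effort fallback when hsync is unsupported
        }
    }
}
```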
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java
deleted file mode 100644
index 690366849..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java
+++ /dev/null
@@ -1,259 +0,0 @@
-package seaweed.hdfs;
-
-// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream
-
-import org.apache.hadoop.fs.ByteBufferReadable;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import seaweedfs.client.FilerGrpcClient;
-import seaweedfs.client.FilerProto;
-import seaweedfs.client.SeaweedRead;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public class SeaweedInputStream extends FSInputStream implements ByteBufferReadable {
-
- private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);
-
- private final FilerGrpcClient filerGrpcClient;
- private final Statistics statistics;
- private final String path;
- private final FilerProto.Entry entry;
- private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
- private final long contentLength;
-
- private long position = 0; // cursor of the file
-
- private boolean closed = false;
-
- public SeaweedInputStream(
- final FilerGrpcClient filerGrpcClient,
- final Statistics statistics,
- final String path,
- final FilerProto.Entry entry) throws IOException {
- this.filerGrpcClient = filerGrpcClient;
- this.statistics = statistics;
- this.path = path;
- this.entry = entry;
- this.contentLength = SeaweedRead.fileSize(entry);
-
- this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());
-
- LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);
-
- }
-
- public String getPath() {
- return path;
- }
-
- @Override
- public int read() throws IOException {
- byte[] b = new byte[1];
- int numberOfBytesRead = read(b, 0, 1);
- if (numberOfBytesRead < 0) {
- return -1;
- } else {
- return (b[0] & 0xFF);
- }
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
-
- if (b == null) {
- throw new IllegalArgumentException("null byte array passed in to read() method");
- }
- if (off >= b.length) {
- throw new IllegalArgumentException("offset greater than length of array");
- }
- if (len < 0) {
- throw new IllegalArgumentException("requested read length is less than zero");
- }
- if (len > (b.length - off)) {
- throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
- }
-
- ByteBuffer buf = ByteBuffer.wrap(b, off, len);
- return read(buf);
-
- }
-
- // implement ByteBufferReadable
- @Override
- public synchronized int read(ByteBuffer buf) throws IOException {
-
- if (position < 0) {
- throw new IllegalArgumentException("attempting to read from negative offset");
- }
- if (position >= contentLength) {
- return -1; // Hadoop prefers -1 to EOFException
- }
-
- long bytesRead = 0;
- int len = buf.remaining();
- int start = (int) this.position;
- if (start+len <= entry.getContent().size()) {
- entry.getContent().substring(start, start+len).copyTo(buf);
- } else {
- bytesRead = SeaweedRead.read(this.filerGrpcClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
- }
-
- if (bytesRead > Integer.MAX_VALUE) {
- throw new IOException("Unexpected Content-Length");
- }
-
- if (bytesRead > 0) {
- this.position += bytesRead;
- if (statistics != null) {
- statistics.incrementBytesRead(bytesRead);
- }
- }
-
- return (int) bytesRead;
- }
-
- /**
- * Seek to given position in stream.
- *
- * @param n position to seek to
- * @throws IOException if there is an error
- * @throws EOFException if attempting to seek past end of file
- */
- @Override
- public synchronized void seek(long n) throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- if (n < 0) {
- throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
- }
- if (n > contentLength) {
- throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
- }
-
- this.position = n;
-
- }
-
- @Override
- public synchronized long skip(long n) throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- if (this.position == contentLength) {
- if (n > 0) {
- throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
- }
- }
- long newPos = this.position + n;
- if (newPos < 0) {
- newPos = 0;
- n = newPos - this.position;
- }
- if (newPos > contentLength) {
- newPos = contentLength;
- n = newPos - this.position;
- }
- seek(newPos);
- return n;
- }
-
- /**
- * Return the size of the remaining available bytes
- * if the size is less than or equal to {@link Integer#MAX_VALUE},
- * otherwise, return {@link Integer#MAX_VALUE}.
- *
- * This is to match the behavior of DFSInputStream.available(),
- * which some clients may rely on (HBase write-ahead log reading in
- * particular).
- */
- @Override
- public synchronized int available() throws IOException {
- if (closed) {
- throw new IOException(
- FSExceptionMessages.STREAM_IS_CLOSED);
- }
- final long remaining = this.contentLength - this.getPos();
- return remaining <= Integer.MAX_VALUE
- ? (int) remaining : Integer.MAX_VALUE;
- }
-
- /**
- * Returns the length of the file that this stream refers to. Note that the length returned is the length
- * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
- * they wont be reflected in the returned length.
- *
- * @return length of the file.
- * @throws IOException if the stream is closed
- */
- public long length() throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- return contentLength;
- }
-
- /**
- * Return the current offset from the start of the file
- *
- * @throws IOException throws {@link IOException} if there is an error
- */
- @Override
- public synchronized long getPos() throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- return position;
- }
-
- /**
- * Seeks a different copy of the data. Returns true if
- * found a new source, false otherwise.
- *
- * @throws IOException throws {@link IOException} if there is an error
- */
- @Override
- public boolean seekToNewSource(long l) throws IOException {
- return false;
- }
-
- @Override
- public synchronized void close() throws IOException {
- closed = true;
- }
-
- /**
- * Not supported by this stream. Throws {@link UnsupportedOperationException}
- *
- * @param readlimit ignored
- */
- @Override
- public synchronized void mark(int readlimit) {
- throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
- }
-
- /**
- * Not supported by this stream. Throws {@link UnsupportedOperationException}
- */
- @Override
- public synchronized void reset() throws IOException {
- throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
- }
-
- /**
- * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
- *
- * @return always {@code false}
- */
- @Override
- public boolean markSupported() {
- return false;
- }
-}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
deleted file mode 100644
index d4c967a06..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
+++ /dev/null
@@ -1,337 +0,0 @@
-package seaweed.hdfs;
-
-// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StreamCapabilities;
-import org.apache.hadoop.fs.Syncable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import seaweedfs.client.ByteBufferPool;
-import seaweedfs.client.FilerGrpcClient;
-import seaweedfs.client.FilerProto;
-import seaweedfs.client.SeaweedWrite;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.Locale;
-import java.util.concurrent.*;
-
-import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
-
-public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities {
-
- private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);
-
- private final FilerGrpcClient filerGrpcClient;
- private final Path path;
- private final int bufferSize;
- private final int maxConcurrentRequestCount;
- private final ThreadPoolExecutor threadExecutor;
- private final ExecutorCompletionService<Void> completionService;
- private final FilerProto.Entry.Builder entry;
- private final boolean supportFlush = false; // true;
- private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
- private long position;
- private boolean closed;
- private volatile IOException lastError;
- private long lastFlushOffset;
- private long lastTotalAppendOffset = 0;
- private ByteBuffer buffer;
- private long outputIndex;
- private String replication = "000";
-
- public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
- final long position, final int bufferSize, final String replication) {
- this.filerGrpcClient = filerGrpcClient;
- this.replication = replication;
- this.path = path;
- this.position = position;
- this.closed = false;
- this.lastError = null;
- this.lastFlushOffset = 0;
- this.bufferSize = bufferSize;
- this.buffer = ByteBufferPool.request(bufferSize);
- this.outputIndex = 0;
- this.writeOperations = new ConcurrentLinkedDeque<>();
-
- this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors();
-
- this.threadExecutor
- = new ThreadPoolExecutor(maxConcurrentRequestCount,
- maxConcurrentRequestCount,
- 120L,
- TimeUnit.SECONDS,
- new LinkedBlockingQueue<Runnable>());
- this.completionService = new ExecutorCompletionService<>(this.threadExecutor);
-
- this.entry = entry;
-
- }
-
- private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
- try {
- SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
- } catch (Exception ex) {
- throw new IOException(ex);
- }
- this.lastFlushOffset = offset;
- }
-
- @Override
- public void write(final int byteVal) throws IOException {
- write(new byte[]{(byte) (byteVal & 0xFF)});
- }
-
- @Override
- public synchronized void write(final byte[] data, final int off, final int length)
- throws IOException {
- maybeThrowLastError();
-
- Preconditions.checkArgument(data != null, "null data");
-
- if (off < 0 || length < 0 || length > data.length - off) {
- throw new IndexOutOfBoundsException();
- }
-
- // System.out.println(path + " write [" + (outputIndex + off) + "," + ((outputIndex + off) + length) + ")");
-
- int currentOffset = off;
- int writableBytes = bufferSize - buffer.position();
- int numberOfBytesToWrite = length;
-
- while (numberOfBytesToWrite > 0) {
-
- if (numberOfBytesToWrite < writableBytes) {
- buffer.put(data, currentOffset, numberOfBytesToWrite);
- outputIndex += numberOfBytesToWrite;
- break;
- }
-
- // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity());
- buffer.put(data, currentOffset, writableBytes);
- outputIndex += writableBytes;
- currentOffset += writableBytes;
- writeCurrentBufferToService();
- numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
- writableBytes = bufferSize - buffer.position();
- }
-
- }
-
- /**
- * Flushes this output stream and forces any buffered output bytes to be
- * written out. If any data remains in the payload it is committed to the
- * service. Data is queued for writing and forced out to the service
- * before the call returns.
- */
- @Override
- public void flush() throws IOException {
- if (supportFlush) {
- flushInternalAsync();
- }
- }
-
- /**
- * Similar to posix fsync, flush out the data in client's user buffer
- * all the way to the disk device (but the disk may have it in its cache).
- *
- * @throws IOException if error occurs
- */
- @Override
- public void hsync() throws IOException {
- if (supportFlush) {
- flushInternal();
- }
- }
-
- /**
- * Flush out the data in client's user buffer. After the return of
- * this call, new readers will see the data.
- *
- * @throws IOException if any error occurs
- */
- @Override
- public void hflush() throws IOException {
- if (supportFlush) {
- flushInternal();
- }
- }
-
- /**
- * Query the stream for a specific capability.
- *
- * @param capability string to query the stream support for.
- * @return true for hsync and hflush.
- */
- @Override
- public boolean hasCapability(String capability) {
- switch (capability.toLowerCase(Locale.ENGLISH)) {
- case StreamCapabilities.HSYNC:
- case StreamCapabilities.HFLUSH:
- return supportFlush;
- default:
- return false;
- }
- }
-
- /**
- * Force all data in the output stream to be written to Azure storage.
- * Wait to return until this is complete. Close the access to the stream and
- * shutdown the upload thread pool.
- * If the blob was created, its lease will be released.
- * Any error encountered caught in threads and stored will be rethrown here
- * after cleanup.
- */
- @Override
- public synchronized void close() throws IOException {
- if (closed) {
- return;
- }
-
- LOG.debug("close path: {}", path);
- try {
- flushInternal();
- threadExecutor.shutdown();
- } finally {
- lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- ByteBufferPool.release(buffer);
- buffer = null;
- outputIndex = 0;
- closed = true;
- writeOperations.clear();
- if (!threadExecutor.isShutdown()) {
- threadExecutor.shutdownNow();
- }
- }
- }
-
- private synchronized void writeCurrentBufferToService() throws IOException {
- if (buffer.position() == 0) {
- return;
- }
-
- position += submitWriteBufferToService(buffer, position);
-
- buffer = ByteBufferPool.request(bufferSize);
-
- }
-
- private synchronized int submitWriteBufferToService(final ByteBuffer bufferToWrite, final long writePosition) throws IOException {
-
- bufferToWrite.flip();
- int bytesLength = bufferToWrite.limit() - bufferToWrite.position();
-
- if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount) {
- waitForTaskToComplete();
- }
- final Future<Void> job = completionService.submit(() -> {
- // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
- SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path.toUri().getPath());
- // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
- ByteBufferPool.release(bufferToWrite);
- return null;
- });
-
- writeOperations.add(new WriteOperation(job, writePosition, bytesLength));
-
- // Try to shrink the queue
- shrinkWriteOperationQueue();
-
- return bytesLength;
-
- }
-
- private void waitForTaskToComplete() throws IOException {
- boolean completed;
- for (completed = false; completionService.poll() != null; completed = true) {
- // keep polling until there is no data
- }
-
- if (!completed) {
- try {
- completionService.take();
- } catch (InterruptedException e) {
- lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e);
- throw lastError;
- }
- }
- }
-
- private void maybeThrowLastError() throws IOException {
- if (lastError != null) {
- throw lastError;
- }
- }
-
- /**
- * Try to remove the completed write operations from the beginning of write
- * operation FIFO queue.
- */
- private synchronized void shrinkWriteOperationQueue() throws IOException {
- try {
- while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) {
- writeOperations.peek().task.get();
- lastTotalAppendOffset += writeOperations.peek().length;
- writeOperations.remove();
- }
- } catch (Exception e) {
- lastError = new IOException(e);
- throw lastError;
- }
- }
-
- private synchronized void flushInternal() throws IOException {
- maybeThrowLastError();
- writeCurrentBufferToService();
- flushWrittenBytesToService();
- }
-
- private synchronized void flushInternalAsync() throws IOException {
- maybeThrowLastError();
- writeCurrentBufferToService();
- flushWrittenBytesToServiceAsync();
- }
-
- private synchronized void flushWrittenBytesToService() throws IOException {
- for (WriteOperation writeOperation : writeOperations) {
- try {
- writeOperation.task.get();
- } catch (Exception ex) {
- lastError = new IOException(ex);
- throw lastError;
- }
- }
- LOG.debug("flushWrittenBytesToService: {} position:{}", path, position);
- flushWrittenBytesToServiceInternal(position);
- }
-
- private synchronized void flushWrittenBytesToServiceAsync() throws IOException {
- shrinkWriteOperationQueue();
-
- if (this.lastTotalAppendOffset > this.lastFlushOffset) {
- this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset);
- }
- }
-
- private static class WriteOperation {
- private final Future<Void> task;
- private final long startOffset;
- private final long length;
-
- WriteOperation(final Future<Void> task, final long startOffset, final long length) {
- Preconditions.checkNotNull(task, "task");
- Preconditions.checkArgument(startOffset >= 0, "startOffset");
- Preconditions.checkArgument(length >= 0, "length");
-
- this.task = task;
- this.startOffset = startOffset;
- this.length = length;
- }
- }
-
-}
diff --git a/unmaintained/diff_volume_servers/diff_volume_servers.go b/unmaintained/diff_volume_servers/diff_volume_servers.go
index 6107f3d48..27a537617 100644
--- a/unmaintained/diff_volume_servers/diff_volume_servers.go
+++ b/unmaintained/diff_volume_servers/diff_volume_servers.go
@@ -168,7 +168,7 @@ func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int6
size: size,
}
}
- if actual := offset.ToAcutalOffset(); actual > maxOffset {
+ if actual := offset.ToActualOffset(); actual > maxOffset {
maxOffset = actual
}
return nil
diff --git a/weed/command/command.go b/weed/command/command.go
index 0df22b575..3fa52c922 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -15,6 +15,8 @@ var Commands = []*Command{
cmdDownload,
cmdExport,
cmdFiler,
+ cmdFilerCat,
+ cmdFilerMetaTail,
cmdFilerReplicate,
cmdFilerSynchronize,
cmdFix,
@@ -25,7 +27,6 @@ var Commands = []*Command{
cmdScaffold,
cmdServer,
cmdShell,
- cmdWatch,
cmdUpload,
cmdVersion,
cmdVolume,
diff --git a/weed/command/export.go b/weed/command/export.go
index 78d75ef52..f100f3af5 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -113,7 +113,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
- if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
+ if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)
diff --git a/weed/command/filer.go b/weed/command/filer.go
index a3008eb29..7f665cec8 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -3,6 +3,7 @@ package command
import (
"fmt"
"net/http"
+ "os"
"strconv"
"strings"
"time"
@@ -19,9 +20,11 @@ import (
)
var (
- f FilerOptions
- filerStartS3 *bool
- filerS3Options S3Options
+ f FilerOptions
+ filerStartS3 *bool
+ filerS3Options S3Options
+ filerStartWebDav *bool
+ filerWebDavOptions WebDavOption
)
type FilerOptions struct {
@@ -42,7 +45,7 @@ type FilerOptions struct {
cipher *bool
peers *string
metricsHttpPort *int
- cacheToFilerLimit *int
+ saveToFilerLimit *int
defaultLevelDbDirectory *string
}
@@ -64,7 +67,7 @@ func init() {
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
- f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
+ f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
// start s3 on filer
@@ -74,6 +77,16 @@ func init() {
filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
+ filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+
+ // start webdav on filer
+ filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
+ filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
+ filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+ filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
+ filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
+ filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
+ filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
}
var cmdFiler = &Command{
@@ -113,6 +126,15 @@ func runFiler(cmd *Command, args []string) bool {
}()
}
+ if *filerStartWebDav {
+ filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
+ filerWebDavOptions.filer = &filerAddress
+ go func() {
+ time.Sleep(2 * time.Second)
+ filerWebDavOptions.startWebDav()
+ }()
+ }
+
f.startFiler()
return true
@@ -148,7 +170,7 @@ func (fo *FilerOptions) startFiler() {
Host: *fo.ip,
Port: uint32(*fo.port),
Cipher: *fo.cipher,
- CacheToFilerLimit: int64(*fo.cacheToFilerLimit),
+ SaveToFilerLimit: *fo.saveToFilerLimit,
Filers: peers,
})
if nfs_err != nil {
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
new file mode 100644
index 000000000..a46098b04
--- /dev/null
+++ b/weed/command/filer_cat.go
@@ -0,0 +1,118 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "google.golang.org/grpc"
+ "math"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ filerCat FilerCatOptions
+)
+
+type FilerCatOptions struct {
+ grpcDialOption grpc.DialOption
+ filerAddress string
+ filerClient filer_pb.SeaweedFilerClient
+ output *string
+}
+
+func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
+ return func(fileId string) (targetUrls []string, err error) {
+ vid := filer.VolumeId(fileId)
+ resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: []string{vid},
+ })
+ if err != nil {
+ return nil, err
+ }
+ locations := resp.LocationsMap[vid]
+ for _, loc := range locations.Locations {
+ targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
+ }
+ return
+ }
+}
+
+func init() {
+ cmdFilerCat.Run = runFilerCat // break init cycle
+ filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
+}
+
+var cmdFilerCat = &Command{
+ UsageLine: "filer.cat [-o ] http://localhost:8888/path/to/file",
+ Short: "copy one file to local",
+ Long: `read one file to stdout or write to a file
+
+`,
+}
+
+func runFilerCat(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ if len(args) == 0 {
+ return false
+ }
+ filerSource := args[len(args)-1]
+
+ filerUrl, err := url.Parse(filerSource)
+ if err != nil {
+ fmt.Printf("The last argument should be a URL on filer: %v\n", err)
+ return false
+ }
+ urlPath := filerUrl.Path
+ if strings.HasSuffix(urlPath, "/") {
+ fmt.Printf("The last argument should be a file: %v\n", err)
+ return false
+ }
+
+ filerCat.filerAddress = filerUrl.Host
+ filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ dir, name := util.FullPath(urlPath).DirAndName()
+
+ writer := os.Stdout
+ if *filerCat.output != "" {
+
+ fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)
+
+ f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
+ if err != nil {
+ fmt.Printf("open file %s: %v\n", *filerCat.output, err)
+ return false
+ }
+ defer f.Close()
+ writer = f
+ }
+
+ pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Name: name,
+ Directory: dir,
+ }
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
+ if err != nil {
+ return err
+ }
+
+ filerCat.filerClient = client
+
+ return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
+
+ })
+
+ return true
+}
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 9b4c02904..6b8902ca6 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -94,7 +94,7 @@ func runCopy(cmd *Command, args []string) bool {
}
urlPath := filerUrl.Path
if !strings.HasSuffix(urlPath, "/") {
- fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
+ fmt.Printf("The last argument should be a folder and end with \"/\"\n")
return false
}
diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go
new file mode 100644
index 000000000..f055b19a8
--- /dev/null
+++ b/weed/command/filer_meta_tail.go
@@ -0,0 +1,211 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "github.com/golang/protobuf/jsonpb"
+ jsoniter "github.com/json-iterator/go"
+ "github.com/olivere/elastic/v7"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle
+}
+
+var cmdFilerMetaTail = &Command{
+ UsageLine: "filer.meta.tail [-filer=localhost:8888] [-target=/]",
+ Short: "see recent changes on a filer",
+ Long: `See recent changes on a filer.
+
+ weed filer.meta.tail -timeAgo=30h | grep truncate
+ weed filer.meta.tail -timeAgo=30h | jq .
+ weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name
+
+ `,
+}
+
+var (
+ tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port")
+ tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
+ tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+ tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
+ esServers = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://")
+ esIndex = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name")
+)
+
+func runFilerMetaTail(cmd *Command, args []string) bool {
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ var filterFunc func(dir, fname string) bool
+ if *tailPattern != "" {
+ if strings.Contains(*tailPattern, "/") {
+ println("watch path pattern", *tailPattern)
+ filterFunc = func(dir, fname string) bool {
+ matched, err := filepath.Match(*tailPattern, dir+"/"+fname)
+ if err != nil {
+ fmt.Printf("error: %v", err)
+ }
+ return matched
+ }
+ } else {
+ println("watch file pattern", *tailPattern)
+ filterFunc = func(dir, fname string) bool {
+ matched, err := filepath.Match(*tailPattern, fname)
+ if err != nil {
+ fmt.Printf("error: %v", err)
+ }
+ return matched
+ }
+ }
+ }
+
+ shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
+ if filterFunc == nil {
+ return true
+ }
+ if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
+ return false
+ }
+ if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
+ return true
+ }
+ if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
+ return true
+ }
+ return false
+ }
+
+ jsonpbMarshaler := jsonpb.Marshaler{
+ EmitDefaults: false,
+ }
+ eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ jsonpbMarshaler.Marshal(os.Stdout, resp)
+ fmt.Fprintln(os.Stdout)
+ return nil
+ }
+ if *esServers != "" {
+ var err error
+ eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex)
+ if err != nil {
+ fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err)
+ return false
+ }
+ }
+
+ tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "tail",
+ PathPrefix: *tailTarget,
+ SinceNs: time.Now().Add(-*tailStart).UnixNano(),
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+ if !shouldPrint(resp) {
+ continue
+ }
+ if err = eachEntryFunc(resp); err != nil {
+ return err
+ }
+ }
+
+ })
+ if tailErr != nil {
+ fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
+ }
+
+ return true
+}
+
+type EsDocument struct {
+ Dir string `json:"dir,omitempty"`
+ Name string `json:"name,omitempty"`
+ IsDirectory bool `json:"isDir,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Uid uint32 `json:"uid,omitempty"`
+ Gid uint32 `json:"gid,omitempty"`
+ UserName string `json:"userName,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ Crtime int64 `json:"crtime,omitempty"`
+ Mtime int64 `json:"mtime,omitempty"`
+ Mime string `json:"mime,omitempty"`
+}
+
+func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
+ entry := event.NewEntry
+ dir, name := event.NewParentPath, entry.Name
+ id := util.Md5String([]byte(util.NewFullPath(dir, name)))
+ esEntry := &EsDocument{
+ Dir: dir,
+ Name: name,
+ IsDirectory: entry.IsDirectory,
+ Size: entry.Attributes.FileSize,
+ Uid: entry.Attributes.Uid,
+ Gid: entry.Attributes.Gid,
+ UserName: entry.Attributes.UserName,
+ Collection: entry.Attributes.Collection,
+ Crtime: entry.Attributes.Crtime,
+ Mtime: entry.Attributes.Mtime,
+ Mime: entry.Attributes.Mime,
+ }
+ return esEntry, id
+}
+
+func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
+ options := []elastic.ClientOptionFunc{}
+ options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
+ options = append(options, elastic.SetSniff(false))
+ options = append(options, elastic.SetHealthcheck(false))
+ client, err := elastic.NewClient(options...)
+ if err != nil {
+ return nil, err
+ }
+ return func(resp *filer_pb.SubscribeMetadataResponse) error {
+ event := resp.EventNotification
+ if event.OldEntry != nil &&
+ (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
+			// the entry was deleted, or renamed/moved, so this is not an in-place update of the same file
+ dir, name := resp.Directory, event.OldEntry.Name
+ id := util.Md5String([]byte(util.NewFullPath(dir, name)))
+ println("delete", id)
+ _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
+ return err
+ }
+ if event.NewEntry != nil {
+ // add a new file or update the same file
+ esEntry, id := toEsEntry(event)
+ value, err := jsoniter.Marshal(esEntry)
+ if err != nil {
+ return err
+ }
+ println(string(value))
+ _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
+ return err
+ }
+ return nil
+ }, nil
+}
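
The pattern filter in `runFilerMetaTail` above relies on `filepath.Match`: a pattern containing "/" is matched against `dir+"/"+fname`, otherwise only against the file name. A minimal standalone demonstration of those matching semantics (stdlib only):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Same rules the tail filter applies: a pattern containing "/" matches
	// the full path, any other pattern matches only the base name.
	fmt.Println(filepath.Match("/home/?opher", "/home/gopher")) // true <nil>
	fmt.Println(filepath.Match("*.pdf", "report.pdf"))          // true <nil>
	fmt.Println(filepath.Match("*.pdf", "docs/report.pdf"))     // false <nil>
}
```

Note that `*` never crosses a path separator, so full-path patterns need explicit directory components.
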
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index 40f2b570b..e8c06b208 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -11,10 +11,10 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
+ _ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
)
func init() {
@@ -98,13 +98,19 @@ func runFilerReplicate(cmd *Command, args []string) bool {
replicator := replication.NewReplicator(config, "source.filer.", dataSink)
for {
- key, m, err := notificationInput.ReceiveMessage()
+ key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
if err != nil {
glog.Errorf("receive %s: %+v", key, err)
+ if onFailureFn != nil {
+ onFailureFn()
+ }
continue
}
if key == "" {
// long poll received no messages
+ if onSuccessFn != nil {
+ onSuccessFn()
+ }
continue
}
if m.OldEntry != nil && m.NewEntry == nil {
@@ -116,14 +122,20 @@ func runFilerReplicate(cmd *Command, args []string) bool {
}
if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err)
+ if onFailureFn != nil {
+ onFailureFn()
+ }
} else {
glog.V(1).Infof("replicated %s", key)
+ if onSuccessFn != nil {
+ onSuccessFn()
+ }
}
}
}
-func validateOneEnabledInput(config *viper.Viper) {
+func validateOneEnabledInput(config *util.ViperProxy) {
enabledInput := ""
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
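
`ReceiveMessage` now hands back `onSuccessFn`/`onFailureFn` callbacks so a queue message is acknowledged only after replication succeeds and re-queued on failure. A compact, runnable sketch of that contract, with toy stand-ins for the real `sub` package types (only the call-site shape comes from the change above; these signatures are illustrative assumptions):

```go
package main

import (
	"errors"
	"fmt"
)

type msg struct{ key string }

// toy stand-in for a notification input that supports ack/nack callbacks
func receiveMessage() (key string, m *msg, onSuccessFn func(), onFailureFn func(), err error) {
	ack := func() { fmt.Println("ack") }      // delete from the queue
	nack := func() { fmt.Println("requeue") } // leave for redelivery
	return "/buckets/b/o", &msg{"/buckets/b/o"}, ack, nack, nil
}

func replicate(m *msg) error { return errors.New("transient failure") }

func main() {
	_, m, onSuccessFn, onFailureFn, err := receiveMessage()
	if err != nil || replicate(m) != nil {
		if onFailureFn != nil {
			onFailureFn() // nack: the message stays visible for a retry
		}
		return
	}
	if onSuccessFn != nil {
		onSuccessFn() // ack only after a successful replicate
	}
}
```
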
diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go
index 050fef7d4..fce048bde 100644
--- a/weed/command/filer_sync.go
+++ b/weed/command/filer_sync.go
@@ -35,6 +35,8 @@ type SyncOptions struct {
bDiskType *string
aDebug *bool
bDebug *bool
+ aProxyByFiler *bool
+ bProxyByFiler *bool
}
var (
@@ -45,7 +47,7 @@ var (
func init() {
cmdFilerSynchronize.Run = runFilerSynchronize // break init cycle
- syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow if true")
+	syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one-directional follow from A to B if true")
syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster")
syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster")
syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A")
@@ -58,6 +60,8 @@ func init() {
syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd] choose between hard drive or solid state drive on filer A")
syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd] choose between hard drive or solid state drive on filer B")
+ syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
+ syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files")
syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file")
@@ -66,8 +70,8 @@ func init() {
var cmdFilerSynchronize = &Command{
UsageLine: "filer.sync -a=: -b=:",
- Short: "continuously synchronize between two active-active or active-passive SeaweedFS clusters",
- Long: `continuously synchronize file changes between two active-active or active-passive filers
+	Short:     "resumable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
+	Long: `resumable continuous synchronization for file changes between two active-active or active-passive filers
filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
and write to the other destination. Different from filer.replicate:
@@ -90,8 +94,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
go func() {
for {
- err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.filerB,
- *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bDiskType, *syncOptions.bDebug)
+ err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
+ *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
@@ -102,8 +106,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
if !*syncOptions.isActivePassive {
go func() {
for {
- err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.filerA,
- *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aDiskType, *syncOptions.aDebug)
+ err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
+ *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
@@ -117,8 +121,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
return true
}
-func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath, targetFiler, targetPath string,
- replicationStr, collection string, ttlSec int, diskType string, debug bool) error {
+func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
+ replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
// read source filer signature
sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
@@ -142,9 +146,9 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
// create filer sink
filerSource := &source.FilerSource{}
- filerSource.DoInitialize(pb.ServerToGrpcAddress(sourceFiler), sourcePath)
+ filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
filerSink := &filersink.FilerSink{}
- filerSink.DoInitialize(pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption)
+ filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
filerSink.SetSourceFiler(filerSource)
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
diff --git a/weed/command/mount.go b/weed/command/mount.go
index e380ff57c..a99ee5027 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -6,25 +6,25 @@ import (
)
type MountOptions struct {
- filer *string
- filerMountRootPath *string
- dir *string
- dirAutoCreate *bool
- collection *string
- replication *string
- diskType *string
- ttlSec *int
- chunkSizeLimitMB *int
- concurrentWriters *int
- cacheDir *string
- cacheSizeMB *int64
- dataCenter *string
- allowOthers *bool
- umaskString *string
- nonempty *bool
- outsideContainerClusterMode *bool
- uidMap *string
- gidMap *string
+ filer *string
+ filerMountRootPath *string
+ dir *string
+ dirAutoCreate *bool
+ collection *string
+ replication *string
+ diskType *string
+ ttlSec *int
+ chunkSizeLimitMB *int
+ concurrentWriters *int
+ cacheDir *string
+ cacheSizeMB *int64
+ dataCenter *string
+ allowOthers *bool
+ umaskString *string
+ nonempty *bool
+ volumeServerAccess *string
+ uidMap *string
+ gidMap *string
}
var (
@@ -45,14 +45,14 @@ func init() {
mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd] choose between hard drive or solid state drive")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
- mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 0, "limit concurrent goroutine writers if not 0")
+ mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")
mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
- mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access volume servers with publicUrl")
+ mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]")
mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated :")
mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated :")
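
`-volumeServerAccess` generalizes the old `-outsideContainerClusterMode` boolean into three modes; the boolean's behavior corresponds to `publicUrl`. A hedged, runnable sketch of what each mode means (the descriptions summarize the flag help text; the actual chunk-routing internals are not shown here):

```go
package main

import "fmt"

func describe(mode string) string {
	switch mode {
	case "direct":
		return "talk to volume servers on their advertised addresses (default)"
	case "publicUrl":
		return "use each volume server's publicUrl, for clients outside the cluster network"
	case "filerProxy":
		return "route chunk reads and writes through the filer itself"
	default:
		return "unknown -volumeServerAccess mode"
	}
}

func main() {
	for _, m := range []string{"direct", "publicUrl", "filerProxy"} {
		fmt.Printf("%-10s %s\n", m, describe(m))
	}
}
```
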
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 0798d18af..e2aa7e7d4 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -59,6 +59,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return true
}
+ util.LoadConfiguration("security", false)
// try to connect to filer, filerBucketsPath may be useful later
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var cipher bool
@@ -79,8 +80,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
dir := util.ResolvePath(*option.dir)
chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
- util.LoadConfiguration("security", false)
-
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
@@ -102,9 +101,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
uid, gid := uint32(0), uint32(0)
mountMode := os.ModeDir | 0777
if err == nil {
- mountMode = os.ModeDir | fileInfo.Mode()
+ mountMode = os.ModeDir | os.FileMode(0777)&^umask
uid, gid = util.GetFileUidGid(fileInfo)
- fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
+ fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, mountMode)
} else {
fmt.Printf("can not stat %s\n", dir)
return false
@@ -152,6 +151,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
+ fuse.MaxBackground(128),
+ fuse.CongestionThreshold(128),
}
options = append(options, osSpecificMountOptions()...)
@@ -175,28 +176,30 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
}
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
- FilerGrpcAddress: filerGrpcAddress,
- GrpcDialOption: grpcDialOption,
- FilerMountRootPath: mountRoot,
- Collection: *option.collection,
- Replication: *option.replication,
- TtlSec: int32(*option.ttlSec),
- DiskType: diskType,
- ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
- ConcurrentWriters: *option.concurrentWriters,
- CacheDir: *option.cacheDir,
- CacheSizeMB: *option.cacheSizeMB,
- DataCenter: *option.dataCenter,
- EntryCacheTtl: 3 * time.Second,
- MountUid: uid,
- MountGid: gid,
- MountMode: mountMode,
- MountCtime: fileInfo.ModTime(),
- MountMtime: time.Now(),
- Umask: umask,
- OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode,
- Cipher: cipher,
- UidGidMapper: uidGidMapper,
+ MountDirectory: dir,
+ FilerAddress: filer,
+ FilerGrpcAddress: filerGrpcAddress,
+ GrpcDialOption: grpcDialOption,
+ FilerMountRootPath: mountRoot,
+ Collection: *option.collection,
+ Replication: *option.replication,
+ TtlSec: int32(*option.ttlSec),
+ DiskType: diskType,
+ ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
+ ConcurrentWriters: *option.concurrentWriters,
+ CacheDir: *option.cacheDir,
+ CacheSizeMB: *option.cacheSizeMB,
+ DataCenter: *option.dataCenter,
+ EntryCacheTtl: 3 * time.Second,
+ MountUid: uid,
+ MountGid: gid,
+ MountMode: mountMode,
+ MountCtime: fileInfo.ModTime(),
+ MountMtime: time.Now(),
+ Umask: umask,
+ VolumeServerAccess: *mountOptions.volumeServerAccess,
+ Cipher: cipher,
+ UidGidMapper: uidGidMapper,
})
// mount
@@ -213,7 +216,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
})
glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
- err = fs.Serve(c, seaweedFileSystem)
+ server := fs.New(c, nil)
+ seaweedFileSystem.Server = server
+ err = server.Serve(seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
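
The mount point mode is no longer inherited from the existing directory; it is now computed as 0777 with the umask cleared. Since `&^` binds tighter than `|` in Go, the expression reads as `os.ModeDir | (0777 &^ umask)`. A one-file demonstration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	umask := os.FileMode(0022)
	mountMode := os.ModeDir | os.FileMode(0777)&^umask // os.ModeDir | (0777 &^ 0022)
	fmt.Println(mountMode)                             // drwxr-xr-x
}
```
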
diff --git a/weed/command/s3.go b/weed/command/s3.go
index ed5bb0b80..d8e3e306b 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -23,13 +23,14 @@ var (
)
type S3Options struct {
- filer *string
- port *int
- config *string
- domainName *string
- tlsPrivateKey *string
- tlsCertificate *string
- metricsHttpPort *int
+ filer *string
+ port *int
+ config *string
+ domainName *string
+ tlsPrivateKey *string
+ tlsCertificate *string
+ metricsHttpPort *int
+ allowEmptyFolder *bool
}
func init() {
@@ -41,6 +42,7 @@ func init() {
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders")
}
var cmdS3 = &Command{
@@ -181,6 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
DomainName: *s3opt.domainName,
BucketsPath: filerBucketsPath,
GrpcDialOption: grpcDialOption,
+ AllowEmptyFolder: *s3opt.allowEmptyFolder,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index 04a988027..58143a0ad 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -44,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool {
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
+ case "shell":
+ content = SHELL_TOML_EXAMPLE
}
if content == "" {
println("need a valid -config option")
@@ -85,9 +87,21 @@ buckets_folder = "/buckets"
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
-dir = "." # directory to store level db files
+dir = "./filerldb2" # directory to store level db files
-[mysql] # or tidb
+[leveldb3]
+# similar to leveldb2.
+# each bucket has its own meta store.
+enabled = false
+dir = "./filerldb3" # directory to store level db files
+
+[rocksdb]
+# local on disk, similar to leveldb
+# since it uses a C wrapper, you need to install rocksdb and build it yourself
+enabled = false
+dir = "./filerrdb" # directory to store rocksdb files
+
+[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) COMMENT 'directory or file name',
@@ -104,9 +118,31 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
+connection_max_lifetime_seconds = 0
interpolateParams = false
-[postgres] # or cockroachdb
+[mysql2] # or memsql, tidb
+enabled = false
+createTable = """
+ CREATE TABLE IF NOT EXISTS %s (
+ dirhash BIGINT,
+ name VARCHAR(1000),
+ directory TEXT,
+ meta LONGBLOB,
+ PRIMARY KEY (dirhash, name)
+ ) DEFAULT CHARSET=utf8;
+"""
+hostname = "localhost"
+port = 3306
+username = "root"
+password = ""
+database = "" # create or use an existing database
+connection_max_idle = 2
+connection_max_open = 100
+connection_max_lifetime_seconds = 0
+interpolateParams = false
+
+[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
@@ -119,7 +155,29 @@ hostname = "localhost"
port = 5432
username = "postgres"
password = ""
-database = "" # create or use an existing database
+database = "postgres" # create or use an existing database
+schema = ""
+sslmode = "disable"
+connection_max_idle = 100
+connection_max_open = 100
+
+[postgres2]
+enabled = false
+createTable = """
+ CREATE TABLE IF NOT EXISTS %s (
+ dirhash BIGINT,
+ name VARCHAR(65535),
+ directory VARCHAR(65535),
+ meta bytea,
+ PRIMARY KEY (dirhash, name)
+ );
+"""
+hostname = "localhost"
+port = 5432
+username = "postgres"
+password = ""
+database = "postgres" # create or use an existing database
+schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
@@ -141,6 +199,11 @@ password=""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
+[hbase]
+enabled = false
+zkquorum = ""
+table = "seaweedfs"
+
[redis2]
enabled = false
address = "localhost:6379"
@@ -161,9 +224,9 @@ addresses = [
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
-readOnly = true
+readOnly = false
# automatically use the closest Redis server for reads
-routeByLatency = true
+routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
@@ -266,7 +329,8 @@ enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
-# the RabbitMQ management plugin.
+# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue,
+# then create a binding myexchange => myqueue.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`
@@ -287,6 +351,16 @@ grpcAddress = "localhost:18888"
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"
+[sink.local]
+enabled = false
+directory = "/data"
+
+[sink.local_incremental]
+# replicated files are placed under yyyy-mm-dd directories named by modification time,
+# so each date directory contains all new and updated files.
+enabled = false
+directory = "/backup"
+
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
@@ -454,5 +528,19 @@ copy_other = 1 # create n x 1 = n actual volumes
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
+`
+ SHELL_TOML_EXAMPLE = `
+
+[cluster]
+default = "c1"
+
+[cluster.c1]
+master = "localhost:9333" # comma-separated master servers
+filer = "localhost:8888" # filer host and port
+
+[cluster.c2]
+master = ""
+filer = ""
+
`
)
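
In the new `mysql2`/`postgres2` stores the `createTable` template carries a `%s` placeholder: with per-bucket tables enabled, the store substitutes the table name when a bucket is first seen. A runnable sketch of that substitution (the direct `fmt.Sprintf` call mirrors how the `GetSqlCreateTable` template expansion is expected to work; treat the exact mechanism as an assumption):

```go
package main

import "fmt"

func main() {
	createTable := `CREATE TABLE IF NOT EXISTS %s (
  dirhash BIGINT,
  name VARCHAR(1000),
  directory TEXT,
  meta LONGBLOB,
  PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;`
	// one table per bucket: the bucket name becomes the table name
	fmt.Printf(createTable+"\n", "bucket_photos")
}
```
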
diff --git a/weed/command/server.go b/weed/command/server.go
index 2d880c599..3f1ca321e 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -24,6 +24,7 @@ var (
masterOptions MasterOptions
filerOptions FilerOptions
s3Options S3Options
+ webdavOptions WebDavOption
msgBrokerOptions MessageBrokerOptions
)
@@ -61,9 +62,11 @@ var (
serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
// pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
+ isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server")
isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
+ isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway")
isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")
serverWhiteList []string
@@ -94,7 +97,7 @@ func init() {
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
- filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
+	filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Files smaller than this limit are saved in the filer store.")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@@ -114,6 +117,14 @@ func init() {
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
+ s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+
+ webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
+ webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
+ webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
+ webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
+ webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
+ webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
@@ -136,6 +147,9 @@ func runServer(cmd *Command, args []string) bool {
if *isStartingS3 {
*isStartingFiler = true
}
+ if *isStartingWebDav {
+ *isStartingFiler = true
+ }
if *isStartingMsgBroker {
*isStartingFiler = true
}
@@ -170,6 +184,7 @@ func runServer(cmd *Command, args []string) bool {
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
+ webdavOptions.filer = &filerAddress
msgBrokerOptions.filer = &filerAddress
runtime.GOMAXPROCS(runtime.NumCPU())
@@ -211,6 +226,15 @@ func runServer(cmd *Command, args []string) bool {
}()
}
+ if *isStartingWebDav {
+ go func() {
+ time.Sleep(2 * time.Second)
+
+ webdavOptions.startWebDav()
+
+ }()
+ }
+
if *isStartingMsgBroker {
go func() {
time.Sleep(2 * time.Second)
@@ -224,7 +248,11 @@ func runServer(cmd *Command, args []string) bool {
}
- startMaster(masterOptions, serverWhiteList)
+ if *isStartingMasterServer {
+ go startMaster(masterOptions, serverWhiteList)
+ }
+
+ select {}
return true
}
diff --git a/weed/command/shell.go b/weed/command/shell.go
index 6dd768f47..c9976e809 100644
--- a/weed/command/shell.go
+++ b/weed/command/shell.go
@@ -11,12 +11,14 @@ import (
var (
shellOptions shell.ShellOptions
shellInitialFiler *string
+ shellCluster *string
)
func init() {
cmdShell.Run = runShell // break init cycle
- shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
- shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
+ shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333")
+ shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888")
+ shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml")
}
var cmdShell = &Command{
@@ -24,6 +26,8 @@ var cmdShell = &Command{
Short: "run interactive administrative commands",
Long: `run interactive administrative commands.
+ Generate shell.toml via "weed scaffold -config=shell"
+
`,
}
@@ -32,6 +36,23 @@ func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false)
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+ if *shellOptions.Masters == "" && *shellInitialFiler == "" {
+ util.LoadConfiguration("shell", false)
+ v := util.GetViper()
+ cluster := v.GetString("cluster.default")
+ if *shellCluster != "" {
+ cluster = *shellCluster
+ }
+ if cluster == "" {
+ *shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
+ } else {
+ *shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
+ *shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
+ }
+ }
+
+ fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
+
var err error
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
if err != nil {
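
The resolution order for the shell connection is: explicit `-master`/`-filer` flags, then `-cluster`, then `cluster.default` from shell.toml, then the historical localhost defaults. A self-contained sketch of that precedence, with a plain map standing in for viper:

```go
package main

import "fmt"

func resolve(masterFlag, filerFlag, clusterFlag string, conf map[string]string) (master, filer string) {
	if masterFlag != "" || filerFlag != "" {
		return masterFlag, filerFlag // explicit flags win
	}
	cluster := conf["cluster.default"]
	if clusterFlag != "" {
		cluster = clusterFlag // -cluster overrides the configured default
	}
	if cluster == "" {
		return "localhost:9333", "localhost:8888" // historical defaults
	}
	return conf["cluster."+cluster+".master"], conf["cluster."+cluster+".filer"]
}

func main() {
	conf := map[string]string{
		"cluster.default":   "c1",
		"cluster.c1.master": "localhost:9333",
		"cluster.c1.filer":  "localhost:8888",
	}
	fmt.Println(resolve("", "", "", conf)) // localhost:9333 localhost:8888
}
```
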
diff --git a/weed/command/upload.go b/weed/command/upload.go
index 592f71610..002c3032f 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -1,8 +1,12 @@
package command
import (
+ "context"
"encoding/json"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "google.golang.org/grpc"
"os"
"path/filepath"
@@ -67,6 +71,15 @@ func runUpload(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+	defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
+	if err != nil {
+		fmt.Printf("upload: %v\n", err)
+		return false
+	}
+	if *upload.replication == "" {
+		*upload.replication = defaultReplication
+	}
+
if len(args) == 0 {
if *upload.dir == "" {
return false
@@ -106,3 +119,15 @@ func runUpload(cmd *Command, args []string) bool {
}
return true
}
+
+func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
+ err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
+ resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
+ }
+ replication = resp.DefaultReplication
+ return nil
+ })
+ return
+}
diff --git a/weed/command/watch.go b/weed/command/watch.go
deleted file mode 100644
index fd7dd6fb2..000000000
--- a/weed/command/watch.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package command
-
-import (
- "context"
- "fmt"
- "io"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/chrislusf/seaweedfs/weed/pb"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/security"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func init() {
- cmdWatch.Run = runWatch // break init cycle
-}
-
-var cmdWatch = &Command{
- UsageLine: "watch [-filer=localhost:8888] [-target=/]",
- Short: "see recent changes on a filer",
- Long: `See recent changes on a filer.
-
- `,
-}
-
-var (
- watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
- watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
- watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
- watchPattern = cmdWatch.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
-)
-
-func runWatch(cmd *Command, args []string) bool {
-
- grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
-
- var filterFunc func(dir, fname string) bool
- if *watchPattern != "" {
- if strings.Contains(*watchPattern, "/") {
- println("watch path pattern", *watchPattern)
- filterFunc = func(dir, fname string) bool {
- matched, err := filepath.Match(*watchPattern, dir+"/"+fname)
- if err != nil {
- fmt.Printf("error: %v", err)
- }
- return matched
- }
- } else {
- println("watch file pattern", *watchPattern)
- filterFunc = func(dir, fname string) bool {
- matched, err := filepath.Match(*watchPattern, fname)
- if err != nil {
- fmt.Printf("error: %v", err)
- }
- return matched
- }
- }
- }
-
- shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
- if filterFunc == nil {
- return true
- }
- if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
- return false
- }
- if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
- return true
- }
- if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
- return true
- }
- return false
- }
-
- watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
- ClientName: "watch",
- PathPrefix: *watchTarget,
- SinceNs: time.Now().Add(-*watchStart).UnixNano(),
- })
- if err != nil {
- return fmt.Errorf("listen: %v", err)
- }
-
- for {
- resp, listenErr := stream.Recv()
- if listenErr == io.EOF {
- return nil
- }
- if listenErr != nil {
- return listenErr
- }
- if !shouldPrint(resp) {
- continue
- }
- fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
- }
-
- })
- if watchErr != nil {
- fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
- }
-
- return true
-}
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
index da104358b..91b0bc98f 100644
--- a/weed/filer/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -9,19 +9,33 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"strings"
+ "sync"
)
+type SqlGenerator interface {
+ GetSqlInsert(bucket string) string
+ GetSqlUpdate(bucket string) string
+ GetSqlFind(bucket string) string
+ GetSqlDelete(bucket string) string
+ GetSqlDeleteFolderChildren(bucket string) string
+ GetSqlListExclusive(bucket string) string
+ GetSqlListInclusive(bucket string) string
+ GetSqlCreateTable(bucket string) string
+ GetSqlDropTable(bucket string) string
+}
+
type AbstractSqlStore struct {
- DB *sql.DB
- SqlInsert string
- SqlUpdate string
- SqlFind string
- SqlDelete string
- SqlDeleteFolderChildren string
- SqlListExclusive string
- SqlListInclusive string
+ SqlGenerator
+ DB *sql.DB
+ SupportBucketTable bool
+ dbs map[string]bool
+ dbsLock sync.Mutex
}
+const (
+ DEFAULT_TABLE = "filemeta"
+)
+
type TxOrDB interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
@@ -52,16 +66,65 @@ func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
return nil
}
-func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
+func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) {
+
+ shortPath = fullpath
+ bucket = DEFAULT_TABLE
+
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
- return tx
+ txOrDB = tx
+ } else {
+ txOrDB = store.DB
+ }
+
+ if !store.SupportBucketTable {
+ return
+ }
+
+ if !strings.HasPrefix(string(fullpath), "/buckets/") {
+ return
+ }
+
+ // detect bucket
+ bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 && !isForChildren {
+ return
+ }
+ bucket = bucketAndObjectKey
+ shortPath = "/"
+ if t > 0 {
+ bucket = bucketAndObjectKey[:t]
+ shortPath = util.FullPath(bucketAndObjectKey[t:])
+ }
+
+ if isValidBucket(bucket) {
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ if store.dbs == nil {
+ store.dbs = make(map[string]bool)
+ }
+
+ if _, found := store.dbs[bucket]; !found {
+			if err = store.CreateTable(ctx, bucket); err == nil {
+ store.dbs[bucket] = true
+ }
+ }
+
}
- return store.DB
+
+ return
}
func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
- dir, name := entry.FullPath.DirAndName()
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
@@ -71,7 +134,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
meta = util.MaybeGzipData(meta)
}
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
+ res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta)
if err == nil {
return
}
@@ -84,7 +147,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
- res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
+ res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("upsert %s: %s", entry.FullPath, err)
}
@@ -99,13 +162,18 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
- dir, name := entry.FullPath.DirAndName()
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
+ res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("update %s: %s", entry.FullPath, err)
}
@@ -119,8 +187,13 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Ent
func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {
- dir, name := fullpath.DirAndName()
- row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
+ if err != nil {
+ return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
@@ -142,9 +215,14 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.Full
func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
- dir, name := fullpath.DirAndName()
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir)
+ res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("delete %s: %s", fullpath, err)
}
@@ -159,7 +237,23 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.Fu
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ if isValidBucket(bucket) && shortPath == "/" {
+ if err = store.deleteTable(ctx, bucket); err == nil {
+ store.dbsLock.Lock()
+ delete(store.dbs, bucket)
+ store.dbsLock.Unlock()
+ return nil
+ } else {
+ return err
+ }
+ }
+
+ res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath)
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
}
@@ -172,15 +266,21 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
return nil
}
-func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- sqlText := store.SqlListExclusive
- if inclusive {
- sqlText = store.SqlListInclusive
+func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
+ if err != nil {
+ return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
+ }
+
+ sqlText := store.GetSqlListExclusive(bucket)
+ if includeStartFile {
+ sqlText = store.GetSqlListInclusive(bucket)
}
- rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), prefix+"%", limit)
+ rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
defer rows.Close()
@@ -188,28 +288,52 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
- glog.V(0).Infof("scan %s : %v", fullpath, err)
- return nil, fmt.Errorf("scan %s: %v", fullpath, err)
+ glog.V(0).Infof("scan %s : %v", dirPath, err)
+ return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
}
+ lastFileName = name
entry := &filer.Entry{
- FullPath: util.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(dirPath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
- return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
+ return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
+ }
+
+ if !eachEntryFunc(entry) {
+ break
}
- entries = append(entries, entry)
}
- return entries, nil
+ return lastFileName, nil
}
-func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
- return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+	return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *AbstractSqlStore) Shutdown() {
store.DB.Close()
}
+
+func isValidBucket(bucket string) bool {
+ return bucket != DEFAULT_TABLE && bucket != ""
+}
+
+func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error {
+ if !store.SupportBucketTable {
+ return nil
+ }
+ _, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
+ return err
+}
+
+func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
+ if !store.SupportBucketTable {
+ return nil
+ }
+ _, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
+ return err
+}
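
The core of the new bucket-aware routing in `getTxOrDB` is a path split: anything under `/buckets/<bucket>/...` is redirected to a per-bucket table, with the bucket prefix stripped from the stored path. A standalone sketch of just that split (the `isForChildren` corner case and lazy table creation are omitted):

```go
package main

import (
	"fmt"
	"strings"
)

const defaultTable = "filemeta" // mirrors DEFAULT_TABLE

func splitBucket(fullpath string) (bucket, shortPath string) {
	bucket, shortPath = defaultTable, fullpath
	if !strings.HasPrefix(fullpath, "/buckets/") {
		return // non-bucket paths stay in the default table
	}
	bucketAndObjectKey := fullpath[len("/buckets/"):]
	bucket, shortPath = bucketAndObjectKey, "/"
	if t := strings.Index(bucketAndObjectKey, "/"); t > 0 {
		bucket = bucketAndObjectKey[:t]
		shortPath = bucketAndObjectKey[t:]
	}
	return
}

func main() {
	fmt.Println(splitBucket("/buckets/photos/2021/a.jpg")) // photos /2021/a.jpg
	fmt.Println(splitBucket("/topics/x"))                  // filemeta /topics/x
}
```
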
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
index 792a45ff4..03b016c76 100644
--- a/weed/filer/abstract_sql/abstract_sql_store_kv.go
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -13,9 +13,14 @@ import (
func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ db, _, _, err := store.getTxOrDB(ctx, "", false)
+ if err != nil {
+ return fmt.Errorf("findDB: %v", err)
+ }
+
dirStr, dirHash, name := genDirAndName(key)
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)
+ res, err := db.ExecContext(ctx, store.GetSqlInsert(DEFAULT_TABLE), dirHash, name, dirStr, value)
if err == nil {
return
}
@@ -28,7 +33,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("kv insert falls back to update: %s", err)
- res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
+ res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
if err != nil {
return fmt.Errorf("kv upsert: %s", err)
}
@@ -43,8 +48,13 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ db, _, _, err := store.getTxOrDB(ctx, "", false)
+ if err != nil {
+ return nil, fmt.Errorf("findDB: %v", err)
+ }
+
dirStr, dirHash, name := genDirAndName(key)
- row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)
+ row := db.QueryRowContext(ctx, store.GetSqlFind(DEFAULT_TABLE), dirHash, name, dirStr)
err = row.Scan(&value)
@@ -61,9 +71,14 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b
func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {
+ db, _, _, err := store.getTxOrDB(ctx, "", false)
+ if err != nil {
+ return fmt.Errorf("findDB: %v", err)
+ }
+
dirStr, dirHash, name := genDirAndName(key)
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)
+ res, err := db.ExecContext(ctx, store.GetSqlDelete(DEFAULT_TABLE), dirHash, name, dirStr)
if err != nil {
return fmt.Errorf("kv delete: %s", err)
}
diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go
index 49f5625d9..fd2ce91a6 100644
--- a/weed/filer/cassandra/cassandra_store.go
+++ b/weed/filer/cassandra/cassandra_store.go
@@ -168,41 +168,43 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
return nil
}
-func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
-func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer.Entry, err error) {
+func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok {
+ if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok {
return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing
}
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
- if inclusive {
+ if includeStartFile {
cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?"
}
var data []byte
var name string
- iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
+ iter := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter()
for iter.Scan(&name, &data) {
entry := &filer.Entry{
- FullPath: util.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(dirPath), name),
}
+ lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
if err := iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
}
- return entries, err
+ return lastFileName, err
}
func (store *CassandraStore) Shutdown() {
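
All stores now share the same streaming contract: query `limit+1` rows, invoke `eachEntryFunc` per entry until it returns false, and report `lastFileName` so the caller can both detect that more rows exist and resume from the right position. A minimal in-memory model of that loop:

```go
package main

import "fmt"

type entry struct{ name string }

// list mirrors the new store contract: stream entries through a callback,
// stop early when it returns false, and return the last name seen.
func list(rows []entry, each func(*entry) bool) (lastFileName string) {
	for i := range rows {
		lastFileName = rows[i].name
		if !each(&rows[i]) {
			break // consumer asked to stop (e.g. its limit is reached)
		}
	}
	return
}

func main() {
	rows := []entry{{"a"}, {"b"}, {"c"}} // imagine a "LIMIT limit+1" query result
	limit := 2
	last := list(rows, func(e *entry) bool {
		fmt.Println(e.name)
		limit--
		return limit > 0
	})
	fmt.Println("resume after:", last) // "b"; a third row existed, so there is more
}
```
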
diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go
index a6f18709e..9ef2f3e0f 100644
--- a/weed/filer/configuration.go
+++ b/weed/filer/configuration.go
@@ -2,7 +2,7 @@ package filer
import (
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/spf13/viper"
+ "github.com/chrislusf/seaweedfs/weed/util"
"os"
"reflect"
"strings"
@@ -12,7 +12,7 @@ var (
Stores []FilerStore
)
-func (f *Filer) LoadConfiguration(config *viper.Viper) {
+func (f *Filer) LoadConfiguration(config *util.ViperProxy) {
validateOneEnabledStore(config)
@@ -79,7 +79,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
}
-func validateOneEnabledStore(config *viper.Viper) {
+func validateOneEnabledStore(config *util.ViperProxy) {
enabledStore := ""
for _, store := range Stores {
if config.GetBool(store.GetName() + ".enabled") {
diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go
index ec88e10a5..a16e5ebca 100644
--- a/weed/filer/elastic/v7/elastic_store.go
+++ b/weed/filer/elastic/v7/elastic_store.go
@@ -96,12 +96,12 @@ func (store *ElasticStore) RollbackTransaction(ctx context.Context) error {
return nil
}
-func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
- index := getIndex(entry.FullPath)
+ index := getIndex(entry.FullPath, false)
dir, _ := entry.FullPath.DirAndName()
id := weed_util.Md5String([]byte(entry.FullPath))
esEntry := &ESEntry{
@@ -131,7 +131,7 @@ func (store *ElasticStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
}
func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
- index := getIndex(fullpath)
+ index := getIndex(fullpath, false)
id := weed_util.Md5String([]byte(fullpath))
searchResult, err := store.client.Get().
Index(index).
@@ -154,7 +154,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
}
func (store *ElasticStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
- index := getIndex(fullpath)
+ index := getIndex(fullpath, false)
id := weed_util.Md5String([]byte(fullpath))
if strings.Count(string(fullpath), "/") == 1 {
return store.deleteIndex(ctx, index)
@@ -187,62 +187,30 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
}
func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
- if entries, err := store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32); err == nil {
- for _, entry := range entries {
- store.DeleteEntry(ctx, entry.FullPath)
+ _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
+ if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
+ glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
+ return false
}
- }
- return nil
+ return true
+ })
+ return
}
-func (store *ElasticStore) ListDirectoryEntries(
- ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer.Entry, err error) {
- if string(fullpath) == "/" {
- return store.listRootDirectoryEntries(ctx, startFileName, inclusive, limit)
- }
- return store.listDirectoryEntries(ctx, fullpath, startFileName, inclusive, limit)
-}
-
-func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
- indexResult, err := store.client.CatIndices().Do(ctx)
- if err != nil {
- glog.Errorf("list indices %v.", err)
- return entries, err
- }
- for _, index := range indexResult {
- if index.Index == indexKV {
- continue
- }
- if strings.HasPrefix(index.Index, indexPrefix) {
- if entry, err := store.FindEntry(ctx,
- weed_util.FullPath("/"+strings.Replace(index.Index, indexPrefix, "", 1))); err == nil {
- fileName := getFileName(entry.FullPath)
- if fileName == startFileName && !inclusive {
- continue
- }
- limit--
- if limit < 0 {
- break
- }
- entries = append(entries, entry)
- }
- }
- }
- return entries, nil
+func (store *ElasticStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.listDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
}
func (store *ElasticStore) listDirectoryEntries(
- ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer.Entry, err error) {
+ ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
first := true
- index := getIndex(fullpath)
+ index := getIndex(fullpath, true)
nextStart := ""
parentId := weed_util.Md5String([]byte(fullpath))
- if _, err := store.client.Refresh(index).Do(ctx); err != nil {
+ if _, err = store.client.Refresh(index).Do(ctx); err != nil {
if elastic.IsNotFound(err) {
store.client.CreateIndex(index).Do(ctx)
- return entries, nil
+ return
}
}
for {
@@ -250,7 +218,7 @@ func (store *ElasticStore) listDirectoryEntries(
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
- return entries, err
+ return
}
} else {
fullPath := string(fullpath) + "/" + startFileName
@@ -260,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
- return entries, err
+ return
}
}
first = false
@@ -272,21 +240,24 @@ func (store *ElasticStore) listDirectoryEntries(
if err := jsoniter.Unmarshal(hit.Source, esEntry); err == nil {
limit--
if limit < 0 {
- return entries, nil
+ return lastFileName, nil
}
nextStart = string(esEntry.Entry.FullPath)
- fileName := getFileName(esEntry.Entry.FullPath)
+ fileName := esEntry.Entry.FullPath.Name()
if fileName == startFileName && !inclusive {
continue
}
- entries = append(entries, esEntry.Entry)
+ if !eachEntryFunc(esEntry.Entry) {
+ break
+ }
+ lastFileName = fileName
}
}
if len(result.Hits.Hits) < store.maxPageSize {
break
}
}
- return entries, nil
+ return
}
func (store *ElasticStore) search(ctx context.Context, index, parentId string) (result *elastic.SearchResult, err error) {
@@ -321,18 +292,16 @@ func (store *ElasticStore) Shutdown() {
store.client.Stop()
}
-func getIndex(fullpath weed_util.FullPath) string {
+func getIndex(fullpath weed_util.FullPath, isDirectory bool) string {
path := strings.Split(string(fullpath), "/")
- if len(path) > 1 {
- return indexPrefix + path[1]
+ if isDirectory && len(path) >= 2 {
+ return indexPrefix + strings.ToLower(path[1])
}
- return ""
-}
-
-func getFileName(fullpath weed_util.FullPath) string {
- path := strings.Split(string(fullpath), "/")
- if len(path) > 1 {
- return path[len(path)-1]
+ if len(path) > 2 {
+ return indexPrefix + strings.ToLower(path[1])
+ }
+ if len(path) == 2 {
+ return indexPrefix
}
return ""
}
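
Elasticsearch index names must be lowercase, which is why `getIndex` now runs the first path component through `strings.ToLower` (and why it gained the `isDirectory` flag for bucket-level lookups). A tiny standalone version of the name derivation (the `seaweedfs_` prefix is an illustrative stand-in for the package's `indexPrefix` constant):

```go
package main

import (
	"fmt"
	"strings"
)

const indexPrefix = "seaweedfs_" // assumption: stand-in for the real constant

func getIndex(fullpath string, isDirectory bool) string {
	path := strings.Split(fullpath, "/")
	if isDirectory && len(path) >= 2 {
		return indexPrefix + strings.ToLower(path[1])
	}
	if len(path) > 2 {
		return indexPrefix + strings.ToLower(path[1])
	}
	if len(path) == 2 {
		return indexPrefix
	}
	return ""
}

func main() {
	fmt.Println(getIndex("/Movies/2021/a.mp4", false)) // seaweedfs_movies
	fmt.Println(getIndex("/Movies", true))             // seaweedfs_movies
}
```
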
diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
index 634fba1eb..71ed738f9 100644
--- a/weed/filer/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -1,6 +1,7 @@
package etcd
import (
+ "bytes"
"context"
"fmt"
"strings"
@@ -101,7 +102,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPa
resp, err := store.client.Get(ctx, string(key))
if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
if len(resp.Kvs) == 0 {
@@ -139,25 +140,32 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_
return nil
}
-func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
-func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ directoryPrefix := genDirectoryKeyPrefix(dirPath, "")
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+ }
- resp, err := store.client.Get(ctx, string(directoryPrefix),
+ resp, err := store.client.Get(ctx, string(lastFileStart),
clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
for _, kv := range resp.Kvs {
+ if !bytes.HasPrefix(kv.Key, directoryPrefix) {
+ break
+ }
fileName := getNameFromKey(kv.Key)
if fileName == "" {
continue
}
- if fileName == startFileName && !inclusive {
+ if fileName == startFileName && !includeStartFile {
continue
}
limit--
@@ -165,17 +173,20 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
break
}
entry := &filer.Entry{
- FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
+ lastFileName = fileName
}
- return entries, err
+ return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
index f5ab36d37..845bfaec1 100644
--- a/weed/filer/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -3,6 +3,7 @@ package filer
import (
"bytes"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
"io"
"math"
"time"
@@ -38,7 +39,7 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
return
}
-func ResolveChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
+func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
// TODO maybe parallel this
for _, chunk := range chunks {
if !chunk.IsChunkManifest {
@@ -63,7 +64,7 @@ func ResolveChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunks []*fil
return
}
-func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
+func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
if !chunk.IsChunkManifest {
return
}
@@ -84,7 +85,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
}
// TODO fetch from cache for weed mount?
-func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
+func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
urlStrings, err := lookupFileIdFn(fileId)
if err != nil {
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
index c75a35f79..68f308a51 100644
--- a/weed/filer/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/hex"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"sort"
"sync"
@@ -52,7 +53,7 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
}
-func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
+func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
@@ -71,7 +72,7 @@ func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_
return
}
-func MinusChunks(lookupFileIdFn LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
+func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
if aErr != nil {
@@ -116,7 +117,7 @@ func (cv *ChunkView) IsFullChunk() bool {
return cv.Size == cv.ChunkSize
}
-func ViewFromChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
+func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
@@ -222,7 +223,7 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest
-func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
+func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
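
These hunks only relocate the lookup function type from package filer into wdclient (breaking the import cycle between the two packages); every signature is otherwise unchanged. A hedged sketch of supplying such a lookup to ResolveChunkManifest, assuming the type is func(fileId string) (urls []string, err error) as its call site in fetchChunk suggests:

```go
package example

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// resolveAll flattens manifest chunks into plain data chunks using a
// caller-supplied file-id lookup. The fixed URL below is a placeholder;
// a real caller would resolve through the master via wdclient.
func resolveAll(chunks []*filer_pb.FileChunk) ([]*filer_pb.FileChunk, error) {
	lookup := func(fileId string) (urls []string, err error) {
		return []string{"http://127.0.0.1:8080/" + fileId}, nil // placeholder volume server
	}
	dataChunks, _, err := filer.ResolveChunkManifest(lookup, chunks)
	return dataChunks, err
}
```
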
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 800dd35dc..e59887763 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -134,69 +134,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
return nil
}
- dirParts := strings.Split(string(entry.FullPath), "/")
-
- // fmt.Printf("directory parts: %+v\n", dirParts)
-
- var lastDirectoryEntry *Entry
-
- for i := 1; i < len(dirParts); i++ {
- dirPath := "/" + util.Join(dirParts[:i]...)
- // fmt.Printf("%d directory: %+v\n", i, dirPath)
-
- // check the store directly
- glog.V(4).Infof("find uncached directory: %s", dirPath)
- dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
-
- // no such existing directory
- if dirEntry == nil {
-
- // create the directory
- now := time.Now()
-
- dirEntry = &Entry{
- FullPath: util.FullPath(dirPath),
- Attr: Attr{
- Mtime: now,
- Crtime: now,
- Mode: os.ModeDir | entry.Mode | 0110,
- Uid: entry.Uid,
- Gid: entry.Gid,
- Collection: entry.Collection,
- Replication: entry.Replication,
- UserName: entry.UserName,
- GroupNames: entry.GroupNames,
- },
- }
-
- glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
- mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
- if mkdirErr != nil {
- if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
- glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
- return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
- }
- } else {
- f.maybeAddBucket(dirEntry)
- f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
- }
-
- } else if !dirEntry.IsDirectory() {
- glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
- return fmt.Errorf("%s is a file", dirPath)
- }
-
- // remember the direct parent directory entry
- if i == len(dirParts)-1 {
- lastDirectoryEntry = dirEntry
- }
-
- }
-
- if lastDirectoryEntry == nil {
- glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
- return fmt.Errorf("parent folder not found: %v", entry.FullPath)
- }
+ oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
/*
if !hasWritePermission(lastDirectoryEntry, entry) {
@@ -206,9 +144,13 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
*/
- oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
-
if oldEntry == nil {
+
+ dirParts := strings.Split(string(entry.FullPath), "/")
+ if err := f.ensureParentDirectoryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil {
+ return err
+ }
+
glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
@@ -236,6 +178,65 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
return nil
}
+func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) {
+
+ if level == 0 {
+ return nil
+ }
+
+ dirPath := "/" + util.Join(dirParts[:level]...)
+ // fmt.Printf("%d directory: %+v\n", level, dirPath)
+
+ // check the store directly
+ glog.V(4).Infof("find uncached directory: %s", dirPath)
+ dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
+
+ // no such existing directory
+ if dirEntry == nil {
+
+ // ensure parent directory
+ if err = f.ensureParentDirectoryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil {
+ return err
+ }
+
+ // create the directory
+ now := time.Now()
+
+ dirEntry = &Entry{
+ FullPath: util.FullPath(dirPath),
+ Attr: Attr{
+ Mtime: now,
+ Crtime: now,
+ Mode: os.ModeDir | entry.Mode | 0110,
+ Uid: entry.Uid,
+ Gid: entry.Gid,
+ Collection: entry.Collection,
+ Replication: entry.Replication,
+ UserName: entry.UserName,
+ GroupNames: entry.GroupNames,
+ },
+ }
+
+ glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+ mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
+ if mkdirErr != nil {
+ if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
+ glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+ return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
+ }
+ } else {
+ f.maybeAddBucket(dirEntry)
+ f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
+ }
+
+ } else if !dirEntry.IsDirectory() {
+ glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+ return fmt.Errorf("%s is a file", dirPath)
+ }
+
+ return nil
+}
+
func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
if oldEntry != nil {
entry.Attr.Crtime = oldEntry.Attr.Crtime
@@ -280,38 +281,19 @@ func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, e
}
-func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) ([]*Entry, error) {
- if strings.HasSuffix(string(p), "/") && len(p) > 1 {
- p = p[0 : len(p)-1]
- }
-
- var makeupEntries []*Entry
- entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix)
- for expiredCount > 0 && err == nil {
- makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix)
- if err == nil {
- entries = append(entries, makeupEntries...)
- }
- }
-
- return entries, err
-}
-
-func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, expiredCount int, lastFileName string, err error) {
- listedEntries, listErr := f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix)
- if listErr != nil {
- return listedEntries, expiredCount, "", listErr
- }
- for _, entry := range listedEntries {
- lastFileName = entry.Name()
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
+ lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
if entry.TtlSec > 0 {
if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
f.Store.DeleteOneEntry(ctx, entry)
expiredCount++
- continue
+ return true
}
}
- entries = append(entries, entry)
+ return eachEntryFunc(entry)
+ })
+ if err != nil {
+ return expiredCount, lastFileName, err
}
return
}
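
Net effect of the filer.go restructuring: parent directories are materialized only when the entry is actually new (oldEntry == nil), and the former root-to-leaf loop becomes a recursion that checks the deepest ancestor first and creates missing ones on the unwind. The recursion shape, detached from the filer types:

```go
package main

import (
	"fmt"
	"strings"
)

// ensureParents mirrors the shape of ensureParentDirectoryEntry above:
// check the directory at this level; if absent, ensure its parent first,
// then create it, so creation happens root-first on the unwind.
func ensureParents(existing map[string]bool, parts []string, level int) {
	if level == 0 {
		return // the root "/" always exists
	}
	dir := "/" + strings.Join(parts[:level], "/")
	if !existing[dir] {
		ensureParents(existing, parts, level-1)
		existing[dir] = true
		fmt.Println("created", dir)
	}
}

func main() {
	parts := strings.Split("a/b/c/file.txt", "/")
	ensureParents(map[string]bool{}, parts, len(parts)-1)
	// prints: created /a, created /a/b, created /a/b/c
}
```
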
diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go
index 4d4f4abc3..ba170f02e 100644
--- a/weed/filer/filer_buckets.go
+++ b/weed/filer/filer_buckets.go
@@ -27,9 +27,9 @@ func (f *Filer) LoadBuckets() {
buckets: make(map[BucketName]*BucketOption),
}
- limit := math.MaxInt32
+ limit := int64(math.MaxInt32)
- entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
+ entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "")
if err != nil {
glog.V(1).Infof("no buckets found: %v", err)
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index da92c4f4b..50a669f40 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -30,7 +30,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
// delete the folder children, not including the folder itself
var dirChunks []*filer_pb.FileChunk
var dirHardLinkIds []HardLinkId
- dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isFromOtherCluster, signatures)
+ dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
if err != nil {
glog.V(0).Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
@@ -63,46 +63,49 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
return nil
}
-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
lastFileName := ""
includeLastFile := false
- for {
- entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
- if err != nil {
- glog.Errorf("list folder %s: %v", entry.FullPath, err)
- return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
- }
- if lastFileName == "" && !isRecursive && len(entries) > 0 {
- // only for first iteration in the loop
- glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
- return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
- }
+ if !isDeletingBucket {
+ for {
+ entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "")
+ if err != nil {
+ glog.Errorf("list folder %s: %v", entry.FullPath, err)
+ return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+ }
+ if lastFileName == "" && !isRecursive && len(entries) > 0 {
+ // only for first iteration in the loop
+ glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+ return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
+ }
- for _, sub := range entries {
- lastFileName = sub.Name()
- var dirChunks []*filer_pb.FileChunk
- var dirHardLinkIds []HardLinkId
- if sub.IsDirectory() {
- dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false, nil)
- chunks = append(chunks, dirChunks...)
- hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
- } else {
- f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
- if len(sub.HardLinkId) != 0 {
- // hard link chunk data are deleted separately
- hardlinkIds = append(hardlinkIds, sub.HardLinkId)
+ for _, sub := range entries {
+ lastFileName = sub.Name()
+ var dirChunks []*filer_pb.FileChunk
+ var dirHardLinkIds []HardLinkId
+ if sub.IsDirectory() {
+ subIsDeletingBucket := f.isBucket(sub)
+ dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
+ chunks = append(chunks, dirChunks...)
+ hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
} else {
- chunks = append(chunks, sub.Chunks...)
+ f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
+ if len(sub.HardLinkId) != 0 {
+ // hard link chunk data are deleted separately
+ hardlinkIds = append(hardlinkIds, sub.HardLinkId)
+ } else {
+ chunks = append(chunks, sub.Chunks...)
+ }
+ }
+ if err != nil && !ignoreRecursiveError {
+ return nil, nil, err
}
}
- if err != nil && !ignoreRecursiveError {
- return nil, nil, err
- }
- }
- if len(entries) < PaginationSize {
- break
+ if len(entries) < PaginationSize {
+ break
+ }
}
}
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index 40755e6a7..f3a795ad0 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -113,13 +113,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
sizeBuf := make([]byte, 4)
startTsNs := startTime.UnixNano()
- dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "")
+ dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "")
if listDayErr != nil {
return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
}
for _, dayEntry := range dayEntries {
// println("checking day", dayEntry.FullPath)
- hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "")
+ hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "")
if listHourMinuteErr != nil {
return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
}
@@ -170,7 +170,7 @@ func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func
return lastTsNs, err
}
if logEntry.TsNs <= ns {
- return lastTsNs, nil
+ continue
}
// println("each log: ", logEntry.TsNs)
if err := eachLogEntryFn(logEntry); err != nil {
diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go
new file mode 100644
index 000000000..0a14d3756
--- /dev/null
+++ b/weed/filer/filer_search.go
@@ -0,0 +1,90 @@
+package filer
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "path/filepath"
+ "strings"
+)
+
+func splitPattern(pattern string) (prefix string, restPattern string) {
+ position := strings.Index(pattern, "*")
+ if position >= 0 {
+ return pattern[:position], pattern[position:]
+ }
+ position = strings.Index(pattern, "?")
+ if position >= 0 {
+ return pattern[:position], pattern[position:]
+ }
+ return "", restPattern
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string) (entries []*Entry, hasMore bool, err error) {
+
+ _, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, func(entry *Entry) bool {
+ entries = append(entries, entry)
+ return true
+ })
+
+ hasMore = int64(len(entries)) >= limit+1
+ if hasMore {
+ entries = entries[:limit]
+ }
+
+ return entries, hasMore, err
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+ if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+ p = p[0 : len(p)-1]
+ }
+
+ prefixInNamePattern, restNamePattern := splitPattern(namePattern)
+ if prefixInNamePattern != "" {
+ prefix = prefixInNamePattern
+ }
+ var missedCount int64
+
+ missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, eachEntryFunc)
+
+ for missedCount > 0 && err == nil {
+ missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, eachEntryFunc)
+ }
+
+ return
+}
+
+func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
+
+ if len(restNamePattern) == 0 {
+ lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
+ return 0, lastFileName, err
+ }
+
+ lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
+ nameToTest := strings.ToLower(entry.Name())
+ if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && matched {
+ if !eachEntryFunc(entry) {
+ return false
+ }
+ } else {
+ missedCount++
+ }
+ return true
+ })
+ return
+}
+
+func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+ var expiredCount int64
+ expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
+ for expiredCount > 0 && err == nil {
+ expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix, eachEntryFunc)
+ }
+ return
+}
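
splitPattern peels the literal part before the first wildcard off the pattern so the store can do cheap prefix filtering; only the remainder goes through filepath.Match, and entries the pattern rejects are counted in missedCount so a follow-up page can still fill the caller's limit. A small demonstration of the split, including the wildcard-free case fixed above:

```go
package main

import (
	"fmt"
	"strings"
)

// splitAt is an exact mirror of splitPattern above: '*' is checked
// before '?', and a pattern without wildcards is a pure prefix.
func splitAt(pattern string) (prefix, rest string) {
	if i := strings.Index(pattern, "*"); i >= 0 {
		return pattern[:i], pattern[i:]
	}
	if i := strings.Index(pattern, "?"); i >= 0 {
		return pattern[:i], pattern[i:]
	}
	return pattern, ""
}

func main() {
	fmt.Println(splitAt("ab*.jpg"))   // "ab" "*.jpg"
	fmt.Println(splitAt("report-??")) // "report-" "??"
	fmt.Println(splitAt("exact.txt")) // "exact.txt" ""
}
```
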
diff --git a/weed/filer/filerstore.go b/weed/filer/filerstore.go
index f1e6c6c35..8955a25c7 100644
--- a/weed/filer/filerstore.go
+++ b/weed/filer/filerstore.go
@@ -13,6 +13,8 @@ var (
ErrKvNotFound = errors.New("kv: not found")
)
+type ListEachEntryFunc func(entry *Entry) bool
+
type FilerStore interface {
// GetName gets the name to locate the configuration in filer.toml file
GetName() string
@@ -24,8 +26,8 @@ type FilerStore interface {
FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
DeleteEntry(context.Context, util.FullPath) (err error)
DeleteFolderChildren(context.Context, util.FullPath) (err error)
- ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
- ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error)
+ ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
+ ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
BeginTransaction(ctx context.Context) (context.Context, error)
CommitTransaction(ctx context.Context) error
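
This interface change is the heart of the diff: both listing methods now stream entries through a ListEachEntryFunc callback and return only the last scanned file name for pagination, instead of accumulating slices in every store. Returning false from the callback stops the store-side scan early. A sketch of a caller collecting at most n entries from any FilerStore:

```go
package example

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// collectN drains up to n entries from one directory, stopping the
// store-side scan as soon as enough entries have been seen.
func collectN(ctx context.Context, store filer.FilerStore, dir util.FullPath, n int64) ([]*filer.Entry, string, error) {
	var entries []*filer.Entry
	lastFileName, err := store.ListDirectoryEntries(ctx, dir, "", false, n, func(entry *filer.Entry) bool {
		entries = append(entries, entry)
		return int64(len(entries)) < n // false stops the underlying scan
	})
	return entries, lastFileName, err
}
```
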
diff --git a/weed/filer/filerstore_translate_path.go b/weed/filer/filerstore_translate_path.go
index ea0f9db77..00bf82ed4 100644
--- a/weed/filer/filerstore_translate_path.go
+++ b/weed/filer/filerstore_translate_path.go
@@ -106,32 +106,24 @@ func (t *FilerStorePathTranlator) DeleteFolderChildren(ctx context.Context, fp u
return t.actualStore.DeleteFolderChildren(ctx, newFullPath)
}
-func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
+func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {
newFullPath := t.translatePath(dirPath)
- entries, err := t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit)
- if err != nil {
- return nil, err
- }
- for _, entry := range entries {
+ return t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
- }
- return entries, err
+ return eachEntryFunc(entry)
+ })
}
-func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
+func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (string, error) {
newFullPath := t.translatePath(dirPath)
- entries, err := t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix)
- if err != nil {
- return nil, err
- }
- for _, entry := range entries {
+ return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
- }
- return entries, nil
+ return eachEntryFunc(entry)
+ })
}
func (t *FilerStorePathTranlator) BeginTransaction(ctx context.Context) (context.Context, error) {
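
With callbacks, the path translator no longer post-processes a returned slice; it wraps the caller's callback so each entry's FullPath is rewritten in flight. The same decoration idiom in isolation (the prefix handling is simplified relative to the translator's storeRoot arithmetic):

```go
package example

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// rebase decorates a ListEachEntryFunc so every entry is adjusted before
// the original callback sees it -- the pattern used by the translator and
// by FilerStoreWrapper's hard-link/deserialization hooks above.
func rebase(mountPrefix string, next filer.ListEachEntryFunc) filer.ListEachEntryFunc {
	return func(entry *filer.Entry) bool {
		entry.FullPath = util.FullPath(mountPrefix) + entry.FullPath
		return next(entry)
	}
}
```
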
diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go
index 3206d5ba4..64baac371 100644
--- a/weed/filer/filerstore_wrapper.go
+++ b/weed/filer/filerstore_wrapper.go
@@ -194,7 +194,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.
return actualStore.DeleteFolderChildren(ctx, fp)
}
-func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
+func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {
actualStore := fsw.getActualStore(dirPath + "/")
stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "list").Inc()
start := time.Now()
@@ -203,18 +203,14 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
}()
glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
- entries, err := actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
- if err != nil {
- return nil, err
- }
- for _, entry := range entries {
+ return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
fsw.maybeReadHardLink(ctx, entry)
filer_pb.AfterEntryDeserialization(entry.Chunks)
- }
- return entries, err
+ return eachEntryFunc(entry)
+ })
}
-func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
+func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
actualStore := fsw.getActualStore(dirPath + "/")
stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "prefixList").Inc()
start := time.Now()
@@ -222,48 +218,52 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds())
}()
glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
- entries, err := actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
+ lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc)
if err == ErrUnsupportedListDirectoryPrefixed {
- entries, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix)
+ lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
+ fsw.maybeReadHardLink(ctx, entry)
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ return eachEntryFunc(entry)
+ })
}
- if err != nil {
- return nil, err
- }
- for _, entry := range entries {
- fsw.maybeReadHardLink(ctx, entry)
- filer_pb.AfterEntryDeserialization(entry.Chunks)
- }
- return entries, nil
+ return lastFileName, err
}
-func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) (entries []*Entry, err error) {
+func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
actualStore := fsw.getActualStore(dirPath + "/")
- entries, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
- if err != nil {
- return nil, err
- }
if prefix == "" {
+ return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
+ }
+
+ var notPrefixed []*Entry
+ lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
+ notPrefixed = append(notPrefixed, entry)
+ return true
+ })
+ if err != nil {
return
}
- count := 0
- var lastFileName string
- notPrefixed := entries
- entries = nil
+ count := int64(0)
for count < limit && len(notPrefixed) > 0 {
for _, entry := range notPrefixed {
- lastFileName = entry.Name()
if strings.HasPrefix(entry.Name(), prefix) {
count++
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ return
+ }
if count >= limit {
break
}
}
}
if count < limit {
- notPrefixed, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit)
+ notPrefixed = notPrefixed[:0]
+ // advance lastFileName, otherwise the next page re-reads the same entries forever
+ lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit, func(entry *Entry) bool {
+ notPrefixed = append(notPrefixed, entry)
+ return true
+ })
if err != nil {
return
}
diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go
new file mode 100644
index 000000000..e0d878ca7
--- /dev/null
+++ b/weed/filer/hbase/hbase_store.go
@@ -0,0 +1,231 @@
+package hbase
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/tsuna/gohbase"
+ "github.com/tsuna/gohbase/hrpc"
+ "io"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &HbaseStore{})
+}
+
+type HbaseStore struct {
+ Client gohbase.Client
+ table []byte
+ cfKv string
+ cfMetaDir string
+ column string
+}
+
+func (store *HbaseStore) GetName() string {
+ return "hbase"
+}
+
+func (store *HbaseStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"zkquorum"),
+ configuration.GetString(prefix+"table"),
+ )
+}
+
+func (store *HbaseStore) initialize(zkquorum, table string) (err error) {
+ store.Client = gohbase.NewClient(zkquorum)
+ store.table = []byte(table)
+ store.cfKv = "kv"
+ store.cfMetaDir = "meta"
+ store.column = "a"
+
+ // check table exists
+ key := "whatever"
+ headers := map[string][]string{store.cfMetaDir: nil}
+ get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers))
+ if err != nil {
+ return fmt.Errorf("NewGet returned an error: %v", err)
+ }
+ _, err = store.Client.Get(get)
+ if err != gohbase.TableNotFound {
+ // any other outcome (success or an unrelated error) means the table already
+ // exists or will surface its problem on first real use; only create on TableNotFound
+ return nil
+ }
+
+ // create table
+ adminClient := gohbase.NewAdminClient(zkquorum)
+ cFamilies := []string{store.cfKv, store.cfMetaDir}
+ cf := make(map[string]map[string]string, len(cFamilies))
+ for _, f := range cFamilies {
+ cf[f] = nil
+ }
+ ct := hrpc.NewCreateTable(context.Background(), []byte(table), cf)
+ if err := adminClient.CreateTable(ct); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) error {
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+ if len(entry.Chunks) > 50 {
+ value = util.MaybeGzipData(value)
+ }
+
+ return store.doPut(ctx, store.cfMetaDir, []byte(entry.FullPath), value, entry.TtlSec)
+}
+
+func (store *HbaseStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *HbaseStore) FindEntry(ctx context.Context, path util.FullPath) (entry *filer.Entry, err error) {
+ value, err := store.doGet(ctx, store.cfMetaDir, []byte(path))
+ if err != nil {
+ if err == filer.ErrKvNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ return nil, err
+ }
+
+ entry = &filer.Entry{
+ FullPath: path,
+ }
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+ return entry, nil
+}
+
+func (store *HbaseStore) DeleteEntry(ctx context.Context, path util.FullPath) (err error) {
+ return store.doDelete(ctx, store.cfMetaDir, []byte(path))
+}
+
+func (store *HbaseStore) DeleteFolderChildren(ctx context.Context, path util.FullPath) (err error) {
+
+ family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}}
+ expectedPrefix := []byte(path.Child(""))
+ scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family))
+ if err != nil {
+ return err
+ }
+
+ scanner := store.Client.Scan(scan)
+ defer scanner.Close()
+ for {
+ res, err := scanner.Next()
+ if err != nil {
+ break
+ }
+ if len(res.Cells) == 0 {
+ continue
+ }
+ cell := res.Cells[0]
+
+ if !bytes.HasPrefix(cell.Row, expectedPrefix) {
+ break
+ }
+ fullpath := util.FullPath(cell.Row)
+ dir, _ := fullpath.DirAndName()
+ if dir != string(path) {
+ continue
+ }
+
+ err = store.doDelete(ctx, store.cfMetaDir, cell.Row)
+ if err != nil {
+ break
+ }
+
+ }
+ return
+}
+
+func (store *HbaseStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (string, error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}}
+ expectedPrefix := []byte(dirPath.Child(prefix))
+ scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family))
+ if err != nil {
+ return lastFileName, err
+ }
+
+ scanner := store.Client.Scan(scan)
+ defer scanner.Close()
+ for {
+ res, err := scanner.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return lastFileName, err
+ }
+ if len(res.Cells) == 0 {
+ continue
+ }
+ cell := res.Cells[0]
+
+ if !bytes.HasPrefix(cell.Row, expectedPrefix) {
+ break
+ }
+
+ fullpath := util.FullPath(cell.Row)
+ dir, fileName := fullpath.DirAndName()
+ if dir != string(dirPath) {
+ continue
+ }
+
+ value := cell.Value
+
+ if fileName == startFileName && !includeStartFile {
+ continue
+ }
+
+ limit--
+ if limit < 0 {
+ break
+ }
+
+ lastFileName = fileName
+
+ entry := &filer.Entry{
+ FullPath: fullpath,
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
+ glog.V(0).Infof("list %s : %v", entry.FullPath, decodeErr)
+ // return the error directly: "err" is shadowed by the scanner loop variable,
+ // and the final return would otherwise silently drop the decode failure
+ return lastFileName, decodeErr
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
+ }
+
+ return lastFileName, nil
+}
+
+func (store *HbaseStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+
+func (store *HbaseStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *HbaseStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *HbaseStore) Shutdown() {
+ store.Client.Close()
+}
diff --git a/weed/filer/hbase/hbase_store_kv.go b/weed/filer/hbase/hbase_store_kv.go
new file mode 100644
index 000000000..990e55a24
--- /dev/null
+++ b/weed/filer/hbase/hbase_store_kv.go
@@ -0,0 +1,76 @@
+package hbase
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/tsuna/gohbase/hrpc"
+ "time"
+)
+
+const (
+ COLUMN_NAME = "a"
+)
+
+func (store *HbaseStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ return store.doPut(ctx, store.cfKv, key, value, 0)
+}
+
+func (store *HbaseStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ return store.doGet(ctx, store.cfKv, key)
+}
+
+func (store *HbaseStore) KvDelete(ctx context.Context, key []byte) (err error) {
+ return store.doDelete(ctx, store.cfKv, key)
+}
+
+func (store *HbaseStore) doPut(ctx context.Context, cf string, key, value []byte, ttlSecond int32) (err error) {
+ if ttlSecond > 0 {
+ return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal), hrpc.TTL(time.Duration(ttlSecond)*time.Second))
+ }
+ return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal))
+}
+
+func (store *HbaseStore) doPutWithOptions(ctx context.Context, cf string, key, value []byte, options ...func(hrpc.Call) error) (err error) {
+ values := map[string]map[string][]byte{cf: map[string][]byte{}}
+ values[cf][COLUMN_NAME] = value
+ putRequest, err := hrpc.NewPut(ctx, store.table, key, values, options...)
+ if err != nil {
+ return err
+ }
+ _, err = store.Client.Put(putRequest)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (store *HbaseStore) doGet(ctx context.Context, cf string, key []byte) (value []byte, err error) {
+ family := map[string][]string{cf: {COLUMN_NAME}}
+ getRequest, err := hrpc.NewGet(context.Background(), store.table, key, hrpc.Families(family))
+ if err != nil {
+ return nil, err
+ }
+ getResp, err := store.Client.Get(getRequest)
+ if err != nil {
+ return nil, err
+ }
+ if len(getResp.Cells) == 0 {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return getResp.Cells[0].Value, nil
+}
+
+func (store *HbaseStore) doDelete(ctx context.Context, cf string, key []byte) (err error) {
+ values := map[string]map[string][]byte{cf: map[string][]byte{}}
+ values[cf][COLUMN_NAME] = nil
+ deleteRequest, err := hrpc.NewDel(ctx, store.table, key, values, hrpc.Durability(hrpc.AsyncWal))
+ if err != nil {
+ return err
+ }
+ _, err = store.Client.Delete(deleteRequest)
+ if err != nil {
+ return err
+ }
+ return nil
+}
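
The entry store and the generic KV store share one HBase table, split across the "meta" and "kv" column families with the single column "a"; per-entry TTLs are pushed down to HBase through hrpc.TTL rather than filtered at read time. An in-package usage sketch; the ZooKeeper quorum and table name are placeholders, and the test only illustrates the call sequence:

```go
package hbase

import (
	"context"
	"testing"
)

// TestKvRoundTrip is illustrative only: it needs a reachable HBase cluster,
// so the quorum address and table name below are placeholders.
func TestKvRoundTrip(t *testing.T) {
	store := &HbaseStore{}
	if err := store.initialize("localhost:2181", "seaweedfs"); err != nil {
		t.Skip("no hbase available:", err)
	}
	defer store.Shutdown()

	ctx := context.Background()
	if err := store.KvPut(ctx, []byte("greeting"), []byte("hello")); err != nil {
		t.Fatal(err)
	}
	value, err := store.KvGet(ctx, []byte("greeting"))
	if err != nil {
		t.Fatal(err)
	}
	if string(value) != "hello" {
		t.Fatalf("got %q", value)
	}
}
```
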
diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
index b879f3a6e..ac0ad4ba6 100644
--- a/weed/filer/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -107,7 +107,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
return nil, filer_pb.ErrNotFound
}
if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
entry = &filer.Entry{
@@ -162,16 +162,19 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath we
return nil
}
-func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer.Entry, err error) {
- return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
+func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
-func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- directoryPrefix := genDirectoryKeyPrefix(fullpath, prefix)
+ directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+ }
- iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil)
+ iter := store.db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
@@ -181,26 +184,29 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, ful
if fileName == "" {
continue
}
- if fileName == startFileName && !inclusive {
+ if fileName == startFileName && !includeStartFile {
continue
}
limit--
if limit < 0 {
break
}
+ lastFileName = fileName
entry := &filer.Entry{
- FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
iter.Release()
- return entries, err
+ return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {
diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go
index c5bfb8474..9c342605e 100644
--- a/weed/filer/leveldb/leveldb_store_test.go
+++ b/weed/filer/leveldb/leveldb_store_test.go
@@ -2,9 +2,11 @@ package leveldb
import (
"context"
+ "fmt"
"io/ioutil"
"os"
"testing"
+ "time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -49,14 +51,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
- entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -75,7 +77,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
- entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return
@@ -86,3 +88,28 @@ func TestEmptyRoot(t *testing.T) {
}
}
+
+func BenchmarkInsertEntry(b *testing.B) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
+ defer os.RemoveAll(dir)
+ store := &LevelDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ entry := &filer.Entry{
+ FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
+ Attr: filer.Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ },
+ }
+ store.InsertEntry(ctx, entry)
+ }
+}
diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
index 4b41554b9..c76340da4 100644
--- a/weed/filer/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -115,7 +115,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
return nil, filer_pb.ErrNotFound
}
if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
entry = &filer.Entry{
@@ -171,15 +171,17 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath w
return nil
}
-func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer.Entry, err error) {
- return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
+func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
-func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
+func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, prefix, store.dbCount)
- lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
+ directoryPrefix, partitionId := genDirectoryKeyPrefix(dirPath, prefix, store.dbCount)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart, _ = genDirectoryKeyPrefix(dirPath, startFileName, store.dbCount)
+ }
iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
@@ -191,15 +193,16 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fu
if fileName == "" {
continue
}
- if fileName == startFileName && !inclusive {
+ if fileName == startFileName && !includeStartFile {
continue
}
limit--
if limit < 0 {
break
}
+ lastFileName = fileName
entry := &filer.Entry{
- FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
@@ -208,11 +211,13 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, fu
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
iter.Release()
- return entries, err
+ return lastFileName, err
}
func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) {
diff --git a/weed/filer/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go
index 22c0d6052..495c73fdd 100644
--- a/weed/filer/leveldb2/leveldb2_store_test.go
+++ b/weed/filer/leveldb2/leveldb2_store_test.go
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "")
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
- entries, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
// checking one upper directory
- entries, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "")
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go
new file mode 100644
index 000000000..c81ea7bbe
--- /dev/null
+++ b/weed/filer/leveldb3/leveldb3_store.go
@@ -0,0 +1,375 @@
+package leveldb
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "fmt"
+ "github.com/syndtr/goleveldb/leveldb"
+ leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+ "io"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DEFAULT = "_main"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &LevelDB3Store{})
+}
+
+type LevelDB3Store struct {
+ dir string
+ dbs map[string]*leveldb.DB
+ dbsLock sync.RWMutex
+}
+
+func (store *LevelDB3Store) GetName() string {
+ return "leveldb3"
+}
+
+func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
+ return store.initialize(dir)
+}
+
+func (store *LevelDB3Store) initialize(dir string) (err error) {
+ glog.Infof("filer store leveldb3 dir: %s", dir)
+ if err := weed_util.TestFolderWritable(dir); err != nil {
+ return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
+ }
+ store.dir = dir
+
+ db, loadDbErr := store.loadDB(DEFAULT)
+ if loadDbErr != nil {
+ return loadDbErr
+ }
+ store.dbs = make(map[string]*leveldb.DB)
+ store.dbs[DEFAULT] = db
+
+ return
+}
+
+func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
+
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+ if name != DEFAULT {
+ opts = &opt.Options{
+ BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+ }
+
+ dbFolder := fmt.Sprintf("%s/%s", store.dir, name)
+ os.MkdirAll(dbFolder, 0755)
+ db, dbErr := leveldb.OpenFile(dbFolder, opts)
+ if leveldb_errors.IsCorrupted(dbErr) {
+ db, dbErr = leveldb.RecoverFile(dbFolder, opts)
+ }
+ if dbErr != nil {
+ glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+ return nil, dbErr
+ }
+ return db, nil
+}
+
+func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bool) (*leveldb.DB, string, weed_util.FullPath, error) {
+
+ store.dbsLock.RLock()
+
+ defaultDB := store.dbs[DEFAULT]
+ if !strings.HasPrefix(string(fullpath), "/buckets/") {
+ store.dbsLock.RUnlock()
+ return defaultDB, DEFAULT, fullpath, nil
+ }
+
+ // detect bucket
+ bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 && !isForChildren {
+ store.dbsLock.RUnlock()
+ return defaultDB, DEFAULT, fullpath, nil
+ }
+ bucket := bucketAndObjectKey
+ shortPath := weed_util.FullPath("/")
+ if t > 0 {
+ bucket = bucketAndObjectKey[:t]
+ shortPath = weed_util.FullPath(bucketAndObjectKey[t:])
+ }
+
+ if db, found := store.dbs[bucket]; found {
+ store.dbsLock.RUnlock()
+ return db, bucket, shortPath, nil
+ }
+
+ store.dbsLock.RUnlock()
+ // upgrade to write lock
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ // double check after getting the write lock
+ if db, found := store.dbs[bucket]; found {
+ return db, bucket, shortPath, nil
+ }
+
+ // create db
+ db, err := store.loadDB(bucket)
+ if err != nil {
+ return nil, bucket, shortPath, err
+ }
+ store.dbs[bucket] = db
+
+ return db, bucket, shortPath, nil
+}
+
+func (store *LevelDB3Store) closeDB(bucket string) {
+
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ if db, found := store.dbs[bucket]; found {
+ db.Close()
+ delete(store.dbs, bucket)
+ }
+
+}
+
+func (store *LevelDB3Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *LevelDB3Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *LevelDB3Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ db, _, shortPath, err := store.findDB(entry.FullPath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ key := genKey(dir, name)
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ value = weed_util.MaybeGzipData(value)
+ }
+
+ err = db.Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+
+ return nil
+}
+
+func (store *LevelDB3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
+
+ db, _, shortPath, err := store.findDB(fullpath, false)
+ if err != nil {
+ return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ key := genKey(dir, name)
+
+ data, err := db.Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+
+ return entry, nil
+}
+
+func (store *LevelDB3Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+
+ db, _, shortPath, err := store.findDB(fullpath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ key := genKey(dir, name)
+
+ err = db.Delete(key, nil)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB3Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+
+ db, bucket, shortPath, err := store.findDB(fullpath, true)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ if bucket != DEFAULT && shortPath == "/" {
+ store.closeDB(bucket)
+ if bucket != "" { // just to make sure
+ os.RemoveAll(store.dir + "/" + bucket)
+ }
+ return nil
+ }
+
+ directoryPrefix := genDirectoryKeyPrefix(shortPath, "")
+
+ batch := new(leveldb.Batch)
+
+ iter := db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ batch.Delete(append(directoryPrefix, []byte(fileName)...))
+ }
+ iter.Release()
+
+ err = db.Write(batch, nil)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB3Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ db, _, shortPath, err := store.findDB(dirPath, true)
+ if err != nil {
+ return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
+ }
+
+ directoryPrefix := genDirectoryKeyPrefix(shortPath, prefix)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(shortPath, startFileName)
+ }
+
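+ // scan from the resume point and stop once keys fall outside the directory prefix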
+ iter := db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ if fileName == startFileName && !includeStartFile {
+ continue
+ }
+ limit--
+ if limit < 0 {
+ break
+ }
+ lastFileName = fileName
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
+ }
+
+ // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+ if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
+ }
+ iter.Release()
+
+ return lastFileName, err
+}
+
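+// a key is md5(dirPath) followed by the file name, so every entry in one
+// directory shares the same fixed-size 16-byte prefix and sorts contiguously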
+func genKey(dirPath, fileName string) (key []byte) {
+ key = hashToBytes(dirPath)
+ key = append(key, []byte(fileName)...)
+ return key
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
+ keyPrefix = hashToBytes(string(fullpath))
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix
+}
+
+func getNameFromKey(key []byte) string {
+
+ return string(key[md5.Size:])
+
+}
+
+// hash directory
+func hashToBytes(dir string) []byte {
+ h := md5.New()
+ io.WriteString(h, dir)
+ b := h.Sum(nil)
+ return b
+}
+
+func (store *LevelDB3Store) Shutdown() {
+ for _, db := range store.dbs {
+ db.Close()
+ }
+}
diff --git a/weed/filer/leveldb3/leveldb3_store_kv.go b/weed/filer/leveldb3/leveldb3_store_kv.go
new file mode 100644
index 000000000..18d782b80
--- /dev/null
+++ b/weed/filer/leveldb3/leveldb3_store_kv.go
@@ -0,0 +1,46 @@
+package leveldb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
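+ // KV pairs always live in the default DB, never in a per-bucket DB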
+ err = store.dbs[DEFAULT].Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ value, err = store.dbs[DEFAULT].Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ return
+}
+
+func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ err = store.dbs[DEFAULT].Delete(key, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/leveldb3/leveldb3_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go
new file mode 100644
index 000000000..53b0e927f
--- /dev/null
+++ b/weed/filer/leveldb3/leveldb3_store_test.go
@@ -0,0 +1,88 @@
+package leveldb
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestCreateAndFind(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+ defer os.RemoveAll(dir)
+ store := &LevelDB3Store{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
+
+ entry1 := &filer.Entry{
+ FullPath: fullpath,
+ Attr: filer.Attr{
+ Mode: 0440,
+ Uid: 1234,
+ Gid: 5678,
+ },
+ }
+
+ if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+ t.Errorf("create entry %v: %v", entry1.FullPath, err)
+ return
+ }
+
+ entry, err := testFiler.FindEntry(ctx, fullpath)
+
+ if err != nil {
+ t.Errorf("find entry: %v", err)
+ return
+ }
+
+ if entry.FullPath != entry1.FullPath {
+ t.Errorf("find wrong entry: %v", entry.FullPath)
+ return
+ }
+
+ // check the immediate parent directory
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+ // check the root directory
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func TestEmptyRoot(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+ defer os.RemoveAll(dir)
+ store := &LevelDB3Store{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ // listing the empty root directory
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+ if err != nil {
+ t.Errorf("list entries: %v", err)
+ return
+ }
+ if len(entries) != 0 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go
index d20c6477a..1ef5056f4 100644
--- a/weed/filer/mongodb/mongodb_store.go
+++ b/weed/filer/mongodb/mongodb_store.go
@@ -178,14 +178,14 @@ func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath ut
return nil
}
-func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
-func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
- if inclusive {
+ var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}}
+ if includeStartFile {
where["name"] = bson.M{
"$gte": startFileName,
}
@@ -197,26 +197,30 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
var data Model
err := cur.Decode(&data)
if err != nil && err != mongo.ErrNoDocuments {
- return nil, err
+ return lastFileName, err
}
entry := &filer.Entry{
- FullPath: util.NewFullPath(string(fullpath), data.Name),
+ FullPath: util.NewFullPath(string(dirPath), data.Name),
}
+ lastFileName = data.Name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
+
}
if err := cur.Close(ctx); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
}
- return entries, err
+ return lastFileName, err
}
func (store *MongodbStore) Shutdown() {
diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go
new file mode 100644
index 000000000..057484c37
--- /dev/null
+++ b/weed/filer/mysql/mysql_sql_gen.go
@@ -0,0 +1,52 @@
+package mysql
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ _ "github.com/go-sql-driver/mysql"
+)
+
+type SqlGenMysql struct {
+ CreateTableSqlTemplate string
+ DropTableSqlTemplate string
+}
+
+var (
+ _ = abstract_sql.SqlGenerator(&SqlGenMysql{})
+)
+
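+// "bucket" in these generators is used verbatim as the table name: stores
+// that enable SupportBucketTable create one such table per bucket, while the
+// others only ever use the default table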
+func (gen *SqlGenMysql) GetSqlInsert(bucket string) string {
+ return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlUpdate(bucket string) string {
+ return fmt.Sprintf("UPDATE %s SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlFind(bucket string) string {
+ return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlDelete(bucket string) string {
+ return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(bucket string) string {
+ return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND directory=?", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlListExclusive(bucket string) string {
+ return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlListInclusive(bucket string) string {
+ return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlCreateTable(bucket string) string {
+ return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket)
+}
+
+func (gen *SqlGenMysql) GetSqlDropTable(bucket string) string {
+ return fmt.Sprintf(gen.DropTableSqlTemplate, bucket)
+}
diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go
index 5bc132980..686628740 100644
--- a/weed/filer/mysql/mysql_store.go
+++ b/weed/filer/mysql/mysql_store.go
@@ -3,8 +3,9 @@ package mysql
import (
"database/sql"
"fmt"
-
"github.com/chrislusf/seaweedfs/weed/filer"
+ "time"
+
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
"github.com/chrislusf/seaweedfs/weed/util"
_ "github.com/go-sql-driver/mysql"
@@ -35,20 +36,19 @@ func (store *MysqlStore) Initialize(configuration util.Configuration, prefix str
configuration.GetString(prefix+"database"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
configuration.GetBool(prefix+"interpolateParams"),
)
}
-func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
- interpolateParams bool) (err error) {
- //
- store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
- store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
- store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
- store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
- store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
- store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?"
- store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?"
+func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen,
+ maxLifetimeSeconds int, interpolateParams bool) (err error) {
+
+ store.SupportBucketTable = false
+ store.SqlGenerator = &SqlGenMysql{
+ CreateTableSqlTemplate: "",
+ DropTableSqlTemplate: "drop table %s",
+ }
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
if interpolateParams {
@@ -65,6 +65,7 @@ func (store *MysqlStore) initialize(user, password, hostname string, port int, d
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
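+	// recycle pooled connections before the server side (e.g. MySQL wait_timeout) silently drops them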
+ store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go
new file mode 100644
index 000000000..15216b651
--- /dev/null
+++ b/weed/filer/mysql2/mysql2_store.go
@@ -0,0 +1,82 @@
+package mysql2
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ _ "github.com/go-sql-driver/mysql"
+)
+
+const (
+ CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &MysqlStore2{})
+}
+
+type MysqlStore2 struct {
+ abstract_sql.AbstractSqlStore
+}
+
+func (store *MysqlStore2) GetName() string {
+ return "mysql2"
+}
+
+func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"createTable"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+ configuration.GetBool(prefix+"interpolateParams"),
+ )
+}
+
+func (store *MysqlStore2) initialize(createTable, user, password, hostname string, port int, database string, maxIdle, maxOpen,
+ maxLifetimeSeconds int, interpolateParams bool) (err error) {
+
+ store.SupportBucketTable = true
+ store.SqlGenerator = &mysql.SqlGenMysql{
+ CreateTableSqlTemplate: createTable,
+ DropTableSqlTemplate: "drop table %s",
+ }
+
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+ if interpolateParams {
+ sqlUrl += "&interpolateParams=true"
+ }
+
+ var dbErr error
+ store.DB, dbErr = sql.Open("mysql", sqlUrl)
+ if dbErr != nil {
+ // sql.Open returns a nil *sql.DB on error, so there is nothing to Close here
+ store.DB = nil
+ return fmt.Errorf("cannot connect to %s: %v", sqlUrl, dbErr)
+ }
+
+ store.DB.SetMaxIdleConns(maxIdle)
+ store.DB.SetMaxOpenConns(maxOpen)
+ store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+ if err = store.DB.Ping(); err != nil {
+ return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+ }
+
+ if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
+ return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/postgres/postgres_sql_gen.go b/weed/filer/postgres/postgres_sql_gen.go
new file mode 100644
index 000000000..284cf254b
--- /dev/null
+++ b/weed/filer/postgres/postgres_sql_gen.go
@@ -0,0 +1,53 @@
+package postgres
+
+import (
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ _ "github.com/lib/pq"
+)
+
+type SqlGenPostgres struct {
+ CreateTableSqlTemplate string
+ DropTableSqlTemplate string
+}
+
+var (
+ _ = abstract_sql.SqlGenerator(&SqlGenPostgres{})
+)
+
+func (gen *SqlGenPostgres) GetSqlInsert(bucket string) string {
+ return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlUpdate(bucket string) string {
+ return fmt.Sprintf("UPDATE %s SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlFind(bucket string) string {
+ return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlDelete(bucket string) string {
+ return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(bucket string) string {
+ return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND directory=$2", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlListExclusive(bucket string) string {
+ return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlListInclusive(bucket string) string {
+ return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlCreateTable(bucket string) string {
+ return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket)
+}
+
+func (gen *SqlGenPostgres) GetSqlDropTable(bucket string) string {
+ return fmt.Sprintf(gen.DropTableSqlTemplate, bucket)
+}
diff --git a/weed/filer/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go
index 2325568fe..27c6278c7 100644
--- a/weed/filer/postgres/postgres_store.go
+++ b/weed/filer/postgres/postgres_store.go
@@ -33,21 +33,20 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"hostname"),
configuration.GetInt(prefix+"port"),
configuration.GetString(prefix+"database"),
+ configuration.GetString(prefix+"schema"),
configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
)
}
-func (store *PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) {
+func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
- store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"
- store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4"
- store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
- store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
- store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
- store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5"
- store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5"
+ store.SupportBucketTable = false
+ store.SqlGenerator = &SqlGenPostgres{
+ CreateTableSqlTemplate: "",
+ DropTableSqlTemplate: "drop table %s",
+ }
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" {
@@ -59,6 +58,9 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
if database != "" {
sqlUrl += " dbname=" + database
}
+ if schema != "" {
+ sqlUrl += " search_path=" + schema
+ }
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go
new file mode 100644
index 000000000..82552376f
--- /dev/null
+++ b/weed/filer/postgres2/postgres2_store.go
@@ -0,0 +1,87 @@
+package postgres2
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ _ "github.com/lib/pq"
+)
+
+const (
+ CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &PostgresStore2{})
+}
+
+type PostgresStore2 struct {
+ abstract_sql.AbstractSqlStore
+}
+
+func (store *PostgresStore2) GetName() string {
+ return "postgres2"
+}
+
+func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"createTable"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetString(prefix+"schema"),
+ configuration.GetString(prefix+"sslmode"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ )
+}
+
+func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
+
+ store.SupportBucketTable = true
+ store.SqlGenerator = &postgres.SqlGenPostgres{
+ CreateTableSqlTemplate: createTable,
+ DropTableSqlTemplate: "drop table %s",
+ }
+
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
+ if user != "" {
+ sqlUrl += " user=" + user
+ }
+ if password != "" {
+ sqlUrl += " password=" + password
+ }
+ if database != "" {
+ sqlUrl += " dbname=" + database
+ }
+ if schema != "" {
+ sqlUrl += " search_path=" + schema
+ }
+ var dbErr error
+ store.DB, dbErr = sql.Open("postgres", sqlUrl)
+ if dbErr != nil {
+ // sql.Open returns a nil *sql.DB on error, so there is nothing to Close here
+ store.DB = nil
+ return fmt.Errorf("cannot connect to %s: %v", sqlUrl, dbErr)
+ }
+
+ store.DB.SetMaxIdleConns(maxIdle)
+ store.DB.SetMaxOpenConns(maxOpen)
+
+ if err = store.DB.Ping(); err != nil {
+ return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+ }
+
+ if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
+ return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index 6193dbd45..a1e989684 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -18,7 +18,7 @@ import (
type ChunkReadAt struct {
masterClient *wdclient.MasterClient
chunkViews []*ChunkView
- lookupFileId LookupFileIdFunctionType
+ lookupFileId wdclient.LookupFileIdFunctionType
readerLock sync.Mutex
fileSize int64
@@ -31,9 +31,7 @@ type ChunkReadAt struct {
var _ = io.ReaderAt(&ChunkReadAt{})
var _ = io.Closer(&ChunkReadAt{})
-type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)
-
-func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
+func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionType {
vidCache := make(map[string]*filer_pb.Locations)
var vicCacheLock sync.RWMutex
@@ -87,11 +85,11 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
}
}
-func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
+func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
return &ChunkReadAt{
chunkViews: chunkViews,
- lookupFileId: LookupFn(filerClient),
+ lookupFileId: lookupFn,
chunkCache: chunkCache,
fileSize: fileSize,
}
@@ -109,7 +107,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
defer c.readerLock.Unlock()
glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
- return c.doReadAt(p[n:], offset+int64(n))
+ return c.doReadAt(p, offset)
}
func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
diff --git a/weed/filer/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go
index 8af94ee55..9572058a8 100644
--- a/weed/filer/redis/redis_cluster_store.go
+++ b/weed/filer/redis/redis_cluster_store.go
@@ -3,7 +3,7 @@ package redis
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func init() {
@@ -20,8 +20,8 @@ func (store *RedisClusterStore) GetName() string {
func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) {
- configuration.SetDefault(prefix+"useReadOnly", true)
- configuration.SetDefault(prefix+"routeByLatency", true)
+ configuration.SetDefault(prefix+"useReadOnly", false)
+ configuration.SetDefault(prefix+"routeByLatency", false)
return store.initialize(
configuration.GetStringSlice(prefix+"addresses"),
diff --git a/weed/filer/redis/redis_store.go b/weed/filer/redis/redis_store.go
index e152457ed..665352a63 100644
--- a/weed/filer/redis/redis_store.go
+++ b/weed/filer/redis/redis_store.go
@@ -3,7 +3,7 @@ package redis
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func init() {
diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
index 0de9924a3..30d11a7f4 100644
--- a/weed/filer/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -7,7 +7,7 @@ import (
"strings"
"time"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -44,7 +44,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.
value = util.MaybeGzipData(value)
}
- _, err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
+ _, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
@@ -52,7 +52,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.
dir, name := entry.FullPath.DirAndName()
if name != "" {
- _, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result()
+ _, err = store.Client.SAdd(ctx, genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
}
@@ -68,7 +68,7 @@ func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.
func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
- data, err := store.Client.Get(string(fullpath)).Result()
+ data, err := store.Client.Get(ctx, string(fullpath)).Result()
if err == redis.Nil {
return nil, filer_pb.ErrNotFound
}
@@ -90,7 +90,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.F
func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
- _, err = store.Client.Del(string(fullpath)).Result()
+ _, err = store.Client.Del(ctx, string(fullpath)).Result()
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
@@ -98,7 +98,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util
dir, name := fullpath.DirAndName()
if name != "" {
- _, err = store.Client.SRem(genDirectoryListKey(dir), name).Result()
+ _, err = store.Client.SRem(ctx, genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
}
@@ -109,14 +109,14 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util
func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
- members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
+ members, err := store.Client.SMembers(ctx, genDirectoryListKey(string(fullpath))).Result()
if err != nil {
return fmt.Errorf("delete folder %s : %v", fullpath, err)
}
for _, fileName := range members {
path := util.NewFullPath(string(fullpath), fileName)
- _, err = store.Client.Del(string(path)).Result()
+ _, err = store.Client.Del(ctx, string(path)).Result()
if err != nil {
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
}
@@ -125,17 +125,16 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
return nil
}
-func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
-func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer.Entry, err error) {
+func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- dirListKey := genDirectoryListKey(string(fullpath))
- members, err := store.Client.SMembers(dirListKey).Result()
+ dirListKey := genDirectoryListKey(string(dirPath))
+ members, err := store.Client.SMembers(ctx, dirListKey).Result()
if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
// skip
@@ -144,7 +143,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
for _, m := range members {
if strings.Compare(m, startFileName) >= 0 {
if m == startFileName {
- if inclusive {
+ if includeStartFile {
t = append(t, m)
}
} else {
@@ -161,29 +160,35 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
})
// limit
- if limit < len(members) {
+ if limit < int64(len(members)) {
members = members[:limit]
}
// fetch entry meta
for _, fileName := range members {
- path := util.NewFullPath(string(fullpath), fileName)
+ path := util.NewFullPath(string(dirPath), fileName)
entry, err := store.FindEntry(ctx, path)
+ lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
+ if err == filer_pb.ErrNotFound {
+ continue
+ }
} else {
if entry.TtlSec > 0 {
if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
- store.Client.Del(string(path)).Result()
- store.Client.SRem(dirListKey, fileName).Result()
+ store.Client.Del(ctx, string(path)).Result()
+ store.Client.SRem(ctx, dirListKey, fileName).Result()
continue
}
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
}
- return entries, err
+ return lastFileName, err
}
func genDirectoryListKey(dir string) (dirList string) {
diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go
index 0fc12c631..ad6e389ed 100644
--- a/weed/filer/redis/universal_redis_store_kv.go
+++ b/weed/filer/redis/universal_redis_store_kv.go
@@ -5,12 +5,12 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
- _, err = store.Client.Set(string(key), value, 0).Result()
+ _, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
return fmt.Errorf("kv put: %v", err)
@@ -21,7 +21,7 @@ func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value [
func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
- data, err := store.Client.Get(string(key)).Result()
+ data, err := store.Client.Get(ctx, string(key)).Result()
if err == redis.Nil {
return nil, filer.ErrKvNotFound
@@ -32,7 +32,7 @@ func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value
func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) {
- _, err = store.Client.Del(string(key)).Result()
+ _, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
return fmt.Errorf("kv delete: %v", err)
diff --git a/weed/filer/redis2/redis_cluster_store.go b/weed/filer/redis2/redis_cluster_store.go
index c7742bb19..22d09da25 100644
--- a/weed/filer/redis2/redis_cluster_store.go
+++ b/weed/filer/redis2/redis_cluster_store.go
@@ -3,7 +3,7 @@ package redis2
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func init() {
@@ -20,8 +20,8 @@ func (store *RedisCluster2Store) GetName() string {
func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
- configuration.SetDefault(prefix+"useReadOnly", true)
- configuration.SetDefault(prefix+"routeByLatency", true)
+ configuration.SetDefault(prefix+"useReadOnly", false)
+ configuration.SetDefault(prefix+"routeByLatency", false)
return store.initialize(
configuration.GetStringSlice(prefix+"addresses"),
diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go
index da404ed4c..8eb97e374 100644
--- a/weed/filer/redis2/redis_store.go
+++ b/weed/filer/redis2/redis_store.go
@@ -3,7 +3,7 @@ package redis2
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func init() {
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index 00d02ea14..aab3d1f4a 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -56,7 +56,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
value = util.MaybeGzipData(value)
}
- if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
+ if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
@@ -66,7 +66,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
}
if name != "" {
- if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
+ if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil {
return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
}
}
@@ -81,7 +81,7 @@ func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer
func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
- data, err := store.Client.Get(string(fullpath)).Result()
+ data, err := store.Client.Get(ctx, string(fullpath)).Result()
if err == redis.Nil {
return nil, filer_pb.ErrNotFound
}
@@ -103,12 +103,12 @@ func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.
func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
- _, err = store.Client.Del(genDirectoryListKey(string(fullpath))).Result()
+ _, err = store.Client.Del(ctx, genDirectoryListKey(string(fullpath))).Result()
if err != nil {
return fmt.Errorf("delete dir list %s : %v", fullpath, err)
}
- _, err = store.Client.Del(string(fullpath)).Result()
+ _, err = store.Client.Del(ctx, string(fullpath)).Result()
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
@@ -118,7 +118,7 @@ func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath uti
return nil
}
if name != "" {
- _, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
+ _, err = store.Client.ZRem(ctx, genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("DeleteEntry %s in parent dir: %v", fullpath, err)
}
@@ -133,14 +133,14 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
return nil
}
- members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
+ members, err := store.Client.ZRange(ctx, genDirectoryListKey(string(fullpath)), 0, -1).Result()
if err != nil {
return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)
}
for _, fileName := range members {
path := util.NewFullPath(string(fullpath), fileName)
- _, err = store.Client.Del(string(path)).Result()
+ _, err = store.Client.Del(ctx, string(path)).Result()
if err != nil {
return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err)
}
@@ -149,45 +149,50 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
return nil
}
-func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
- return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}
-func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer.Entry, err error) {
+func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- dirListKey := genDirectoryListKey(string(fullpath))
+ dirListKey := genDirectoryListKey(string(dirPath))
start := int64(0)
if startFileName != "" {
- start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
- if !inclusive {
+ start, _ = store.Client.ZRank(ctx, dirListKey, startFileName).Result()
+ if !includeStartFile {
start++
}
}
- members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
+ members, err := store.Client.ZRange(ctx, dirListKey, start, start+int64(limit)-1).Result()
if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
// fetch entry meta
for _, fileName := range members {
- path := util.NewFullPath(string(fullpath), fileName)
+ path := util.NewFullPath(string(dirPath), fileName)
entry, err := store.FindEntry(ctx, path)
+ lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
+ if err == filer_pb.ErrNotFound {
+ continue
+ }
} else {
if entry.TtlSec > 0 {
if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
- store.Client.Del(string(path)).Result()
- store.Client.ZRem(dirListKey, fileName).Result()
+ store.Client.Del(ctx, string(path)).Result()
+ store.Client.ZRem(ctx, dirListKey, fileName).Result()
continue
}
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
}
- return entries, err
+ return lastFileName, err
}
func genDirectoryListKey(dir string) (dirList string) {
diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go
index 658491ddf..bde994dc9 100644
--- a/weed/filer/redis2/universal_redis_store_kv.go
+++ b/weed/filer/redis2/universal_redis_store_kv.go
@@ -5,12 +5,12 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
- _, err = store.Client.Set(string(key), value, 0).Result()
+ _, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
return fmt.Errorf("kv put: %v", err)
@@ -21,7 +21,7 @@ func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value
func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
- data, err := store.Client.Get(string(key)).Result()
+ data, err := store.Client.Get(ctx, string(key)).Result()
if err == redis.Nil {
return nil, filer.ErrKvNotFound
@@ -32,7 +32,7 @@ func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value
func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) {
- _, err = store.Client.Del(string(key)).Result()
+ _, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
return fmt.Errorf("kv delete: %v", err)
diff --git a/weed/filer/rocksdb/README.md b/weed/filer/rocksdb/README.md
new file mode 100644
index 000000000..6bae6d34e
--- /dev/null
+++ b/weed/filer/rocksdb/README.md
@@ -0,0 +1,41 @@
+# Prepare the compilation environment on Linux
+```
+sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+sudo apt-get update -qq
+sudo apt-get install gcc-6 g++-6 libsnappy-dev zlib1g-dev libbz2-dev -qq
+export CXX="g++-6" CC="gcc-6"
+
+wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags2_2.0-1.1ubuntu1_amd64.deb
+sudo dpkg -i libgflags2_2.0-1.1ubuntu1_amd64.deb
+wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags-dev_2.0-1.1ubuntu1_amd64.deb
+sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb
+```
+
+# Prepare the compilation environment on macOS
+```
+brew install snappy
+```
+
+# Install RocksDB
+```
+ export ROCKSDB_HOME=/Users/chris/dev/rocksdb
+
+ git clone https://github.com/facebook/rocksdb.git $ROCKSDB_HOME
+ pushd $ROCKSDB_HOME
+ make clean
+ make install-static
+ popd
+```
+
+# Install gorocksdb
+
+```
+export CGO_CFLAGS="-I$ROCKSDB_HOME/include"
+export CGO_LDFLAGS="-L$ROCKSDB_HOME -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"
+
+go get github.com/tecbot/gorocksdb
+```
+# Compile SeaweedFS with RocksDB support
+
+```
+cd ~/go/src/github.com/chrislusf/seaweedfs/weed
+go install -tags rocksdb
+```
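+
+# Use it in filer.toml
+
+A minimal sketch of enabling the store (assumption: the section name and keys
+below mirror the other filer store sections; run `weed scaffold -config=filer`
+to see the generated defaults):
+
+```
+[rocksdb]
+enabled = true
+dir = "./filerrdb"    # directory to store rocksdb files
+```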
diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go
new file mode 100644
index 000000000..70c301725
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_store.go
@@ -0,0 +1,302 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "fmt"
+ "io"
+
+ "github.com/tecbot/gorocksdb"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &RocksDBStore{})
+}
+
+type options struct {
+ opt *gorocksdb.Options
+ ro *gorocksdb.ReadOptions
+ wo *gorocksdb.WriteOptions
+}
+
+func (opt *options) init() {
+ opt.opt = gorocksdb.NewDefaultOptions()
+ opt.ro = gorocksdb.NewDefaultReadOptions()
+ opt.wo = gorocksdb.NewDefaultWriteOptions()
+}
+
+func (opt *options) close() {
+ opt.opt.Destroy()
+ opt.ro.Destroy()
+ opt.wo.Destroy()
+}
+
+type RocksDBStore struct {
+ path string
+ db *gorocksdb.DB
+ options
+}
+
+func (store *RocksDBStore) GetName() string {
+ return "rocksdb"
+}
+
+func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
+ return store.initialize(dir)
+}
+
+func (store *RocksDBStore) initialize(dir string) (err error) {
+ glog.Infof("filer store rocksdb dir: %s", dir)
+ if err := weed_util.TestFolderWritable(dir); err != nil {
+ return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
+ }
+ store.options.init()
+ store.opt.SetCreateIfMissing(true)
+ // reduce write amplification
+ // also avoid expired data stored in highest level never get compacted
+ store.opt.SetLevelCompactionDynamicLevelBytes(true)
+ store.opt.SetCompactionFilter(NewTTLFilter())
+ // store.opt.SetMaxBackgroundCompactions(2)
+
+ store.db, err = gorocksdb.OpenDb(store.opt, dir)
+
+ return
+}
+
+func (store *RocksDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *RocksDBStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *RocksDBStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ dir, name := entry.DirAndName()
+ key := genKey(dir, name)
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ err = store.db.Put(store.wo, key, value)
+
+ if err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+
+ return nil
+}
+
+func (store *RocksDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
+ dir, name := fullpath.DirAndName()
+ key := genKey(dir, name)
+ data, err := store.db.Get(store.ro, key)
+
+ if data == nil {
+ return nil, filer_pb.ErrNotFound
+ }
+ defer data.Free()
+
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(data.Data())
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+
+ return entry, nil
+}
+
+func (store *RocksDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ dir, name := fullpath.DirAndName()
+ key := genKey(dir, name)
+
+ err = store.db.Delete(store.wo, key)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+
+ batch := gorocksdb.NewWriteBatch()
+ defer batch.Destroy()
+
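+ // this is a one-off scan; keep it from polluting the block cache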
+ ro := gorocksdb.NewDefaultReadOptions()
+ defer ro.Destroy()
+ ro.SetFillCache(false)
+
+ iter := store.db.NewIterator(ro)
+ defer iter.Close()
+ err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool {
+ batch.Delete(key)
+ return true
+ })
+ if err != nil {
+ return fmt.Errorf("delete list %s : %v", fullpath, err)
+ }
+
+ err = store.db.Write(store.wo, batch)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) {
+
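+ // position the iterator: seek straight to the prefix for a fresh scan, or
+ // to lastKey when resuming, skipping lastKey itself unless includeLastKey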
+ if len(lastKey) == 0 {
+ iter.Seek(prefix)
+ } else {
+ iter.Seek(lastKey)
+ if !includeLastKey {
+ if iter.Valid() {
+ if bytes.Equal(iter.Key().Data(), lastKey) {
+ iter.Next()
+ }
+ }
+ }
+ }
+
+ i := int64(0)
+ for ; iter.Valid(); iter.Next() {
+
+ if limit > 0 {
+ i++
+ if i > limit {
+ break
+ }
+ }
+
+ key := iter.Key().Data()
+
+ if !bytes.HasPrefix(key, prefix) {
+ break
+ }
+
+ ret := fn(key, iter.Value().Data())
+
+ if !ret {
+ break
+ }
+
+ }
+
+ if err := iter.Err(); err != nil {
+ return fmt.Errorf("prefix scan iterator: %v", err)
+ }
+ return nil
+}
+
+func (store *RocksDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+ }
+
+ ro := gorocksdb.NewDefaultReadOptions()
+ defer ro.Destroy()
+ ro.SetFillCache(false)
+
+ iter := store.db.NewIterator(ro)
+ defer iter.Close()
+ err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool {
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ return true
+ }
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
+ }
+ lastFileName = fileName
+
+ // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+ if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ return false
+ }
+ if !eachEntryFunc(entry) {
+ return false
+ }
+ return true
+ })
+ if err != nil {
+ return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err)
+ }
+
+ return lastFileName, err
+}
+
+func genKey(dirPath, fileName string) (key []byte) {
+ key = hashToBytes(dirPath)
+ key = append(key, []byte(fileName)...)
+ return key
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
+ keyPrefix = hashToBytes(string(fullpath))
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix
+}
+
+func getNameFromKey(key []byte) string {
+
+ return string(key[md5.Size:])
+
+}
+
+// hash directory into a fixed-size key prefix
+func hashToBytes(dir string) []byte {
+ h := md5.New()
+ io.WriteString(h, dir)
+
+ b := h.Sum(nil)
+
+ return b
+}
+
+func (store *RocksDBStore) Shutdown() {
+ store.db.Close()
+ store.options.close()
+}
diff --git a/weed/filer/rocksdb/rocksdb_store_kv.go b/weed/filer/rocksdb/rocksdb_store_kv.go
new file mode 100644
index 000000000..cf1214d5b
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_store_kv.go
@@ -0,0 +1,47 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ err = store.db.Put(store.wo, key, value)
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ value, err = store.db.GetBytes(store.ro, key)
+
+ if value == nil {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ return
+}
+
+func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ err = store.db.Delete(store.wo, key)
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go
new file mode 100644
index 000000000..439663524
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_store_test.go
@@ -0,0 +1,117 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestCreateAndFind(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+ defer os.RemoveAll(dir)
+ store := &RocksDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
+
+ entry1 := &filer.Entry{
+ FullPath: fullpath,
+ Attr: filer.Attr{
+ Mode: 0440,
+ Uid: 1234,
+ Gid: 5678,
+ },
+ }
+
+ if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+ t.Errorf("create entry %v: %v", entry1.FullPath, err)
+ return
+ }
+
+ entry, err := testFiler.FindEntry(ctx, fullpath)
+
+ if err != nil {
+ t.Errorf("find entry: %v", err)
+ return
+ }
+
+ if entry.FullPath != entry1.FullPath {
+ t.Errorf("find wrong entry: %v", entry.FullPath)
+ return
+ }
+
+ // check the immediate parent directory
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+ // check the root directory
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func TestEmptyRoot(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+ defer os.RemoveAll(dir)
+ store := &RocksDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ // listing the empty root directory
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+ if err != nil {
+ t.Errorf("list entries: %v", err)
+ return
+ }
+ if len(entries) != 0 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func BenchmarkInsertEntry(b *testing.B) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
+ defer os.RemoveAll(dir)
+ store := &RocksDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ entry := &filer.Entry{
+ FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
+ Attr: filer.Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ },
+ }
+ store.InsertEntry(ctx, entry)
+ }
+}
diff --git a/weed/filer/rocksdb/rocksdb_ttl.go b/weed/filer/rocksdb/rocksdb_ttl.go
new file mode 100644
index 000000000..faed22310
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_ttl.go
@@ -0,0 +1,40 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "time"
+
+ "github.com/tecbot/gorocksdb"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+type TTLFilter struct {
+ skipLevel0 bool
+}
+
+func NewTTLFilter() gorocksdb.CompactionFilter {
+ return &TTLFilter{
+ skipLevel0: true,
+ }
+}
+
+func (t *TTLFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
+ // decode could be slow, causing write stall
+ // level >0 sst can run compaction in parallel
+ if !t.skipLevel0 || level > 0 {
+ entry := filer.Entry{}
+ if err := entry.DecodeAttributesAndChunks(val); err == nil {
+ if entry.TtlSec > 0 &&
+ entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) {
+ return true, nil
+ }
+ }
+ }
+ return false, val
+}
+
+func (t *TTLFilter) Name() string {
+ return "TTLFilter"
+}
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index cffdc8303..f0042a0ff 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -13,16 +13,16 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
-func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
+func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
// fmt.Printf("start to stream content for chunks: %+v\n", chunks)
- chunkViews := ViewFromChunks(masterClient.LookupFileId, chunks, offset, size)
+ chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
fileId2Url := make(map[string][]string)
for _, chunkView := range chunkViews {
- urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
+ urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
@@ -86,7 +86,7 @@ type ChunkStreamReader struct {
bufferOffset int64
bufferPos int
chunkIndex int
- lookupFileId LookupFileIdFunctionType
+ lookupFileId wdclient.LookupFileIdFunctionType
}
var _ = io.ReadSeeker(&ChunkStreamReader{})
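The signature change above decouples StreamContent from the concrete *wdclient.MasterClient: anything that can hand back a file-id-to-URLs resolver now qualifies, which also makes the function testable with a stub. The wdclient types referenced here presumably look roughly like this (a sketch, not the verbatim declarations):

```go
type LookupFileIdFunctionType func(fileId string) (fullUrls []string, err error)

type HasLookupFileIdFunction interface {
	GetLookupFileIdFunction() LookupFileIdFunctionType
}
```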
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index a8481a435..10a0a2b44 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -6,6 +6,7 @@ import (
"math"
"os"
"strings"
+ "syscall"
"time"
"github.com/seaweedfs/fuse"
@@ -57,7 +58,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
return err
}
- attr.Inode = util.FullPath(dir.FullPath()).AsInode()
+ // attr.Inode = util.FullPath(dir.FullPath()).AsInode()
attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir
attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0)
attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0)
@@ -81,8 +82,8 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
}
func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
- attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
- attr.Valid = time.Hour
+ // attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
+ attr.Valid = time.Second
attr.Uid = dir.wfs.option.MountUid
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
@@ -90,7 +91,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
attr.Ctime = dir.wfs.option.MountCtime
attr.Mtime = dir.wfs.option.MountMtime
attr.Atime = dir.wfs.option.MountMtime
- attr.BlockSize = 1024 * 1024
+ attr.BlockSize = blockSize
}
func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
@@ -127,28 +128,59 @@ func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.N
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
+ request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, req.Flags&fuse.OpenExclusive != 0)
+
+ if err != nil {
+ return nil, nil, err
+ }
+ var node fs.Node
+ if request.Entry.IsDirectory {
+ node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
+ return node, nil, nil
+ }
+
+ node = dir.newFile(req.Name, request.Entry)
+ file := node.(*File)
+ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
+ return file, fh, nil
+
+}
+
+func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
+
+ request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
+
+ if err != nil {
+ return nil, err
+ }
+ var node fs.Node
+ node = dir.newFile(req.Name, request.Entry)
+ return node, nil
+}
+
+func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) {
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
Entry: &filer_pb.Entry{
- Name: req.Name,
- IsDirectory: req.Mode&os.ModeDir > 0,
+ Name: name,
+ IsDirectory: mode&os.ModeDir > 0,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
- FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
- Uid: req.Uid,
- Gid: req.Gid,
+ FileMode: uint32(mode &^ dir.wfs.option.Umask),
+ Uid: uid,
+ Gid: gid,
Collection: dir.wfs.option.Collection,
Replication: dir.wfs.option.Replication,
TtlSec: dir.wfs.option.TtlSec,
},
},
- OExcl: req.Flags&fuse.OpenExclusive != 0,
+ OExcl: exclusive,
Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
+ glog.V(1).Infof("create %s/%s", dir.FullPath(), name)
- if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
@@ -157,41 +189,15 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
- glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
+ glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), name, err)
return fuse.EIO
}
dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
return nil
- }); err != nil {
- return nil, nil, err
- }
- var node fs.Node
- if request.Entry.IsDirectory {
- node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
- return node, nil, nil
- }
-
- node = dir.newFile(req.Name, request.Entry)
- file := node.(*File)
- fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
- return file, fh, nil
-
-}
-
-func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
- if req.Mode&os.ModeNamedPipe != 0 {
- glog.V(1).Infof("mknod named pipe %s", req.String())
- return nil, fuse.ENOSYS
- }
- if req.Mode&req.Mode&os.ModeSocket != 0 {
- glog.V(1).Infof("mknod socket %s", req.String())
- return nil, fuse.ENOSYS
- }
- // not going to support mknod for normal files either
- glog.V(1).Infof("mknod %s", req.String())
- return nil, fuse.ENOSYS
+ })
+ return request, err
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
@@ -279,7 +285,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
}
// resp.EntryValid = time.Second
- resp.Attr.Inode = fullFilePath.AsInode()
+ // resp.Attr.Inode = fullFilePath.AsInode()
resp.Attr.Valid = time.Second
resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
@@ -302,13 +308,11 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
- fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
- inode := fullpath.AsInode()
if entry.IsDirectory {
- dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir}
+ dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir}
ret = append(ret, dirent)
} else {
- dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File}
+ dirent := fuse.Dirent{Name: entry.Name, Type: findFileType(uint16(entry.Attributes.FileMode))}
ret = append(ret, dirent)
}
return nil
@@ -319,17 +323,37 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
return nil, fuse.EIO
}
- listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
+ listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
+ processEachEntryFn(entry.ToProtoEntry(), false)
+ return true
+ })
if listErr != nil {
glog.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
}
- for _, cachedEntry := range listedEntries {
- processEachEntryFn(cachedEntry.ToProtoEntry(), false)
- }
return
}
+func findFileType(mode uint16) fuse.DirentType {
+ switch mode & (syscall.S_IFMT & 0xffff) {
+ case syscall.S_IFSOCK:
+ return fuse.DT_Socket
+ case syscall.S_IFLNK:
+ return fuse.DT_Link
+ case syscall.S_IFREG:
+ return fuse.DT_File
+ case syscall.S_IFBLK:
+ return fuse.DT_Block
+ case syscall.S_IFDIR:
+ return fuse.DT_Dir
+ case syscall.S_IFCHR:
+ return fuse.DT_Char
+ case syscall.S_IFIFO:
+ return fuse.DT_FIFO
+ }
+ return fuse.DT_File
+}
+
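ReadDirAll now reports the real dirent type instead of hard-coding DT_File, so symlinks, sockets, and FIFOs show up correctly in directory listings. A hypothetical sanity test for the S_IFMT mapping:

```go
func TestFindFileType(t *testing.T) {
	if got := findFileType(syscall.S_IFLNK); got != fuse.DT_Link {
		t.Errorf("symlink: got %v", got)
	}
	if got := findFileType(syscall.S_IFIFO); got != fuse.DT_FIFO {
		t.Errorf("fifo: got %v", got)
	}
	// anything without a recognized S_IFMT value falls back to a regular file
	if got := findFileType(0); got != fuse.DT_File {
		t.Errorf("fallback: got %v", got)
	}
}
```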
func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
if !req.Dir {
@@ -378,11 +402,6 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
inodeId := util.NewFullPath(dir.FullPath(), req.Name).AsInode()
delete(dir.wfs.handles, inodeId)
- // delete the chunks last
- if isDeleteData {
- dir.wfs.deleteFileChunks(entry.Chunks)
- }
-
return nil
}
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 11089186f..f05a3a56a 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -3,7 +3,6 @@ package filesys
import (
"bytes"
"io"
- "runtime"
"sync"
"time"
@@ -12,30 +11,20 @@ import (
)
type ContinuousDirtyPages struct {
- intervals *ContinuousIntervals
- f *File
- writeWaitGroup sync.WaitGroup
- chunkAddLock sync.Mutex
- chunkSaveErrChan chan error
- chunkSaveErrChanClosed bool
- lastErr error
- collection string
- replication string
+ intervals *ContinuousIntervals
+ f *File
+ writeWaitGroup sync.WaitGroup
+ chunkAddLock sync.Mutex
+ lastErr error
+ collection string
+ replication string
}
func newDirtyPages(file *File) *ContinuousDirtyPages {
dirtyPages := &ContinuousDirtyPages{
- intervals: &ContinuousIntervals{},
- f: file,
- chunkSaveErrChan: make(chan error, runtime.NumCPU()),
+ intervals: &ContinuousIntervals{},
+ f: file,
}
- go func() {
- for t := range dirtyPages.chunkSaveErrChan {
- if t != nil {
- dirtyPages.lastErr = t
- }
- }
- }()
return dirtyPages
}
@@ -94,15 +83,6 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedD
func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) {
- errChanSize := pages.f.wfs.option.ConcurrentWriters
- if errChanSize == 0 {
- errChanSize = runtime.NumCPU()
- }
- if pages.chunkSaveErrChanClosed {
- pages.chunkSaveErrChan = make(chan error, errChanSize)
- pages.chunkSaveErrChanClosed = false
- }
-
mtime := time.Now().UnixNano()
pages.writeWaitGroup.Add(1)
writer := func() {
@@ -112,7 +92,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
if err != nil {
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
- pages.chunkSaveErrChan <- err
+ pages.lastErr = err
return
}
chunk.Mtime = mtime
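Dropping the buffered error channel and its draining goroutine removes the fragile chunkSaveErrChanClosed handshake entirely: each writer records its failure directly, and doFlush reads lastErr only after writeWaitGroup.Wait(), which supplies the needed happens-before edge. A sketch of the resulting pattern (saveChunk is a hypothetical stand-in for the saveDataAsChunk call):

```go
pages.writeWaitGroup.Add(1)
go func() {
	defer pages.writeWaitGroup.Done()
	if _, _, _, err := saveChunk(); err != nil {
		pages.lastErr = err // observed in doFlush, strictly after Wait()
	}
}()
```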
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 3bffa156e..a210c5152 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -45,7 +45,7 @@ func (file *File) fullpath() util.FullPath {
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
- glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
+ glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr)
entry := file.entry
if file.isOpen <= 0 || entry == nil {
@@ -54,7 +54,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
}
}
- attr.Inode = file.fullpath().AsInode()
+ // attr.Inode = file.fullpath().AsInode()
attr.Valid = time.Second
attr.Mode = os.FileMode(entry.Attributes.FileMode)
attr.Size = filer.FileSize(entry)
@@ -144,9 +144,8 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
}
file.entry.Chunks = chunks
- file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), chunks)
+ file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), chunks)
file.reader = nil
- file.wfs.deleteFileChunks(truncatedChunks)
}
file.entry.Attributes.FileSize = req.Size
file.dirtyMetadata = true
@@ -326,7 +325,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
func (file *File) setEntry(entry *filer_pb.Entry) {
file.entry = entry
- file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), entry.Chunks)
+ file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), entry.Chunks)
file.reader = nil
}
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index c273eec8a..85f64d292 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -72,7 +72,7 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
}
totalRead, err := fh.readFromChunks(buff, req.Offset)
- if err == nil {
+ if err == nil || err == io.EOF {
maxStop := fh.readFromDirtyPages(buff, req.Offset)
totalRead = max(maxStop-req.Offset, totalRead)
}
@@ -90,8 +90,9 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
totalRead = min(int64(len(buff)), totalRead)
}
- // resp.Data = buff[:totalRead]
- resp.Data = buff
+ if err == nil {
+ resp.Data = buff[:totalRead]
+ }
return err
}
@@ -118,19 +119,21 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
var chunkResolveErr error
if fh.f.entryViewCache == nil {
- fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(filer.LookupFn(fh.f.wfs), fh.f.entry.Chunks)
+ fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), fh.f.entry.Chunks)
if chunkResolveErr != nil {
return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
}
fh.f.reader = nil
}
- if fh.f.reader == nil {
+ reader := fh.f.reader
+ if reader == nil {
chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
- fh.f.reader = filer.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
+ reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize)
}
+ fh.f.reader = reader
- totalRead, err := fh.f.reader.ReadAt(buff, offset)
+ totalRead, err := reader.ReadAt(buff, offset)
if err != nil && err != io.EOF {
glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
@@ -181,25 +184,20 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
fh.Lock()
defer fh.Unlock()
- fh.f.isOpen--
-
- if fh.f.isOpen < 0 {
+ if fh.f.isOpen <= 0 {
glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
fh.f.isOpen = 0
return nil
}
- if fh.f.isOpen == 0 {
+ if fh.f.isOpen == 1 {
if err := fh.doFlush(ctx, req.Header); err != nil {
glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
+ return err
}
- // stop the goroutine
- if !fh.dirtyPages.chunkSaveErrChanClosed {
- fh.dirtyPages.chunkSaveErrChanClosed = true
- close(fh.dirtyPages.chunkSaveErrChan)
- }
+ fh.f.isOpen--
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
if closer, ok := fh.f.reader.(io.Closer); ok {
@@ -213,10 +211,18 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
+ glog.V(4).Infof("Flush %v fh %d", fh.f.fullpath(), fh.handle)
+
fh.Lock()
defer fh.Unlock()
- return fh.doFlush(ctx, req.Header)
+ if err := fh.doFlush(ctx, req.Header); err != nil {
+ glog.Errorf("Flush doFlush %s: %v", fh.f.Name, err)
+ return err
+ }
+
+ glog.V(4).Infof("Flush %v fh %d success", fh.f.fullpath(), fh.handle)
+ return nil
}
func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
@@ -229,7 +235,8 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
fh.dirtyPages.writeWaitGroup.Wait()
if fh.dirtyPages.lastErr != nil {
- return fh.dirtyPages.lastErr
+ glog.Errorf("%v doFlush last err: %v", fh.f.fullpath(), fh.dirtyPages.lastErr)
+ return fuse.EIO
}
if !fh.f.dirtyMetadata {
@@ -268,7 +275,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
- chunks, _ := filer.CompactFileChunks(filer.LookupFn(fh.f.wfs), nonManifestChunks)
+ chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks)
chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
if manifestErr != nil {
// not good, but should be ok
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
index 4b282253d..b9d4724c9 100644
--- a/weed/filesys/meta_cache/meta_cache.go
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
+ "strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
@@ -29,7 +30,12 @@ func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMa
localStore: openMetaStore(dbFolder),
visitedBoundary: bounded_tree.NewBoundedTree(baseDir),
uidGidMapper: uidGidMapper,
- invalidateFunc: invalidateFunc,
+ invalidateFunc: func(fullpath util.FullPath) {
+ if baseDir != "/" && strings.HasPrefix(string(fullpath), string(baseDir)) {
+ fullpath = fullpath[len(baseDir):]
+ }
+ invalidateFunc(fullpath)
+ },
}
}
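The wrapped invalidateFunc rebases filer-side paths onto the mount point before poking the kernel cache. A small illustration, with an assumed baseDir of /home/chris:

```go
fullpath := util.FullPath("/home/chris/a/b.txt") // event path as seen by the filer
fullpath = fullpath[len("/home/chris"):]         // "/a/b.txt", the path the kernel knows
```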
@@ -117,22 +123,22 @@ func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err err
return mc.localStore.DeleteEntry(ctx, fp)
}
-func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer.Entry, error) {
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error {
mc.RLock()
defer mc.RUnlock()
if !mc.visitedBoundary.HasVisited(dirPath) {
- return nil, fmt.Errorf("unsynchronized dir: %v", dirPath)
+ return fmt.Errorf("unsynchronized dir: %v", dirPath)
}
- entries, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
- if err != nil {
- return nil, err
- }
- for _, entry := range entries {
+ _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool {
mc.mapIdFromFilerToLocal(entry)
+ return eachEntryFunc(entry)
+ })
+ if err != nil {
+ return err
}
- return entries, err
+ return err
}
func (mc *MetaCache) Shutdown() {
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index 4089cea28..1ca3b16d5 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -19,6 +19,9 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
util.Retry("ReadDirAllEntries", func() error {
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(dirPath), pbEntry)
+ if IsHiddenSystemEntry(string(dirPath), entry.Name()) {
+ return nil
+ }
if err := mc.doInsertEntry(context.Background(), entry); err != nil {
glog.V(0).Infof("read %s: %v", entry.FullPath, err)
return err
@@ -38,3 +41,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
return
})
}
+
+func IsHiddenSystemEntry(dir, name string) bool {
+ return dir == "/" && name == "topics"
+}
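This filter keeps the message broker's internal /topics tree out of the mounted view. Only that exact name directly under the root is hidden (illustrative calls):

```go
IsHiddenSystemEntry("/", "topics")     // true: skipped during cache warm-up
IsHiddenSystemEntry("/", "photos")     // false
IsHiddenSystemEntry("/data", "topics") // false: only hidden directly under the root
```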
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index d8f865be7..9443bc0bf 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -4,6 +4,8 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"os"
"path"
@@ -25,6 +27,8 @@ import (
)
type Option struct {
+ MountDirectory string
+ FilerAddress string
FilerGrpcAddress string
GrpcDialOption grpc.DialOption
FilerMountRootPath string
@@ -46,9 +50,9 @@ type Option struct {
MountCtime time.Time
MountMtime time.Time
- OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers
- Cipher bool // whether encrypt data on volume server
- UidGidMapper *meta_cache.UidGidMapper
+ VolumeServerAccess string // how to access volume servers
+ Cipher bool // whether encrypt data on volume server
+ UidGidMapper *meta_cache.UidGidMapper
}
var _ = fs.FS(&WFS{})
@@ -74,6 +78,7 @@ type WFS struct {
// throttle writers
concurrentWriters *util.LimitedConcurrentExecutor
+ Server *fs.Server
}
type statsCache struct {
filer_pb.StatisticsResponse
@@ -91,7 +96,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
},
signature: util.RandomInt32(),
}
- cacheUniqueId := util.Md5String([]byte(option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:4]
+ cacheUniqueId := util.Md5String([]byte(option.MountDirectory + option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:8]
cacheDir := path.Join(option.CacheDir, cacheUniqueId)
if option.CacheSizeMB > 0 {
os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)
@@ -102,9 +107,22 @@ func NewSeaweedFileSystem(option *Option) *WFS {
fsNode := wfs.fsNodeCache.GetFsNode(filePath)
if fsNode != nil {
if file, ok := fsNode.(*File); ok {
+ if err := wfs.Server.InvalidateNodeData(file); err != nil {
+ glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err)
+ }
file.clearEntry()
}
}
+ dir, name := filePath.DirAndName()
+ parent := wfs.root
+ if dir != "/" {
+ parent = wfs.fsNodeCache.GetFsNode(util.FullPath(dir))
+ }
+ if parent != nil {
+ if err := wfs.Server.InvalidateEntry(parent, name); err != nil {
+ glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
+ }
+ }
})
startTime := time.Now()
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
@@ -240,3 +258,13 @@ func (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) {
}
entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid)
}
+
+func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType {
+ if wfs.option.VolumeServerAccess == "filerProxy" {
+ return func(fileId string) (targetUrls []string, err error) {
+ return []string{"http://" + wfs.option.FilerAddress + "/?proxyChunkId=" + fileId}, nil
+ }
+ }
+ return filer.LookupFn(wfs)
+}
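With VolumeServerAccess set to "filerProxy", chunk lookups resolve to a filer proxy URL rather than a volume server URL, useful when the mount host can reach the filer but not the volume servers. Illustratively, with a hypothetical file id and filer address:

```go
// assuming wfs.option.FilerAddress == "filer.example.com:8888"
urls, _ := wfs.LookupFn()("3,01637037d6")
// urls == []string{"http://filer.example.com:8888/?proxyChunkId=3,01637037d6"}
```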
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
deleted file mode 100644
index a245b6795..000000000
--- a/weed/filesys/wfs_deletion.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package filesys
-
-import (
- "context"
-
- "google.golang.org/grpc"
-
- "github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-)
-
-func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
- if len(chunks) == 0 {
- return
- }
-
- var fileIds []string
- for _, chunk := range chunks {
- if !chunk.IsChunkManifest {
- fileIds = append(fileIds, chunk.GetFileIdString())
- continue
- }
- dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
- if manifestResolveErr != nil {
- glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
- }
- for _, dChunk := range dataChunks {
- fileIds = append(fileIds, dChunk.GetFileIdString())
- }
- fileIds = append(fileIds, chunk.GetFileIdString())
- }
-
- wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- wfs.deleteFileIds(wfs.option.GrpcDialOption, client, fileIds)
- return nil
- })
-}
-
-func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
-
- var vids []string
- for _, fileId := range fileIds {
- vids = append(vids, filer.VolumeId(fileId))
- }
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
-
- m := make(map[string]operation.LookupResult)
-
- glog.V(4).Infof("deleteFileIds lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
- VolumeIds: vids,
- })
- if err != nil {
- return m, err
- }
-
- for _, vid := range vids {
- lr := operation.LookupResult{
- VolumeId: vid,
- Locations: nil,
- }
- locations, found := resp.LocationsMap[vid]
- if !found {
- continue
- }
- for _, loc := range locations.Locations {
- lr.Locations = append(lr.Locations, operation.Location{
- Url: wfs.AdjustedUrl(loc),
- PublicUrl: loc.PublicUrl,
- })
- }
- m[vid] = lr
- }
-
- return m, err
- }
-
- _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
-
- return err
-}
diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go
index ef4213af1..671d20ba2 100644
--- a/weed/filesys/wfs_filer_client.go
+++ b/weed/filesys/wfs_filer_client.go
@@ -27,7 +27,7 @@ func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) erro
}
func (wfs *WFS) AdjustedUrl(location *filer_pb.Location) string {
- if wfs.option.OutsideContainerClusterMode {
+ if wfs.option.VolumeServerAccess == "publicUrl" {
return location.PublicUrl
}
return location.Url
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index 92ff5c8da..dbec3bebc 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -54,6 +54,9 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+ if wfs.option.VolumeServerAccess == "filerProxy" {
+ fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.option.FilerAddress, fileId)
+ }
uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
index df4052096..3021473e5 100644
--- a/weed/messaging/broker/broker_grpc_server_subscribe.go
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -101,20 +101,21 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
return nil
}
- if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
- if err != io.EOF {
- // println("stopping from persisted logs", err.Error())
- return err
- }
- }
-
- if processedTsNs != 0 {
- lastReadTime = time.Unix(0, processedTsNs)
- }
-
// fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
for {
+
+ if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
+ if err != io.EOF {
+ // println("stopping from persisted logs", err.Error())
+ return err
+ }
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
lastReadTime, err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
lock.Mutex.Lock()
lock.cond.Wait()
@@ -122,6 +123,9 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
return isConnected
}, eachLogEntryFn)
if err != nil {
+ if err == log_buffer.ResumeFromDiskError {
+ continue
+ }
glog.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go
index 36211692c..541a453e9 100644
--- a/weed/notification/configuration.go
+++ b/weed/notification/configuration.go
@@ -4,7 +4,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
- "github.com/spf13/viper"
)
type MessageQueue interface {
@@ -21,7 +20,7 @@ var (
Queue MessageQueue
)
-func LoadConfiguration(config *viper.Viper, prefix string) {
+func LoadConfiguration(config *util.ViperProxy, prefix string) {
if config == nil {
return
@@ -43,7 +42,7 @@ func LoadConfiguration(config *viper.Viper, prefix string) {
}
-func validateOneEnabledQueue(config *viper.Viper) {
+func validateOneEnabledQueue(config *util.ViperProxy) {
enabledQueue := ""
for _, queue := range MessageQueues {
if config.GetBool(queue.GetName() + ".enabled") {
diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go
index 9868a411d..65baaddf2 100644
--- a/weed/operation/delete_content.go
+++ b/weed/operation/delete_content.go
@@ -4,12 +4,11 @@ import (
"context"
"errors"
"fmt"
+ "google.golang.org/grpc"
"net/http"
"strings"
"sync"
- "google.golang.org/grpc"
-
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 907825311..c34b33577 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -33,7 +33,7 @@ type FilePart struct {
type SubmitResult struct {
FileName string `json:"fileName,omitempty"`
- FileUrl string `json:"fileUrl,omitempty"`
+ FileUrl string `json:"url,omitempty"`
Fid string `json:"fid,omitempty"`
Size uint32 `json:"size,omitempty"`
Error string `json:"error,omitempty"`
diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go
index f19af43b2..9ffda9b04 100644
--- a/weed/pb/grpc_client_server.go
+++ b/weed/pb/grpc_client_server.go
@@ -35,8 +35,9 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
var options []grpc.ServerOption
options = append(options,
grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: 10 * time.Second, // wait time before ping if no activity
- Timeout: 20 * time.Second, // ping timeout
+ Time: 10 * time.Second, // wait time before ping if no activity
+ Timeout: 20 * time.Second, // ping timeout
+ MaxConnectionAge: 10 * time.Hour,
}),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: 60 * time.Second, // min time a client should wait before sending a ping
@@ -138,6 +139,22 @@ func ServerToGrpcAddress(server string) (serverGrpcAddress string) {
return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort)
}
+func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) {
+ hostnameAndPort := strings.Split(grpcAddress, ":")
+ if len(hostnameAndPort) != 2 {
+ return fmt.Sprintf("unexpected grpcAddress: %s", grpcAddress)
+ }
+
+ grpcPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1])
+ }
+
+ port := int(grpcPort) - 10000
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], port)
+}
+
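The helper inverts SeaweedFS's +10000 gRPC port convention to recover the HTTP address (illustrative values):

```go
pb.GrpcAddressToServerAddress("localhost:18888") // "localhost:8888"
pb.GrpcAddressToServerAddress("filer:18888")     // "filer:8888"
```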
func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {
masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master)
diff --git a/weed/replication/repl_util/replication_utli.go b/weed/replication/repl_util/replication_utli.go
index 42777f4ad..c5b8ab4e1 100644
--- a/weed/replication/repl_util/replication_utli.go
+++ b/weed/replication/repl_util/replication_utli.go
@@ -17,9 +17,10 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
}
var writeErr error
+ var shouldRetry bool
for _, fileUrl := range fileUrls {
- _, err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
+ shouldRetry, err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
writeErr = writeFunc(data)
})
if err != nil {
@@ -30,11 +31,12 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
break
}
}
-
- if err != nil {
+ if shouldRetry && err != nil {
return err
}
-
+ if writeErr != nil {
+ return writeErr
+ }
}
return nil
}
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index c4228434f..7688029e6 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -6,6 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb"
"google.golang.org/grpc"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -40,7 +41,17 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
return nil
}
- newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
+ var dateKey string
+ if r.sink.GetName() == "local_incremental" {
+ var mTime int64
+ if message.NewEntry != nil {
+ mTime = message.NewEntry.Attributes.Mtime
+ } else if message.OldEntry != nil {
+ mTime = message.OldEntry.Attributes.Mtime
+ }
+ dateKey = time.Unix(mTime, 0).Format("2006-01-02")
+ }
+ newKey := util.Join(r.sink.GetSinkToDirectory(), dateKey, key[len(r.source.Dir):])
glog.V(3).Infof("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
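For the local_incremental sink this buckets replicated entries by modification date: with an illustrative sink directory /backup and source dir /data, a file /data/users/a.txt modified on 2021-01-15 lands at /backup/2021-01-15/users/a.txt. For every other sink dateKey stays empty, so util.Join leaves the flat layout unchanged.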
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index 6b51a7966..a7392d856 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -30,6 +30,7 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path st
replicatedChunk, e := fs.replicateOneChunk(chunk, path)
if e != nil {
err = e
+ return
}
replicatedChunks[index] = replicatedChunk
}(sourceChunk, chunkIndex)
@@ -98,6 +99,9 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+ if fs.writeChunkByFiler {
+ fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId)
+ }
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index a69c9dc8e..9960634f6 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -3,6 +3,8 @@ package filersink
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
"google.golang.org/grpc"
@@ -17,15 +19,17 @@ import (
)
type FilerSink struct {
- filerSource *source.FilerSource
- grpcAddress string
- dir string
- replication string
- collection string
- ttlSec int32
+ filerSource *source.FilerSource
+ grpcAddress string
+ dir string
+ replication string
+ collection string
+ ttlSec int32
diskType string
- dataCenter string
- grpcDialOption grpc.DialOption
+ dataCenter string
+ grpcDialOption grpc.DialOption
+ address string
+ writeChunkByFiler bool
}
func init() {
@@ -42,21 +46,27 @@ func (fs *FilerSink) GetSinkToDirectory() string {
func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
return fs.DoInitialize(
+ "",
configuration.GetString(prefix+"grpcAddress"),
configuration.GetString(prefix+"directory"),
configuration.GetString(prefix+"replication"),
configuration.GetString(prefix+"collection"),
configuration.GetInt(prefix+"ttlSec"),
configuration.GetString(prefix+"disk"),
- security.LoadClientTLS(util.GetViper(), "grpc.client"))
+ security.LoadClientTLS(util.GetViper(), "grpc.client"),
+ false)
}
func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
fs.filerSource = s
}
-func (fs *FilerSink) DoInitialize(grpcAddress string, dir string,
- replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption) (err error) {
+func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
+ replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
+ fs.address = address
+ if fs.address == "" {
+ fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
+ }
fs.grpcAddress = grpcAddress
fs.dir = dir
fs.replication = replication
@@ -64,6 +74,7 @@ func (fs *FilerSink) DoInitialize(grpcAddress string, dir string,
fs.ttlSec = int32(ttlSec)
fs.diskType = diskType
fs.grpcDialOption = grpcDialOption
+ fs.writeChunkByFiler = writeChunkByFiler
return nil
}
@@ -209,7 +220,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
})
}
-func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
if aErr != nil {
return nil, nil, aErr
diff --git a/weed/replication/sink/localsink/local_incremental_sink.go b/weed/replication/sink/localsink/local_incremental_sink.go
new file mode 100644
index 000000000..a1d49e28a
--- /dev/null
+++ b/weed/replication/sink/localsink/local_incremental_sink.go
@@ -0,0 +1,17 @@
+package localsink
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/replication/sink"
+)
+
+type LocalIncSink struct {
+ LocalSink
+}
+
+func (localincsink *LocalIncSink) GetName() string {
+ return "local_incremental"
+}
+
+func init() {
+ sink.Sinks = append(sink.Sinks, &LocalIncSink{})
+}
diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go
new file mode 100644
index 000000000..21c625c3f
--- /dev/null
+++ b/weed/replication/sink/localsink/local_sink.go
@@ -0,0 +1,101 @@
+package localsink
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/replication/repl_util"
+ "github.com/chrislusf/seaweedfs/weed/replication/sink"
+ "github.com/chrislusf/seaweedfs/weed/replication/source"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+type LocalSink struct {
+ Dir string
+ filerSource *source.FilerSource
+}
+
+func init() {
+ sink.Sinks = append(sink.Sinks, &LocalSink{})
+}
+
+func (localsink *LocalSink) SetSourceFiler(s *source.FilerSource) {
+ localsink.filerSource = s
+}
+
+func (localsink *LocalSink) GetName() string {
+ return "local"
+}
+
+func (localsink *LocalSink) isMultiPartEntry(key string) bool {
+ return strings.HasSuffix(key, ".part") && strings.Contains(key, "/.uploads/")
+}
+
+func (localsink *LocalSink) initialize(dir string) error {
+ localsink.Dir = dir
+ return nil
+}
+
+func (localsink *LocalSink) Initialize(configuration util.Configuration, prefix string) error {
+ dir := configuration.GetString(prefix + "directory")
+ glog.V(4).Infof("sink.local.directory: %v", dir)
+ return localsink.initialize(dir)
+}
+
+func (localsink *LocalSink) GetSinkToDirectory() string {
+ return localsink.Dir
+}
+
+func (localsink *LocalSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
+ if localsink.isMultiPartEntry(key) {
+ return nil
+ }
+ glog.V(4).Infof("Delete Entry key: %s", key)
+ if err := os.Remove(key); err != nil {
+ glog.V(0).Infof("remove entry key %s: %s", key, err)
+ }
+ return nil
+}
+
+func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
+ if entry.IsDirectory || localsink.isMultiPartEntry(key) {
+ return nil
+ }
+ glog.V(4).Infof("Create Entry key: %s", key)
+
+ totalSize := filer.FileSize(entry)
+ chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+
+ dir := filepath.Dir(key)
+
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ glog.V(4).Infof("Create Direcotry key: %s", dir)
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ }
+
+ writeFunc := func(data []byte) error {
+ writeErr := ioutil.WriteFile(key, data, 0644)
+ return writeErr
+ }
+
+ if err := repl_util.CopyFromChunkViews(chunkViews, localsink.filerSource, writeFunc); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (localsink *LocalSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
+ if localsink.isMultiPartEntry(key) {
+ return true, nil
+ }
+ glog.V(4).Infof("Update Entry key: %s", key)
+ // do delete and create
+ return false, nil
+}
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index ff4f2eb26..3982360b0 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -25,19 +25,28 @@ type FilerSource struct {
grpcAddress string
grpcDialOption grpc.DialOption
Dir string
+ address string
+ proxyByFiler bool
}
func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error {
return fs.DoInitialize(
+ "",
configuration.GetString(prefix+"grpcAddress"),
configuration.GetString(prefix+"directory"),
+ false,
)
}
-func (fs *FilerSource) DoInitialize(grpcAddress string, dir string) (err error) {
+func (fs *FilerSource) DoInitialize(address, grpcAddress string, dir string, readChunkFromFiler bool) (err error) {
+ fs.address = address
+ if fs.address == "" {
+ fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
+ }
fs.grpcAddress = grpcAddress
fs.Dir = dir
fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+ fs.proxyByFiler = readChunkFromFiler
return nil
}
@@ -81,9 +90,13 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
return
}
-func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, resp *http.Response, err error) {
+func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) {
+
+ if fs.proxyByFiler {
+ return util.DownloadFile("http://" + fs.address + "/?proxyChunkId=" + fileId)
+ }
- fileUrls, err := fs.LookupFileId(part)
+ fileUrls, err := fs.LookupFileId(fileId)
if err != nil {
return "", nil, nil, err
}
diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go
index 1dd386ba7..642834c72 100644
--- a/weed/replication/sub/notification_aws_sqs.go
+++ b/weed/replication/sub/notification_aws_sqs.go
@@ -68,7 +68,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que
return nil
}
-func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
// receive message
result, err := k.svc.ReceiveMessage(&sqs.ReceiveMessageInput{
diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
index 9726096e5..413c0e3cf 100644
--- a/weed/replication/sub/notification_gocdk_pub_sub.go
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -2,13 +2,17 @@ package sub
import (
"context"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
+ "github.com/streadway/amqp"
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
+ "net/url"
+ "path"
+ "strings"
+
// _ "gocloud.dev/pubsub/azuresb"
_ "gocloud.dev/pubsub/gcppubsub"
_ "gocloud.dev/pubsub/natspubsub"
@@ -19,6 +23,55 @@ func init() {
NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{})
}
+func getPath(rawUrl string) string {
+ parsedUrl, _ := url.Parse(rawUrl)
+ return path.Join(parsedUrl.Host, parsedUrl.Path)
+}
+
+func QueueDeclareAndBind(conn *amqp.Connection, exchangeUrl string, queueUrl string) error {
+ exchangeName := getPath(exchangeUrl)
+ queueName := getPath(queueUrl)
+ exchangeNameDLX := "DLX." + exchangeName
+ queueNameDLX := "DLX." + queueName
+ ch, err := conn.Channel()
+ if err != nil {
+ glog.Error(err)
+ return err
+ }
+ defer ch.Close()
+ if err := ch.ExchangeDeclare(
+ exchangeNameDLX, "fanout", false, false, false, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := ch.ExchangeDeclare(
+ exchangeName, "fanout", false, false, false, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if _, err := ch.QueueDeclare(
+ queueName, false, false, false, false,
+ amqp.Table{"x-dead-letter-exchange": exchangeNameDLX}); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := ch.QueueBind(queueName, "", exchangeName, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if _, err := ch.QueueDeclare(
+ queueNameDLX, false, false, false, false,
+ amqp.Table{"x-dead-letter-exchange": exchangeName, "x-message-ttl": 600000}); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := ch.QueueBind(queueNameDLX, "", exchangeNameDLX, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ return nil
+}
+
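For RabbitMQ subscriptions, this topology pairs the exchange/queue with DLX counterparts so failed messages are retried after the 600000 ms TTL instead of being lost. A hypothetical caller, using illustrative rabbit:// names in the gocloud.dev style expected by topic_url and sub_url:

```go
conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") // illustrative broker URL
if err != nil {
	glog.Fatal(err)
}
defer conn.Close()
// declares the exchange/queue pair plus their DLX twins with a 10-minute retry TTL
if err := QueueDeclareAndBind(conn, "rabbit://swfs_exchange", "rabbit://swfs_queue"); err != nil {
	glog.Fatal(err)
}
```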
type GoCDKPubSubInput struct {
sub *pubsub.Subscription
}
@@ -28,23 +81,65 @@ func (k *GoCDKPubSubInput) GetName() string {
}
func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
+ topicUrl := configuration.GetString(prefix + "topic_url")
subURL := configuration.GetString(prefix + "sub_url")
glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
sub, err := pubsub.OpenSubscription(context.Background(), subURL)
if err != nil {
return err
}
+ var conn *amqp.Connection
+ if sub.As(&conn) {
+ ch, err := conn.Channel()
+ if err != nil {
+ return err
+ }
+ defer ch.Close()
+ _, err = ch.QueueInspect(getPath(subURL))
+ if err != nil {
+ if strings.HasPrefix(err.Error(), "Exception (404) Reason") {
+ if err := QueueDeclareAndBind(conn, topicUrl, subURL); err != nil {
+ return err
+ }
+ } else {
+ return err
+ }
+ }
+ }
k.sub = sub
return nil
}
-func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
msg, err := k.sub.Receive(context.Background())
+ if err != nil {
+ return
+ }
+ onFailureFn = func() {
+ if msg.Nackable() {
+ isRedelivered := false
+ var delivery amqp.Delivery
+ if msg.As(&delivery) {
+ isRedelivered = delivery.Redelivered
+ glog.Warningf("onFailureFn() metadata: %+v, redelivered: %v", msg.Metadata, delivery.Redelivered)
+ }
+ if isRedelivered {
+ if err := delivery.Nack(false, false); err != nil {
+ glog.Error(err)
+ }
+ } else {
+ msg.Nack()
+ }
+ }
+ }
+ onSuccessFn = func() {
+ msg.Ack()
+ }
key = msg.Metadata["key"]
message = &filer_pb.EventNotification{}
err = proto.Unmarshal(msg.Body, message)
if err != nil {
- return "", nil, err
+ return "", nil, onSuccessFn, onFailureFn, err
}
- return key, message, nil
+ return key, message, onSuccessFn, onFailureFn, nil
}
diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go
index a950bb42b..f7c767d4a 100644
--- a/weed/replication/sub/notification_google_pub_sub.go
+++ b/weed/replication/sub/notification_google_pub_sub.go
@@ -85,16 +85,22 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
go k.sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
k.messageChan <- m
- m.Ack()
})
return err
}
-func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
m := <-k.messageChan
+ onSuccessFn = func() {
+ m.Ack()
+ }
+ onFailureFn = func() {
+ m.Nack()
+ }
+
// process the message
key = m.Attributes["key"]
message = &filer_pb.EventNotification{}
diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go
index fa9cfad9b..622a759ea 100644
--- a/weed/replication/sub/notification_kafka.go
+++ b/weed/replication/sub/notification_kafka.go
@@ -97,7 +97,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string,
return nil
}
-func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
msg := <-k.messageChan
diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go
index 8a2668f98..d5a910db9 100644
--- a/weed/replication/sub/notifications.go
+++ b/weed/replication/sub/notifications.go
@@ -10,7 +10,7 @@ type NotificationInput interface {
GetName() string
// Initialize initializes the file store
Initialize(configuration util.Configuration, prefix string) error
- ReceiveMessage() (key string, message *filer_pb.EventNotification, err error)
+ ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error)
}
var (
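Widening ReceiveMessage to return onSuccessFn/onFailureFn moves the ack/nack decision to the caller, so a message is acknowledged only after the event has been applied. The consuming side presumably follows this shape (a sketch; the real loop lives in the filer.replicate command and may differ):

```go
for {
	key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
	if err != nil {
		glog.Errorf("receive %s: %v", key, err)
		if onFailureFn != nil {
			onFailureFn() // nack: let the broker redeliver or dead-letter it
		}
		continue
	}
	if err = replicator.Replicate(context.Background(), key, m); err != nil {
		glog.Errorf("replicate %s: %v", key, err)
		if onFailureFn != nil {
			onFailureFn()
		}
	} else if onSuccessFn != nil {
		onSuccessFn() // ack only after the event is durably applied
	}
}
```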
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index 93544b75e..b8af6381a 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -3,6 +3,7 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"io/ioutil"
"net/http"
@@ -185,7 +186,6 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
return identity, s3err.ErrNotImplemented
}
- glog.V(3).Infof("auth error: %v", s3Err)
if s3Err != s3err.ErrNone {
return identity, s3Err
}
@@ -202,6 +202,44 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
}
+func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err.ErrorCode) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ var found bool
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return identity, s3err.ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return identity, s3err.ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return identity, s3err.ErrNone
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return identity, s3err.ErrNotImplemented
+ case authTypeAnonymous:
+ identity, found = iam.lookupAnonymous()
+ if !found {
+ return identity, s3err.ErrAccessDenied
+ }
+ default:
+ return identity, s3err.ErrNotImplemented
+ }
+
+ glog.V(3).Infof("auth error: %v", s3Err)
+ if s3Err != s3err.ErrNone {
+ return identity, s3Err
+ }
+ return identity, s3err.ErrNone
+}
+
func (identity *Identity) canDo(action Action, bucket string) bool {
if identity.isAdmin() {
return true
@@ -215,10 +253,14 @@ func (identity *Identity) canDo(action Action, bucket string) bool {
return false
}
limitedByBucket := string(action) + ":" + bucket
+ adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket
for _, a := range identity.Actions {
if string(a) == limitedByBucket {
return true
}
+ if string(a) == adminLimitedByBucket {
+ return true
+ }
}
return false
}
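The extra check makes a per-bucket Admin grant imply every other action on that bucket (illustrative identity; only the Actions field matters here):

```go
id := &Identity{Name: "ops", Actions: []Action{"Admin:images"}}
id.canDo("Write", "images") // true, via the implied "Admin:images"
id.canDo("Write", "videos") // false, no grant covers that bucket
```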
diff --git a/weed/s3api/http/header.go b/weed/s3api/http/header.go
index a960d2370..6614b0af0 100644
--- a/weed/s3api/http/header.go
+++ b/weed/s3api/http/header.go
@@ -31,6 +31,6 @@ const (
// Non-Standard S3 HTTP request constants
const (
- AmzIdentityId = "x-amz-identity-id"
- AmzIsAdmin = "x-amz-is-admin" // only set to http request header as a context
+ AmzIdentityId = "s3-identity-id"
+ AmzIsAdmin = "s3-is-admin" // only set to http request header as a context
)
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 00b7382cc..338f82668 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/xml"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"math"
"net/http"
"time"
@@ -26,6 +27,16 @@ type ListAllMyBucketsResult struct {
func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ if s3a.iam.isEnabled() {
+ identity, s3Err = s3a.iam.authUser(r)
+ if s3Err != s3err.ErrNone {
+ writeErrorResponse(w, s3Err, r.URL)
+ return
+ }
+ }
+
var response ListAllMyBucketsResult
entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
@@ -40,7 +51,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
var buckets []*s3.Bucket
for _, entry := range entries {
if entry.IsDirectory {
- if !s3a.hasAccess(r, entry) {
+ if identity != nil && !identity.canDo(s3_constants.ACTION_ADMIN, entry.Name) {
continue
}
buckets = append(buckets, &s3.Bucket{
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 401e2f96c..c9e124328 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"net/http"
+ "sort"
"strings"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
@@ -175,16 +176,15 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
var deletedObjects []ObjectIdentifier
var deleteErrors []DeleteError
+ directoriesWithDeletion := make(map[string]int)
+
s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ // delete file entries
for _, object := range deleteObjects.Objects {
- response, _ := s3a.listFilerEntries(bucket, object.ObjectName, 1, "", "/")
- if len(response.Contents) != 0 && strings.HasSuffix(object.ObjectName, "/") {
- continue
- }
lastSeparator := strings.LastIndex(object.ObjectName, "/")
- parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, true
+ parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, false
if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
entryName = object.ObjectName[lastSeparator+1:]
parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
@@ -193,8 +193,10 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
if err == nil {
+ directoriesWithDeletion[parentDirectoryPath]++
deletedObjects = append(deletedObjects, object)
} else {
+ delete(directoriesWithDeletion, parentDirectoryPath)
deleteErrors = append(deleteErrors, DeleteError{
Code: "",
Message: err.Error(),
@@ -202,6 +204,12 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
})
}
}
+
+ // purge empty folders, only checking folders with deletions
+ for len(directoriesWithDeletion) > 0 {
+ directoriesWithDeletion = doDeleteEmptyDirectories(client, directoriesWithDeletion)
+ }
+
return nil
})
@@ -215,6 +223,26 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
}
+func doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
+ var allDirs []string
+ for dir := range directoriesWithDeletion {
+ allDirs = append(allDirs, dir)
+ }
+ sort.Slice(allDirs, func(i, j int) bool {
+ return len(allDirs[i]) > len(allDirs[j])
+ })
+ newDirectoriesWithDeletion = make(map[string]int)
+ for _, dir := range allDirs {
+ parentDir, dirName := util.FullPath(dir).DirAndName()
+ if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
+ glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
+ } else {
+ newDirectoriesWithDeletion[parentDir]++
+ }
+ }
+ return
+}
+
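A worked example of the purge loop: after deleting /b/o1 and /b/d/o2, directoriesWithDeletion is {"/b": 1, "/b/d": 1}. Sorting longest-path-first tries /b/d before /b; each directory that turns out empty is removed and credits its parent for the next round, so the sweep proceeds bottom-up until a pass deletes nothing and the map drains.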
var passThroughHeaders = []string{
"response-cache-control",
"response-content-disposition",
@@ -268,8 +296,10 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
defer util.CloseResponse(resp)
if (resp.ContentLength == -1 || resp.StatusCode == 404) && !strings.HasSuffix(destUrl, "/") {
- writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
- return
+ if r.Method != "DELETE" {
+ writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
+ return
+ }
}
responseFn(resp, w)
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index 0af099967..2d36c6ec9 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -71,7 +71,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
ContinuationToken: continuationToken,
Delimiter: response.Delimiter,
IsTruncated: response.IsTruncated,
- KeyCount: len(response.Contents),
+ KeyCount: len(response.Contents) + len(response.CommonPrefixes),
MaxKeys: response.MaxKeys,
NextContinuationToken: response.NextMarker,
Prefix: response.Prefix,
@@ -264,8 +264,10 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
}
} else {
var isEmpty bool
- if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
- glog.Errorf("check empty folder %s: %v", dir, err)
+ if !s3a.option.AllowEmptyFolder {
+ if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
+ glog.Errorf("check empty folder %s: %v", dir, err)
+ }
}
if !isEmpty {
eachEntryFn(dir, entry)
@@ -310,13 +312,17 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
// println("+ isDirectoryAllEmpty", dir, name)
+ glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
+ defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)
var fileCounter int
var subDirs []string
currentDir := parentDir + "/" + name
var startFrom string
var isExhausted bool
+ var foundEntry bool
for fileCounter == 0 && !isExhausted && err == nil {
err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
+ foundEntry = true
if entry.IsDirectory {
subDirs = append(subDirs, entry.Name)
} else {
@@ -324,8 +330,12 @@ func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerCli
}
startFrom = entry.Name
isExhausted = isExhausted || isLast
+ glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast)
return nil
}, startFrom, false, 8)
+ if !foundEntry {
+ break
+ }
}
if err != nil {
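
Without the foundEntry guard, a directory whose listing returns no entries at all would spin forever: isExhausted is only ever set inside the per-entry callback. A minimal sketch of the termination logic, with listPage standing in for filer_pb.SeaweedList:

```go
package main

import "fmt"

// listPage stands in for filer_pb.SeaweedList: it calls fn once per entry
// in the page; for an empty directory it never invokes fn at all.
func listPage(startFrom string, fn func(name string, isLast bool)) {
	// empty directory: no entries, fn is never called
}

func isDirectoryEmpty() bool {
	var fileCounter int
	var isExhausted bool
	startFrom := ""
	for fileCounter == 0 && !isExhausted {
		var foundEntry bool
		listPage(startFrom, func(name string, isLast bool) {
			foundEntry = true
			fileCounter++
			startFrom = name
			isExhausted = isExhausted || isLast
		})
		if !foundEntry {
			break // the fix: an empty page can never set isExhausted
		}
	}
	return fileCounter == 0
}

func main() {
	fmt.Println(isDirectoryEmpty()) // true, and the loop terminates
}
```
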
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index 93e2bb575..4993104ae 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -20,6 +20,7 @@ type S3ApiServerOption struct {
DomainName string
BucketsPath string
GrpcDialOption grpc.DialOption
+ AllowEmptyFolder bool
}
type S3ApiServer struct {
@@ -128,7 +129,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
}
// ListBuckets
- apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
+ apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST"))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
diff --git a/weed/security/tls.go b/weed/security/tls.go
index 72363768f..b4bf84e2d 100644
--- a/weed/security/tls.go
+++ b/weed/security/tls.go
@@ -3,17 +3,16 @@ package security
import (
"crypto/tls"
"crypto/x509"
+ "github.com/chrislusf/seaweedfs/weed/util"
"io/ioutil"
- "github.com/spf13/viper"
-
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/chrislusf/seaweedfs/weed/glog"
)
-func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
+func LoadServerTLS(config *util.ViperProxy, component string) grpc.ServerOption {
if config == nil {
return nil
}
@@ -40,7 +39,7 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
return grpc.Creds(ta)
}
-func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
+func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption {
if config == nil {
return grpc.WithInsecure()
}
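
Callers now pass the util.ViperProxy wrapper instead of a bare *viper.Viper. A sketch of client-side wiring, assuming util.GetViper() returns the proxy and "client" is the component key; a nil or empty config still falls back to grpc.WithInsecure():

```go
package main

import (
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
	"google.golang.org/grpc"
)

func main() {
	// load security.toml if present; required=false tolerates a missing file
	util.LoadConfiguration("security", false)

	// assumption: util.GetViper() returns the *util.ViperProxy that this
	// refactor threads through in place of *viper.Viper
	dialOption := security.LoadClientTLS(util.GetViper(), "client")

	conn, err := grpc.Dial("localhost:18888", dialOption)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```
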
diff --git a/weed/server/common.go b/weed/server/common.go
index 0ce846ed7..7d32d6a9a 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -234,12 +234,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
}
}
-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) {
rangeReq := r.Header.Get("Range")
if rangeReq == "" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
- if err := writeFn(w, 0, totalSize); err != nil {
+ if err := writeFn(w, 0, totalSize, 0); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -278,9 +278,8 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
ra := ranges[0]
w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
w.Header().Set("Content-Range", ra.contentRange(totalSize))
- w.WriteHeader(http.StatusPartialContent)
- err = writeFn(w, ra.start, ra.length)
+ err = writeFn(w, ra.start, ra.length, http.StatusPartialContent)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -308,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
pw.CloseWithError(e)
return
}
- if e = writeFn(part, ra.start, ra.length); e != nil {
+ if e = writeFn(part, ra.start, ra.length, 0); e != nil {
pw.CloseWithError(e)
return
}
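
Threading httpStatusCode into writeFn lets the callback finish setting headers before the status line is flushed; once WriteHeader runs, Go's http.ResponseWriter silently drops further header changes. A self-contained sketch of a conforming callback (names are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// writeRange is a minimal writeFn matching the new signature: it may still
// set headers, then flushes the status line itself, then writes the body.
func writeRange(w http.ResponseWriter, rs io.ReadSeeker, offset, size int64, httpStatusCode int) error {
	if _, err := rs.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", offset, offset+size-1))
	if httpStatusCode != 0 {
		w.WriteHeader(httpStatusCode) // headers set after this call would be dropped
	}
	_, err := io.CopyN(w, rs, size)
	return err
}

func main() {
	rec := httptest.NewRecorder()
	_ = writeRange(rec, strings.NewReader("hello world"), 6, 5, http.StatusPartialContent)
	fmt.Println(rec.Code, rec.Body.String()) // 206 world
}
```
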
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 7f3012e96..04145f2f9 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -44,7 +44,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
}, nil
}
-func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
+func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) {
glog.V(4).Infof("ListEntries %v", req)
@@ -60,23 +60,12 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
lastFileName := req.StartFromFileName
includeLastFile := req.InclusiveStartFrom
+ var listErr error
for limit > 0 {
- entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit, req.Prefix)
-
- if err != nil {
- return err
- }
- if len(entries) == 0 {
- return nil
- }
-
- includeLastFile = false
-
- for _, entry := range entries {
-
- lastFileName = entry.Name()
-
- if err := stream.Send(&filer_pb.ListEntriesResponse{
+ var hasEntries bool
+ lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", func(entry *filer.Entry) bool {
+ hasEntries = true
+ if err = stream.Send(&filer_pb.ListEntriesResponse{
Entry: &filer_pb.Entry{
Name: entry.Name(),
IsDirectory: entry.IsDirectory(),
@@ -88,19 +77,28 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
Content: entry.Content,
},
}); err != nil {
- return err
+ return false
}
limit--
if limit == 0 {
- return nil
+ return false
}
- }
+ return true
+ })
- if len(entries) < paginationLimit {
- break
+ if listErr != nil {
+ return listErr
+ }
+ if err != nil {
+ return err
+ }
+ if !hasEntries {
+ return nil
}
+ includeLastFile = false
+
}
return nil
@@ -327,7 +325,7 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr
err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures)
resp = &filer_pb.DeleteEntryResponse{}
- if err != nil {
+ if err != nil && err != filer_pb.ErrNotFound {
resp.Error = err.Error()
}
return resp, nil
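
The handler now streams entries through a per-entry callback whose boolean return drives the walk: returning false stops the listing, e.g. when a send fails or the limit is reached. A stand-in sketch of that contract:

```go
package main

import "fmt"

// streamList stands in for filer.StreamListDirectoryEntries: it feeds
// entries to fn until fn returns false, and returns the last name seen.
func streamList(entries []string, fn func(name string) bool) (lastName string) {
	for _, name := range entries {
		lastName = name
		if !fn(name) {
			break
		}
	}
	return
}

func main() {
	limit := 2
	var sent []string
	last := streamList([]string{"a", "b", "c"}, func(name string) bool {
		sent = append(sent, name) // stream.Send in the real handler
		limit--
		return limit > 0 // false stops the walk, like hitting the limit
	})
	fmt.Println(sent, "resume after:", last) // [a b] resume after: b
}
```
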
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index fa86737ac..5b68b64de 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -75,7 +75,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
includeLastFile := false
for {
- entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "")
+ entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "")
if err != nil {
return err
}
@@ -90,7 +90,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
return err
}
}
- if len(entries) < 1024 {
+ if !hasMore {
break
}
}
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
index 3b8ced675..d9f91b125 100644
--- a/weed/server/filer_grpc_server_sub_meta.go
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -29,16 +29,20 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)
- processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
- if err != nil {
- return fmt.Errorf("reading from persisted logs: %v", err)
- }
-
- if processedTsNs != 0 {
- lastReadTime = time.Unix(0, processedTsNs)
- }
+ var processedTsNs int64
+ var err error
for {
+
+ processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
+ if err != nil {
+ return fmt.Errorf("reading from persisted logs: %v", err)
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
lastReadTime, err = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
fs.filer.MetaAggregator.ListenersLock.Lock()
fs.filer.MetaAggregator.ListenersCond.Wait()
@@ -46,6 +50,9 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
return true
}, eachLogEntryFn)
if err != nil {
+ if err == log_buffer.ResumeFromDiskError {
+ continue
+ }
glog.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
@@ -73,19 +80,23 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)
- // println("reading from persisted logs ...")
- processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
- if err != nil {
- return fmt.Errorf("reading from persisted logs: %v", err)
- }
-
- if processedTsNs != 0 {
- lastReadTime = time.Unix(0, processedTsNs)
- }
- glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+ var processedTsNs int64
+ var err error
- // println("reading from in memory logs ...")
for {
+ // println("reading from persisted logs ...")
+ processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
+ if err != nil {
+ return fmt.Errorf("reading from persisted logs: %v", err)
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+ // glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+
+ // println("reading from in memory logs ...")
+
lastReadTime, err = fs.filer.LocalMetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
fs.listenersLock.Lock()
fs.listenersCond.Wait()
@@ -93,6 +104,9 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
return true
}, eachLogEntryFn)
if err != nil {
+ if err == log_buffer.ResumeFromDiskError {
+ continue
+ }
glog.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
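
ResumeFromDiskError signals that the requested position has already been flushed out of the in-memory log buffer, so the subscriber should loop back to the persisted logs rather than sleeping and retrying the memory buffer. A sketch of the control flow (errResumeFromDisk is a local stand-in for log_buffer.ResumeFromDiskError):

```go
package main

import (
	"errors"
	"fmt"
)

// errResumeFromDisk is a local stand-in for log_buffer.ResumeFromDiskError.
var errResumeFromDisk = errors.New("resume from disk")

func main() {
	memoryReads := 0
	tailMemoryLogs := func() error {
		memoryReads++
		if memoryReads == 1 {
			// the requested timestamp was already flushed out of memory
			return errResumeFromDisk
		}
		return nil
	}

	for {
		fmt.Println("read persisted logs up to lastReadTime, then tail memory")
		if err := tailMemoryLogs(); err != nil {
			if errors.Is(err, errResumeFromDisk) {
				continue // loop back to the persisted logs, do not sleep
			}
			return
		}
		break
	}
	fmt.Println("caught up; waiting for new events")
}
```
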
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 2991d14ab..22474a5e2 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -23,11 +23,15 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/hbase"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3"
_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+ _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -55,7 +59,7 @@ type FilerOption struct {
Port uint32
recursiveDelete bool
Cipher bool
- CacheToFilerLimit int64
+ SaveToFilerLimit int
Filers []string
}
diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go
index 451e2a2de..3bc0c5d0d 100644
--- a/weed/server/filer_server_handlers.go
+++ b/weed/server/filer_server_handlers.go
@@ -3,18 +3,33 @@ package weed_server
import (
"github.com/chrislusf/seaweedfs/weed/util"
"net/http"
+ "strings"
"time"
"github.com/chrislusf/seaweedfs/weed/stats"
)
func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
+
+ start := time.Now()
+
+ // proxy to volume servers
+ var fileId string
+ if strings.HasPrefix(r.RequestURI, "/?proxyChunkId=") {
+ fileId = r.RequestURI[len("/?proxyChunkId="):]
+ }
+ if fileId != "" {
+ stats.FilerRequestCounter.WithLabelValues("proxy").Inc()
+ fs.proxyToVolumeServer(w, r, fileId)
+ stats.FilerRequestHistogram.WithLabelValues("proxy").Observe(time.Since(start).Seconds())
+ return
+ }
+
w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION)
if r.Header.Get("Origin") != "" {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
}
- start := time.Now()
switch r.Method {
case "GET":
stats.FilerRequestCounter.WithLabelValues("get").Inc()
diff --git a/weed/server/filer_server_handlers_proxy.go b/weed/server/filer_server_handlers_proxy.go
new file mode 100644
index 000000000..8d73bc960
--- /dev/null
+++ b/weed/server/filer_server_handlers_proxy.go
@@ -0,0 +1,66 @@
+package weed_server
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
+ "math/rand"
+ "net/http"
+)
+
+var (
+ client *http.Client
+)
+
+func init() {
+ client = &http.Client{Transport: &http.Transport{
+ MaxIdleConnsPerHost: 1024,
+ }}
+}
+
+func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Request, fileId string) {
+
+ urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(fileId)
+ if err != nil {
+ glog.Errorf("locate %s: %v", fileId, err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ if len(urlStrings) == 0 {
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ urlString := urlStrings[rand.Intn(len(urlStrings))]
+ proxyReq, err := http.NewRequest(r.Method, urlString, r.Body)
+ if err != nil {
+ glog.Errorf("NewRequest %s: %v", urlString, err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ proxyReq.Header.Set("Host", r.Host)
+ proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+
+ for header, values := range r.Header {
+ for _, value := range values {
+ proxyReq.Header.Add(header, value)
+ }
+ }
+
+ proxyResponse, postErr := client.Do(proxyReq)
+
+ if postErr != nil {
+ glog.Errorf("post to filer: %v", postErr)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ defer util.CloseResponse(proxyResponse)
+
+ for k, v := range proxyResponse.Header {
+ w.Header()[k] = v
+ }
+ w.WriteHeader(proxyResponse.StatusCode)
+ io.Copy(w, proxyResponse.Body)
+
+}
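
With this handler in place, a chunk can be fetched through the filer itself; the filer resolves the file id via the master lookup and relays the response from one of the volume server replicas. A minimal client sketch (the file id and filer address are hypothetical):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// hypothetical chunk file id; assumes a filer listening on localhost:8888
	fileId := "3,01637037d6"
	resp, err := http.Get("http://localhost:8888/?proxyChunkId=" + fileId)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(data), "bytes")
}
```
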
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index d55bf7cbb..f77462adb 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -6,6 +6,7 @@ import (
"io"
"mime"
"net/http"
+ "net/url"
"path/filepath"
"strconv"
"strings"
@@ -99,6 +100,16 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
w.Header().Set(k, string(v))
}
+ // Seaweed custom headers are not visible to Vue or JavaScript
+ seaweedHeaders := []string{}
+ for header := range w.Header() {
+ if strings.HasPrefix(header, "Seaweed-") {
+ seaweedHeaders = append(seaweedHeaders, header)
+ }
+ }
+ seaweedHeaders = append(seaweedHeaders, "Content-Disposition")
+ w.Header().Set("Access-Control-Expose-Headers", strings.Join(seaweedHeaders, ","))
+
//set tag count
if r.Method == "GET" {
tagCount := 0
@@ -121,6 +132,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
setEtag(w, etag)
filename := entry.Name()
+ filename = url.QueryEscape(filename)
adjustHeaderContentDisposition(w, r, filename)
totalSize := int64(entry.Size())
@@ -146,7 +158,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
}
- processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
+ if httpStatusCode != 0 {
+ w.WriteHeader(httpStatusCode)
+ }
if offset+size <= int64(len(entry.Content)) {
_, err := writer.Write(entry.Content[offset : offset+size])
return err
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index 99345550c..9cf79ab41 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -34,8 +34,9 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
}
lastFileName := r.FormValue("lastFileName")
+ namePattern := r.FormValue("namePattern")
- entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit, "")
+ entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern)
if err != nil {
glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
@@ -43,7 +44,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
return
}
- shouldDisplayLoadMore := len(entries) == limit
if path == "/" {
path = ""
}
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 6af06b90b..28dff07c6 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -96,7 +96,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
glog.V(1).Infoln("deleting", objectPath, ":", err.Error())
httpStatus := http.StatusInternalServerError
if err == filer_pb.ErrNotFound {
- httpStatus = http.StatusNotFound
+ httpStatus = http.StatusNoContent
}
writeJsonError(w, r, httpStatus, err)
return
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 963f1cd11..145989445 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -96,12 +96,6 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
return nil, nil, err
}
- fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
- if replyerr != nil {
- glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
- return
- }
-
md5bytes = md5Hash.Sum(nil)
filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)
@@ -111,25 +105,26 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
fileName := ""
- contentType := ""
+ contentType := r.Header.Get("Content-Type")
+ if contentType == "application/octet-stream" {
+ contentType = ""
+ }
fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
if err != nil {
return nil, nil, err
}
- fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
- if replyerr != nil {
- glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
- return
- }
-
md5bytes = md5Hash.Sum(nil)
filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)
return
}
+func isAppend(r *http.Request) bool {
+ return r.URL.Query().Get("op") == "append"
+}
+
func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
// detect file mode
@@ -151,30 +146,66 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
}
- glog.V(4).Infoln("saving", path)
- entry := &filer.Entry{
- FullPath: util.FullPath(path),
- Attr: filer.Attr{
- Mtime: time.Now(),
- Crtime: time.Now(),
- Mode: os.FileMode(mode),
- Uid: OS_UID,
- Gid: OS_GID,
- Replication: so.Replication,
- Collection: so.Collection,
- TtlSec: so.TtlSeconds,
- DiskType: so.DiskType,
- Mime: contentType,
- Md5: md5bytes,
- FileSize: uint64(chunkOffset),
- },
- Chunks: fileChunks,
- Content: content,
+ var entry *filer.Entry
+ var mergedChunks []*filer_pb.FileChunk
+ // when it is an append
+ if isAppend(r) {
+ existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path))
+ if findErr != nil && findErr != filer_pb.ErrNotFound {
+ glog.V(0).Infof("failing to find %s: %v", path, findErr)
+ }
+ entry = existingEntry
+ }
+ if entry != nil {
+ entry.Mtime = time.Now()
+ entry.Md5 = nil
+ // adjust chunk offsets
+ for _, chunk := range fileChunks {
+ chunk.Offset += int64(entry.FileSize)
+ }
+ mergedChunks = append(entry.Chunks, fileChunks...)
+ entry.FileSize += uint64(chunkOffset)
+
+ // TODO: support appending to files whose content is inlined in the entry
+ if len(entry.Content) > 0 {
+ replyerr = fmt.Errorf("append to small file is not supported yet")
+ return
+ }
+
+ } else {
+ glog.V(4).Infoln("saving", path)
+ mergedChunks = fileChunks
+ entry = &filer.Entry{
+ FullPath: util.FullPath(path),
+ Attr: filer.Attr{
+ Mtime: time.Now(),
+ Crtime: time.Now(),
+ Mode: os.FileMode(mode),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ Replication: so.Replication,
+ Collection: so.Collection,
+ TtlSec: so.TtlSeconds,
+ DiskType: so.DiskType,
+ Mime: contentType,
+ Md5: md5bytes,
+ FileSize: uint64(chunkOffset),
+ },
+ Content: content,
+ }
}
+ // maybe compact entry chunks
+ mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks)
+ if replyerr != nil {
+ glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
+ return
+ }
+ entry.Chunks = mergedChunks
+
filerResult = &FilerPostResult{
Name: fileName,
- Size: chunkOffset,
+ Size: int64(entry.FileSize),
}
if entry.Extended == nil {
@@ -190,7 +221,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
- fs.filer.DeleteChunks(entry.Chunks)
+ fs.filer.DeleteChunks(fileChunks)
replyerr = dbErr
filerResult.Error = dbErr.Error()
glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
@@ -205,23 +236,47 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
chunkOffset := int64(0)
- var smallContent, content []byte
+ var smallContent []byte
for {
limitedReader := io.LimitReader(partReader, int64(chunkSize))
- // assign one file id for one chunk
- fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
- if assignErr != nil {
- return nil, nil, 0, assignErr, nil
+ data, err := ioutil.ReadAll(limitedReader)
+ if err != nil {
+ return nil, nil, 0, err, nil
}
+ if chunkOffset == 0 && !isAppend(r) {
+ if len(data) < fs.option.SaveToFilerLimit || (strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024) {
+ smallContent = data
+ chunkOffset += int64(len(data))
+ break
+ }
+ }
+ dataReader := util.NewBytesReader(data)
+
+ // retry to assign a different file id
+ var fileId, urlLocation string
+ var auth security.EncodedJwt
+ var assignErr, uploadErr error
+ var uploadResult *operation.UploadResult
+ for i := 0; i < 3; i++ {
+ // assign one file id for one chunk
+ fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
+ if assignErr != nil {
+ return nil, nil, 0, assignErr, nil
+ }
- // upload the chunk to the volume server
- uploadResult, uploadErr, data := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
+ // upload the chunk to the volume server
+ uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
+ if uploadErr != nil {
+ time.Sleep(251 * time.Millisecond)
+ continue
+ }
+ break
+ }
if uploadErr != nil {
return nil, nil, 0, uploadErr, nil
}
- content = data
// if last chunk exhausted the reader exactly at the border
if uploadResult.Size == 0 {
@@ -242,9 +297,6 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
}
}
- if chunkOffset < fs.option.CacheToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && chunkOffset < 4*1024 {
- smallContent = content
- }
return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
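
For ?op=append, the uploaded chunks arrive with offsets relative to the upload, so they are shifted by the existing file size before being merged. A small sketch of that arithmetic with plain structs (sizes are illustrative):

```go
package main

import "fmt"

type chunk struct {
	Offset int64
	Size   int64
}

// mergeAppend mirrors the offset adjustment: new chunks start at 0 in the
// upload and are shifted by the existing file size before being appended.
func mergeAppend(existing []chunk, existingSize int64, uploaded []chunk, uploadedSize int64) ([]chunk, int64) {
	for i := range uploaded {
		uploaded[i].Offset += existingSize
	}
	return append(existing, uploaded...), existingSize + uploadedSize
}

func main() {
	existing := []chunk{{0, 4 << 20}, {4 << 20, 1 << 20}} // 5 MiB file
	uploaded := []chunk{{0, 2 << 20}}                     // 2 MiB append
	merged, size := mergeAppend(existing, 5<<20, uploaded, 2<<20)
	fmt.Println(merged, size) // [{0 4194304} {4194304 1048576} {5242880 2097152}] 7340032
}
```
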
diff --git a/weed/server/filer_server_rocksdb.go b/weed/server/filer_server_rocksdb.go
new file mode 100644
index 000000000..5fcc7e88f
--- /dev/null
+++ b/weed/server/filer_server_rocksdb.go
@@ -0,0 +1,7 @@
+// +build rocksdb
+
+package weed_server
+
+import (
+ _ "github.com/chrislusf/seaweedfs/weed/filer/rocksdb"
+)
diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go
index 6d6c3daa3..82b143c3d 100644
--- a/weed/server/volume_grpc_copy_incremental.go
+++ b/weed/server/volume_grpc_copy_incremental.go
@@ -27,7 +27,7 @@ func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrem
return nil
}
- startOffset := foundOffset.ToAcutalOffset()
+ startOffset := foundOffset.ToActualOffset()
buf := make([]byte, 1024*1024*2)
return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream)
diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go
index 2dde5b69c..6c039ebf5 100644
--- a/weed/server/volume_grpc_tail.go
+++ b/weed/server/volume_grpc_tail.go
@@ -72,7 +72,7 @@ func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServe
stream: stream,
}
- err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToAcutalOffset(), scanner)
+ err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToActualOffset(), scanner)
return scanner.lastProcessedTimestampNs, err
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 7c731763b..4cdb1628b 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -24,7 +24,7 @@ type VolumeServer struct {
guard *security.Guard
grpcDialOption grpc.DialOption
- needleMapKind storage.NeedleMapType
+ needleMapKind storage.NeedleMapKind
FixJpgOrientation bool
ReadRedirect bool
compactionBytePerSecond int64
@@ -39,7 +39,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []storage.DiskType,
idxFolder string,
- needleMapKind storage.NeedleMapType,
+ needleMapKind storage.NeedleMapKind,
masterNodes []string, pulseSeconds int,
dataCenter string, rack string,
whiteList []string,
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index 1c963b39c..1cd4ee21d 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -261,10 +261,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
return nil
}
- processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
if _, e = rs.Seek(offset, 0); e != nil {
return e
}
+ if httpStatusCode != 0 {
+ w.WriteHeader(httpStatusCode)
+ }
_, e = io.CopyN(writer, rs, size)
return e
})
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 6694086af..4b57c7afe 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -106,7 +106,11 @@ type WebDavFile struct {
func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
- chunkCache := chunk_cache.NewTieredChunkCache(256, option.CacheDir, option.CacheSizeMB, 1024*1024)
+ cacheUniqueId := util.Md5String([]byte("webdav" + option.FilerGrpcAddress + util.Version()))[0:8]
+ cacheDir := path.Join(option.CacheDir, cacheUniqueId)
+
+ os.MkdirAll(cacheDir, os.FileMode(0755))
+ chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
return &WebDavFileSystem{
option: option,
chunkCache: chunkCache,
@@ -525,7 +529,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
}
if f.reader == nil {
chunkViews := filer.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt64)
- f.reader = filer.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
+ f.reader = filer.NewChunkReaderAtFromClient(filer.LookupFn(f.fs), chunkViews, f.fs.chunkCache, fileSize)
}
readSize, err = f.reader.ReadAt(p, f.off)
diff --git a/weed/shell/command_s3_configure.go b/weed/shell/command_s3_configure.go
index 869949a25..ca51ef72f 100644
--- a/weed/shell/command_s3_configure.go
+++ b/weed/shell/command_s3_configure.go
@@ -25,7 +25,7 @@ func (c *commandS3Configure) Name() string {
}
func (c *commandS3Configure) Help() string {
- return ` configure and apply s3 options for each bucket
+ return `configure and apply s3 options for each bucket
# see the current configuration file content
s3.configure
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
index 9b9abd8eb..8ae8850f3 100644
--- a/weed/shell/command_volume_fix_replication.go
+++ b/weed/shell/command_volume_fix_replication.go
@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"io"
+ "path/filepath"
"sort"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -19,6 +20,7 @@ func init() {
}
type commandVolumeFixReplication struct {
+ collectionPattern *string
}
func (c *commandVolumeFixReplication) Name() string {
@@ -33,12 +35,13 @@ func (c *commandVolumeFixReplication) Help() string {
This command also finds all under-replicated volumes, and finds volume servers with free slots.
If the free slots satisfy the replication requirement, the volume content is copied over and mounted.
- volume.fix.replication -n # do not take action
- volume.fix.replication # actually deleting or copying the volume files and mount the volume
+ volume.fix.replication -n # do not take action
+ volume.fix.replication # actually deleting or copying the volume files and mount the volume
+ volume.fix.replication -collectionPattern=important* # fix any collections with prefix "important"
Note:
- * each time this will only add back one replica for one volume id. If there are multiple replicas
- are missing, e.g. multiple volume servers are new, you may need to run this multiple times.
+ * each time this will only add back one replica for each volume id that is under replicated.
+ If multiple replicas are missing, e.g. the replica count is > 2, you may need to run this multiple times.
* do not run this too quickly within seconds, since the new volume replica may take a few seconds
to register itself to the master.
@@ -52,6 +55,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
}
volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
skipChange := volFixReplicationCommand.Bool("n", false, "skip the changes")
if err = volFixReplicationCommand.Parse(args); err != nil {
return nil
@@ -127,6 +131,17 @@ func (c *commandVolumeFixReplication) fixOverReplicatedVolumes(commandEnv *Comma
replica := pickOneReplicaToDelete(replicas, replicaPlacement)
+ // check collection name pattern
+ if *c.collectionPattern != "" {
+ matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
+ if err != nil {
+ return fmt.Errorf("match pattern %s with collection %s: %v", *c.collectionPattern, replica.info.Collection, err)
+ }
+ if !matched {
+ break
+ }
+ }
+
fmt.Fprintf(writer, "deleting volume %d from %s ...\n", replica.info.Id, replica.location.dataNode.Id)
if !takeAction {
@@ -147,9 +162,22 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
replica := pickOneReplicaToCopyFrom(replicas)
replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement))
foundNewLocation := false
+ hasSkippedCollection := false
for _, dst := range allLocations {
// check whether data nodes satisfy the constraints
if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
+ // check collection name pattern
+ if *c.collectionPattern != "" {
+ matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
+ if err != nil {
+ return fmt.Errorf("match pattern %s with collection %s: %v", *c.collectionPattern, replica.info.Collection, err)
+ }
+ if !matched {
+ hasSkippedCollection = true
+ break
+ }
+ }
+
// ask the volume server to replicate the volume
foundNewLocation = true
fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", replica.info.Id, replicaPlacement, replica.location.dataNode.Id, dst.dataNode.Id)
@@ -179,7 +207,7 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
break
}
}
- if !foundNewLocation {
+ if !foundNewLocation && !hasSkippedCollection {
fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", replica.info.Id, replicaPlacement, len(replicas))
}
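
-collectionPattern relies on filepath.Match, so it supports shell-style '*' and '?' wildcards rather than regular expressions. A quick demonstration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// the same wildcard semantics volume.fix.replication uses:
	// '*' matches any run of characters, '?' matches a single one
	for _, collection := range []string{"important_docs", "important", "tmp"} {
		matched, err := filepath.Match("important*", collection)
		fmt.Println(collection, matched, err)
	}
	// important_docs true <nil>
	// important true <nil>
	// tmp false <nil>
}
```
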
diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go
index daab29621..b8b883be6 100644
--- a/weed/storage/backend/backend.go
+++ b/weed/storage/backend/backend.go
@@ -1,6 +1,7 @@
package backend
import (
+ "github.com/chrislusf/seaweedfs/weed/util"
"io"
"os"
"strings"
@@ -9,7 +10,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/spf13/viper"
)
type BackendStorageFile interface {
@@ -45,7 +45,7 @@ var (
)
// used by master to load remote storage configurations
-func LoadConfiguration(config *viper.Viper) {
+func LoadConfiguration(config *util.ViperProxy) {
StorageBackendPrefix := "storage.backend"
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index ce42232a7..a7be3a559 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -84,7 +84,7 @@ func getValidVolumeName(basename string) string {
return ""
}
-func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) bool {
+func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapKind) bool {
basename := fileInfo.Name()
if fileInfo.IsDir() {
return false
@@ -135,7 +135,7 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
return true
}
-func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) {
+func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) {
task_queue := make(chan os.FileInfo, 10*concurrency)
go func() {
@@ -169,7 +169,7 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
}
-func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
+func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind) {
l.concurrentLoadingVolumes(needleMapKind, 10)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
@@ -239,7 +239,7 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e erro
return
}
-func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool {
+func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind)
}
diff --git a/weed/storage/erasure_coding/ec_decoder.go b/weed/storage/erasure_coding/ec_decoder.go
index bc86d9c04..47d3c6550 100644
--- a/weed/storage/erasure_coding/ec_decoder.go
+++ b/weed/storage/erasure_coding/ec_decoder.go
@@ -58,7 +58,7 @@ func FindDatFileSize(dataBaseFileName, indexBaseFileName string) (datSize int64,
return nil
}
- entryStopOffset := offset.ToAcutalOffset() + needle.GetActualSize(size, version)
+ entryStopOffset := offset.ToActualOffset() + needle.GetActualSize(size, version)
if datSize < entryStopOffset {
datSize = entryStopOffset
}
diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go
index 63cc2c352..0d48bec02 100644
--- a/weed/storage/erasure_coding/ec_test.go
+++ b/weed/storage/erasure_coding/ec_test.go
@@ -93,7 +93,7 @@ func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset type
func readDatFile(datFile *os.File, offset types.Offset, size types.Size) ([]byte, error) {
data := make([]byte, size)
- n, err := datFile.ReadAt(data, offset.ToAcutalOffset())
+ n, err := datFile.ReadAt(data, offset.ToActualOffset())
if err != nil {
return nil, fmt.Errorf("failed to ReadAt dat file: %v", err)
}
@@ -105,7 +105,7 @@ func readDatFile(datFile *os.File, offset types.Offset, size types.Size) ([]byte
func readEcFile(datSize int64, ecFiles []*os.File, offset types.Offset, size types.Size) (data []byte, err error) {
- intervals := LocateData(largeBlockSize, smallBlockSize, datSize, offset.ToAcutalOffset(), size)
+ intervals := LocateData(largeBlockSize, smallBlockSize, datSize, offset.ToActualOffset(), size)
for i, interval := range intervals {
if d, e := readOneInterval(interval, ecFiles); e != nil {
diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go
index 2183e43d6..a9d08ed0e 100644
--- a/weed/storage/erasure_coding/ec_volume.go
+++ b/weed/storage/erasure_coding/ec_volume.go
@@ -211,7 +211,7 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.
shard := ev.Shards[0]
// calculate the locations in the ec shards
- intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), types.Size(needle.GetActualSize(size, version)))
+ intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version)))
return
}
diff --git a/weed/storage/erasure_coding/ec_volume_test.go b/weed/storage/erasure_coding/ec_volume_test.go
index fe45bf722..747ef4aab 100644
--- a/weed/storage/erasure_coding/ec_volume_test.go
+++ b/weed/storage/erasure_coding/ec_volume_test.go
@@ -35,16 +35,16 @@ func TestPositioning(t *testing.T) {
needleId, _ := types.ParseNeedleId(test.needleId)
offset, size, err := SearchNeedleFromSortedIndex(ecxFile, fileSize, needleId, nil)
assert.Equal(t, nil, err, "SearchNeedleFromSortedIndex")
- fmt.Printf("offset: %d size: %d\n", offset.ToAcutalOffset(), size)
+ fmt.Printf("offset: %d size: %d\n", offset.ToActualOffset(), size)
}
needleId, _ := types.ParseNeedleId("0f087622")
offset, size, err := SearchNeedleFromSortedIndex(ecxFile, fileSize, needleId, nil)
assert.Equal(t, nil, err, "SearchNeedleFromSortedIndex")
- fmt.Printf("offset: %d size: %d\n", offset.ToAcutalOffset(), size)
+ fmt.Printf("offset: %d size: %d\n", offset.ToActualOffset(), size)
var shardEcdFileSize int64 = 1118830592 // 1024*1024*1024*3
- intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shardEcdFileSize, offset.ToAcutalOffset(), types.Size(needle.GetActualSize(size, needle.CurrentVersion)))
+ intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shardEcdFileSize, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, needle.CurrentVersion)))
for _, interval := range intervals {
shardId, shardOffset := interval.ToShardIdAndOffset(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
index 8f457be1d..7201503f1 100644
--- a/weed/storage/needle/needle_parse_upload.go
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -193,9 +193,9 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
mtype = contentType
}
- pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip"
- // pu.IsZstd = part.Header.Get("Content-Encoding") == "zstd"
}
+ pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip"
+ // pu.IsZstd = part.Header.Get("Content-Encoding") == "zstd"
return
}
diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go
index 9f331267d..5b41286ea 100644
--- a/weed/storage/needle_map.go
+++ b/weed/storage/needle_map.go
@@ -11,10 +11,10 @@ import (
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
-type NeedleMapType int
+type NeedleMapKind int
const (
- NeedleMapInMemory NeedleMapType = iota
+ NeedleMapInMemory NeedleMapKind = iota
NeedleMapLevelDb // small memory footprint, 4MB total, 1 write buffer, 3 block buffer
NeedleMapLevelDbMedium // medium memory footprint, 8MB total, 3 write buffer, 5 block buffer
NeedleMapLevelDbLarge // large memory footprint, 12MB total, 4 write buffer, 8 block buffer
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 470ce0c18..482e8998c 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -40,7 +40,7 @@ type Store struct {
dataCenter string // optional information, overwriting master setting if exists
rack string // optional information, overwriting master setting if exists
connected bool
- NeedleMapType NeedleMapType
+ NeedleMapKind NeedleMapKind
NewVolumesChan chan master_pb.VolumeShortInformationMessage
DeletedVolumesChan chan master_pb.VolumeShortInformationMessage
NewEcShardsChan chan master_pb.VolumeEcShardInformationMessage
@@ -52,8 +52,8 @@ func (s *Store) String() (str string) {
return
}
-func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, idxFolder string, needleMapKind NeedleMapType, diskTypes []DiskType) (s *Store) {
- s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
+func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType) (s *Store) {
+ s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapKind: needleMapKind}
s.Locations = make([]*DiskLocation, 0)
for i := 0; i < len(dirnames); i++ {
location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpacePercents[i], idxFolder, diskTypes[i])
@@ -69,7 +69,7 @@ func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, di
return
}
-func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32, diskType DiskType) error {
+func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32, diskType DiskType) error {
rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement)
if e != nil {
return e
@@ -117,7 +117,7 @@ func (s *Store) FindFreeLocation(diskType DiskType) (ret *DiskLocation) {
}
return ret
}
-func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, diskType DiskType) error {
+func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, diskType DiskType) error {
if s.findVolume(vid) != nil {
return fmt.Errorf("Volume Id %d already exists!", vid)
}
@@ -373,7 +373,7 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
func (s *Store) MountVolume(i needle.VolumeId) error {
for _, location := range s.Locations {
- if found := location.LoadVolume(i, s.NeedleMapType); found == true {
+ if found := location.LoadVolume(i, s.NeedleMapKind); found {
glog.V(0).Infof("mount volume %d", i)
v := s.findVolume(i)
s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index 853757ce3..ab4e96634 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -131,7 +131,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, ErrorDeleted
}
- glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
+ glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals)
if len(intervals) > 1 {
glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
@@ -144,7 +144,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, ErrorDeleted
}
- err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, localEcVolume.Version)
+ err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version)
if err != nil {
return 0, fmt.Errorf("readbytes: %v", err)
}
diff --git a/weed/storage/types/offset_4bytes.go b/weed/storage/types/offset_4bytes.go
index d53147e21..5348d5b36 100644
--- a/weed/storage/types/offset_4bytes.go
+++ b/weed/storage/types/offset_4bytes.go
@@ -54,7 +54,7 @@ func ToOffset(offset int64) Offset {
return Uint32ToOffset(smaller)
}
-func (offset Offset) ToAcutalOffset() (actualOffset int64) {
+func (offset Offset) ToActualOffset() (actualOffset int64) {
return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize)
}
diff --git a/weed/storage/types/offset_5bytes.go b/weed/storage/types/offset_5bytes.go
index 05c6d2f39..b6181fc11 100644
--- a/weed/storage/types/offset_5bytes.go
+++ b/weed/storage/types/offset_5bytes.go
@@ -71,7 +71,7 @@ func ToOffset(offset int64) Offset {
}
}
-func (offset Offset) ToAcutalOffset() (actualOffset int64) {
+func (offset Offset) ToActualOffset() (actualOffset int64) {
return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24 + int64(offset.b4)<<32) * int64(NeedlePaddingSize)
}
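
The renamed ToActualOffset reassembles the stored little-endian bytes into a slot index and scales it by the needle padding size, which is why the 4-byte offset tops out well below what the 5-byte build can address. A sketch of the 5-byte arithmetic, assuming the usual NeedlePaddingSize of 8:

```go
package main

import "fmt"

const needlePaddingSize = 8 // SeaweedFS pads needles to 8-byte boundaries

// toActualOffset mirrors the 5-byte variant: reassemble the little-endian
// bytes into a slot index, then scale by the padding size.
func toActualOffset(b0, b1, b2, b3, b4 byte) int64 {
	slots := int64(b0) + int64(b1)<<8 + int64(b2)<<16 + int64(b3)<<24 + int64(b4)<<32
	return slots * needlePaddingSize
}

func main() {
	// slot index 2^32 (first value needing the fifth byte) -> byte offset 2^35,
	// beyond anything the 4-byte encoding can reach
	fmt.Println(toActualOffset(0, 0, 0, 0, 1)) // 34359738368
}
```
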
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index 1905a85a5..afa0fbf28 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -25,7 +25,7 @@ type Volume struct {
Collection string
DataBackend backend.BackendStorageFile
nm NeedleMapper
- needleMapKind NeedleMapType
+ needleMapKind NeedleMapKind
noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
noWriteLock sync.RWMutex
@@ -50,7 +50,7 @@ type Volume struct {
lastIoError error
}
-func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) {
+func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) {
// if replicaPlacement is nil, the superblock will be loaded from disk
v = &Volume{dir: dirname, dirIdx: dirIdx, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go
index 9aeb10f69..82ea12a89 100644
--- a/weed/storage/volume_backup.go
+++ b/weed/storage/volume_backup.go
@@ -154,13 +154,13 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) {
func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) {
- n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset())
+ n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToActualOffset())
if err != nil {
- return 0, fmt.Errorf("ReadNeedleHeader %s [%d,%d): %v", v.DataBackend.Name(), offset.ToAcutalOffset(), offset.ToAcutalOffset()+NeedleHeaderSize, err)
+ return 0, fmt.Errorf("ReadNeedleHeader %s [%d,%d): %v", v.DataBackend.Name(), offset.ToActualOffset(), offset.ToActualOffset()+NeedleHeaderSize, err)
}
- _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()+NeedleHeaderSize, bodyLength)
+ _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToActualOffset()+NeedleHeaderSize, bodyLength)
if err != nil {
- return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err)
+ return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToActualOffset(), bodyLength, err)
}
return n.AppendAtNs, nil
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 00e04047f..8d63c39c1 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -58,7 +58,7 @@ func doCheckAndFixVolumeData(v *Volume, indexFile *os.File, indexOffset int64) (
return lastAppendAtNs, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), err)
}
} else {
- if lastAppendAtNs, err = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToAcutalOffset(), key, size); err != nil {
+ if lastAppendAtNs, err = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToActualOffset(), key, size); err != nil {
return lastAppendAtNs, err
}
}
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index a6efc630d..52a50a98c 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -14,7 +14,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
-func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, err error) {
+func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind) (v *Volume, err error) {
v = &Volume{dir: dirname, Collection: collection, Id: id}
v.SuperBlock = super_block.SuperBlock{}
v.needleMapKind = needleMapKind
@@ -22,7 +22,7 @@ func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeI
return
}
-func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) (err error) {
+func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapKind, preallocate int64) (err error) {
alreadyHasSuperBlock := false
hasLoadedVolume := false
diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go
index f28ee50e6..07376bc88 100644
--- a/weed/storage/volume_read_write.go
+++ b/weed/storage/volume_read_write.go
@@ -41,9 +41,9 @@ func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
nv, ok := v.nm.Get(n.Id)
if ok && !nv.Offset.IsZero() && nv.Size.IsValid() {
oldNeedle := new(needle.Needle)
- err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
+ err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), nv.Size, v.Version())
if err != nil {
- glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
+ glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err)
return false
}
if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
@@ -113,7 +113,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
// check whether existing needle cookie matches
nv, ok := v.nm.Get(n.Id)
if ok {
- existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
+ existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset())
if existingNeedleReadErr != nil {
err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
return
@@ -136,7 +136,7 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan
v.lastAppendAtNs = n.AppendAtNs
// add to needle map
- if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
+ if !ok || uint64(nv.Offset.ToActualOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
}
@@ -179,7 +179,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
// check whether existing needle cookie matches
nv, ok := v.nm.Get(n.Id)
if ok {
- existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
+ existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset())
if existingNeedleReadErr != nil {
err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
return
@@ -201,7 +201,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU
v.lastAppendAtNs = n.AppendAtNs
// add to needle map
- if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
+ if !ok || uint64(nv.Offset.ToActualOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
}
@@ -303,9 +303,9 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
if readSize == 0 {
return 0, nil
}
- err := n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), readSize, v.Version())
+ err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
- err = n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
+ err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
}
v.checkReadWriteError(err)
if err != nil {
@@ -410,7 +410,7 @@ type VolumeFileScanner interface {
}
func ScanVolumeFile(dirname string, collection string, id needle.VolumeId,
- needleMapKind NeedleMapType,
+ needleMapKind NeedleMapKind,
volumeFileScanner VolumeFileScanner) (err error) {
var v *Volume
if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil {
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index 5884eca87..c17c9c937 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -280,11 +280,11 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
//updated needle
if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size.IsValid() {
//even the needle cache in memory is hit, the need_bytes is correct
- glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
+ glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size)
var needleBytes []byte
- needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version())
+ needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, v.Version())
if err != nil {
- return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, err)
+ return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, err)
}
dst.Write(needleBytes)
util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize))
@@ -339,7 +339,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
}
nv, ok := scanner.v.nm.Get(n.Id)
glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
- if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size.IsValid() {
+ if ok && nv.Offset.ToActualOffset() == offset && nv.Size > 0 && nv.Size.IsValid() {
if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
}
@@ -422,7 +422,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
}
n := new(needle.Needle)
- err := n.ReadData(srcDatBackend, offset.ToAcutalOffset(), size, version)
+ err := n.ReadData(srcDatBackend, offset.ToActualOffset(), size, version)
if err != nil {
return nil
}
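
copyDataBasedOnIndexFile, touched above, drives the copy purely from .idx entries. A hypothetical sketch of that traversal, assuming the common 16-byte entry layout (8-byte key, 4-byte offset in NeedlePaddingSize units, 4-byte size, big-endian, consistent with the idxEntryBytes[8:12] slice seen above); builds with 5-byte offsets lay entries out differently.

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// walkIndex reads fixed-size entries until EOF and hands each one to visit.
func walkIndex(idxPath string, visit func(key uint64, offset, size uint32)) error {
	f, err := os.Open(idxPath)
	if err != nil {
		return err
	}
	defer f.Close()
	entry := make([]byte, 16)
	for {
		if _, err := io.ReadFull(f, entry); err != nil {
			if err == io.EOF {
				return nil // clean end of index
			}
			return err // truncated entry or read failure
		}
		visit(
			binary.BigEndian.Uint64(entry[0:8]),
			binary.BigEndian.Uint32(entry[8:12]),
			binary.BigEndian.Uint32(entry[12:16]),
		)
	}
}

func main() {
	_ = walkIndex("/tmp/1.idx", func(key uint64, offset, size uint32) {
		fmt.Printf("key=%d offset=%d size=%d\n", key, offset, size)
	})
}
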
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index bf9557ca0..816139ac3 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -163,7 +163,10 @@ func (dn *DataNode) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, erro
}
func (dn *DataNode) GetDataCenter() *DataCenter {
- return dn.Parent().Parent().(*NodeImpl).value.(*DataCenter)
+ rack := dn.Parent()
+ dcNode := rack.Parent()
+ dcValue := dcNode.GetValue()
+ return dcValue.(*DataCenter)
}
func (dn *DataNode) GetRack() *Rack {
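
GetDataCenter now climbs the tree through exported accessors instead of asserting to the concrete *NodeImpl and reaching into its unexported value field, so the traversal no longer depends on the node's concrete type. A self-contained sketch of the pattern, with illustrative types:

package main

import "fmt"

type Node interface {
	Parent() Node
	GetValue() interface{}
}

type nodeImpl struct {
	parent Node
	value  interface{}
}

func (n *nodeImpl) Parent() Node          { return n.parent }
func (n *nodeImpl) GetValue() interface{} { return n.value }

type DataCenter struct{ name string }

func main() {
	dc := &nodeImpl{value: &DataCenter{name: "dc1"}}
	rack := &nodeImpl{parent: dc}
	dn := &nodeImpl{parent: rack}

	// interface-only traversal: only the payload needs a type assertion
	got := dn.Parent().Parent().GetValue().(*DataCenter)
	fmt.Println(got.name)
}
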
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 3b1feefa4..e21922310 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -220,7 +220,7 @@ func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) {
func (vl *VolumeLayout) isAllWritable(vid needle.VolumeId) bool {
for _, dn := range vl.vid2location[vid].list {
- if v, found := dn.volumes[vid]; found {
+ if v, getError := dn.GetVolumesById(vid); getError == nil {
if v.ReadOnly {
return false
}
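
isAllWritable previously indexed dn.volumes directly from outside the data node; going through GetVolumesById lets the owner serve a copy under its own lock. A sketch of that locked-accessor pattern, with assumed VolumeInfo and error shapes:

package main

import (
	"fmt"
	"sync"
)

type VolumeInfo struct{ ReadOnly bool }

type DataNode struct {
	sync.RWMutex
	volumes map[uint32]VolumeInfo
}

// GetVolumesById returns a copy of the entry under the read lock, so the
// caller never holds a reference into the concurrently mutated map.
func (dn *DataNode) GetVolumesById(id uint32) (VolumeInfo, error) {
	dn.RLock()
	defer dn.RUnlock()
	v, found := dn.volumes[id]
	if !found {
		return VolumeInfo{}, fmt.Errorf("volume %d not found", id)
	}
	return v, nil
}

func main() {
	dn := &DataNode{volumes: map[uint32]VolumeInfo{7: {ReadOnly: true}}}
	if v, err := dn.GetVolumesById(7); err == nil {
		fmt.Println("read-only:", v.ReadOnly)
	}
}
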
diff --git a/weed/util/chunk_cache/chunk_cache_in_memory.go b/weed/util/chunk_cache/chunk_cache_in_memory.go
index 931e45e9a..1eb00e1fa 100644
--- a/weed/util/chunk_cache/chunk_cache_in_memory.go
+++ b/weed/util/chunk_cache/chunk_cache_in_memory.go
@@ -3,7 +3,7 @@ package chunk_cache
import (
"time"
- "github.com/karlseguin/ccache"
+ "github.com/karlseguin/ccache/v2"
)
// a global cache for recently accessed file chunks
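
The import gains a /v2 suffix because Go modules use semantic import versioning: a breaking major release lives under a versioned import path while the package name stays ccache, so call sites are unchanged. A minimal sketch; the required version in the comment is illustrative, not taken from this change.

// go.mod must require the matching major version, e.g.:
//
//   require github.com/karlseguin/ccache/v2 v2.0.2
//
package chunk_cache_example

import (
	"time"

	"github.com/karlseguin/ccache/v2"
)

// the package identifier is still "ccache"; only the path carries /v2
var cache = ccache.New(ccache.Configure().MaxSize(1000))

func put(key string, data []byte) {
	cache.Set(key, data, time.Hour)
}
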
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go
index 356dfe188..d724e925e 100644
--- a/weed/util/chunk_cache/chunk_cache_on_disk.go
+++ b/weed/util/chunk_cache/chunk_cache_on_disk.go
@@ -107,9 +107,9 @@ func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
return nil, storage.ErrorNotFound
}
data := make([]byte, nv.Size)
- if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToAcutalOffset()); readErr != nil {
+ if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil {
return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
- v.fileName, nv.Offset.ToAcutalOffset(), nv.Offset.ToAcutalOffset()+int64(nv.Size), readErr)
+ v.fileName, nv.Offset.ToActualOffset(), nv.Offset.ToActualOffset()+int64(nv.Size), readErr)
} else {
if readSize != int(nv.Size) {
return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
diff --git a/weed/util/config.go b/weed/util/config.go
index 94e621e34..ee805f26a 100644
--- a/weed/util/config.go
+++ b/weed/util/config.go
@@ -2,6 +2,7 @@ package util
import (
"strings"
+ "sync"
"github.com/spf13/viper"
@@ -28,11 +29,11 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) {
glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed())
if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file
- logLevel := glog.Level(0)
if strings.Contains(err.Error(), "Not Found") {
- logLevel = 1
+ glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
+ } else {
+ glog.Fatalf("Reading %s: %v", viper.ConfigFileUsed(), err)
}
- glog.V(logLevel).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
if required {
glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+
"\n\nPlease use this command to generate the default %s.toml file\n"+
@@ -46,11 +47,55 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) {
return true
}
-func GetViper() *viper.Viper {
- v := &viper.Viper{}
- *v = *viper.GetViper()
- v.AutomaticEnv()
- v.SetEnvPrefix("weed")
- v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
- return v
+type ViperProxy struct {
+ *viper.Viper
+ sync.Mutex
+}
+
+var (
+ vp = &ViperProxy{}
+)
+
+func (vp *ViperProxy) SetDefault(key string, value interface{}) {
+ vp.Lock()
+ defer vp.Unlock()
+ vp.Viper.SetDefault(key, value)
+}
+
+func (vp *ViperProxy) GetString(key string) string {
+ vp.Lock()
+ defer vp.Unlock()
+ return vp.Viper.GetString(key)
+}
+
+func (vp *ViperProxy) GetBool(key string) bool {
+ vp.Lock()
+ defer vp.Unlock()
+ return vp.Viper.GetBool(key)
+}
+
+func (vp *ViperProxy) GetInt(key string) int {
+ vp.Lock()
+ defer vp.Unlock()
+ return vp.Viper.GetInt(key)
+}
+
+func (vp *ViperProxy) GetStringSlice(key string) []string {
+ vp.Lock()
+ defer vp.Unlock()
+ return vp.Viper.GetStringSlice(key)
+}
+
+func GetViper() *ViperProxy {
+ vp.Lock()
+ defer vp.Unlock()
+
+ if vp.Viper == nil {
+ vp.Viper = viper.GetViper()
+ vp.AutomaticEnv()
+ vp.SetEnvPrefix("weed")
+ vp.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+ }
+
+ return vp
}
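
GetViper used to shallow-copy the global viper instance on every call; it now hands out a single mutex-guarded proxy, since *viper.Viper is not safe for concurrent use. A self-contained sketch of the same wrap-a-handle-in-a-mutex pattern; the config key is made up for illustration.

package main

import (
	"fmt"
	"sync"

	"github.com/spf13/viper"
)

type safeConfig struct {
	*viper.Viper
	sync.Mutex
}

// GetString shadows the embedded method so every read is serialized.
func (c *safeConfig) GetString(key string) string {
	c.Lock()
	defer c.Unlock()
	return c.Viper.GetString(key)
}

func main() {
	c := &safeConfig{Viper: viper.New()}
	c.Viper.Set("filer.host", "localhost") // direct use is fine before goroutines start
	fmt.Println(c.GetString("filer.host"))
}
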
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 89155e9a2..6001ae78e 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,7 +5,7 @@ import (
)
var (
- VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 16)
+ VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 24)
COMMIT = ""
)
diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go
index e4310b5c5..f84c674ff 100644
--- a/weed/util/log_buffer/log_buffer.go
+++ b/weed/util/log_buffer/log_buffer.go
@@ -28,6 +28,7 @@ type LogBuffer struct {
pos int
startTime time.Time
stopTime time.Time
+ lastFlushTime time.Time
sizeBuf []byte
flushInterval time.Duration
flushFn func(startTime, stopTime time.Time, buf []byte)
@@ -129,6 +130,7 @@ func (m *LogBuffer) loopFlush() {
// fmt.Printf("flush [%v, %v] size %d\n", d.startTime, d.stopTime, len(d.data.Bytes()))
m.flushFn(d.startTime, d.stopTime, d.data.Bytes())
d.releaseMemory()
+ m.lastFlushTime = d.stopTime
}
}
}
@@ -174,10 +176,14 @@ func (d *dataToFlush) releaseMemory() {
bufferPool.Put(d.data)
}
-func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Buffer) {
+func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Buffer, err error) {
m.RLock()
defer m.RUnlock()
+ if !m.lastFlushTime.IsZero() && m.lastFlushTime.After(lastReadTime) {
+ return nil, ResumeFromDiskError
+ }
+
/*
fmt.Printf("read buffer %p: %v last stop time: [%v,%v], pos %d, entries:%d, prevBufs:%d\n", m, lastReadTime, m.startTime, m.stopTime, m.pos, len(m.idx), len(m.prevBuffers.buffers))
for i, prevBuf := range m.prevBuffers.buffers {
@@ -186,11 +192,11 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Bu
*/
if lastReadTime.Equal(m.stopTime) {
- return nil
+ return nil, nil
}
if lastReadTime.After(m.stopTime) {
// glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadTime, m.stopTime)
- return nil
+ return nil, nil
}
if lastReadTime.Before(m.startTime) {
// println("checking ", lastReadTime.UnixNano())
@@ -198,19 +204,19 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Bu
if buf.startTime.After(lastReadTime) {
if i == 0 {
// println("return the earliest in memory", buf.startTime.UnixNano())
- return copiedBytes(buf.buf[:buf.size])
+ return copiedBytes(buf.buf[:buf.size]), nil
}
// println("return the", i, "th in memory", buf.startTime.UnixNano())
- return copiedBytes(buf.buf[:buf.size])
+ return copiedBytes(buf.buf[:buf.size]), nil
}
if !buf.startTime.After(lastReadTime) && buf.stopTime.After(lastReadTime) {
pos := buf.locateByTs(lastReadTime)
// fmt.Printf("locate buffer[%d] pos %d\n", i, pos)
- return copiedBytes(buf.buf[pos:buf.size])
+ return copiedBytes(buf.buf[pos:buf.size]), nil
}
}
// println("return the current buf", lastReadTime.UnixNano())
- return copiedBytes(m.buf[:m.pos])
+ return copiedBytes(m.buf[:m.pos]), nil
}
lastTs := lastReadTime.UnixNano()
@@ -243,7 +249,7 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Bu
}
if prevT <= lastTs {
// fmt.Printf("found l=%d, m-1=%d(ts=%d), m=%d(ts=%d), h=%d [%d, %d) \n", l, mid-1, prevT, mid, t, h, pos, m.pos)
- return copiedBytes(m.buf[pos:m.pos])
+ return copiedBytes(m.buf[pos:m.pos]), nil
}
h = mid
}
@@ -251,7 +257,7 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Bu
}
// FIXME: this could be that the buffer has been flushed already
- return nil
+ return nil, nil
}
func (m *LogBuffer) ReleaseMemory(b *bytes.Buffer) {
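
The new lastFlushTime field and the ResumeFromDiskError return encode one invariant: entries at or before the last flush have left memory, so a reader whose cursor is older than that point must be redirected to the persisted log. A simplified sketch of just that guard, with stand-in types:

package main

import (
	"errors"
	"fmt"
	"time"
)

var ResumeFromDiskError = errors.New("resumeFromDisk")

type buffer struct {
	lastFlushTime time.Time
}

// readFrom mirrors the new guard in ReadFromBuffer: data before the last
// flush now lives on disk only, so the caller must re-read it from there.
func (b *buffer) readFrom(lastReadTime time.Time) error {
	if !b.lastFlushTime.IsZero() && b.lastFlushTime.After(lastReadTime) {
		return ResumeFromDiskError
	}
	return nil // safe to serve from the in-memory buffers
}

func main() {
	b := &buffer{lastFlushTime: time.Now()}
	err := b.readFrom(time.Now().Add(-time.Minute))
	fmt.Println(errors.Is(err, ResumeFromDiskError)) // true
}
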
diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go
index 57f4b0115..d6917abfe 100644
--- a/weed/util/log_buffer/log_read.go
+++ b/weed/util/log_buffer/log_read.go
@@ -13,7 +13,8 @@ import (
)
var (
- ResumeError = fmt.Errorf("resume")
+ ResumeError = fmt.Errorf("resume")
+ ResumeFromDiskError = fmt.Errorf("resumeFromDisk")
)
func (logBuffer *LogBuffer) LoopProcessLogData(
@@ -34,7 +35,10 @@ func (logBuffer *LogBuffer) LoopProcessLogData(
if bytesBuf != nil {
logBuffer.ReleaseMemory(bytesBuf)
}
- bytesBuf = logBuffer.ReadFromBuffer(lastReadTime)
+ bytesBuf, err = logBuffer.ReadFromBuffer(lastReadTime)
+ if err == ResumeFromDiskError {
+ return lastReadTime, ResumeFromDiskError
+ }
// fmt.Printf("ReadFromBuffer by %v\n", lastReadTime)
if bytesBuf == nil {
if waitForDataFn() {
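
A hypothetical caller-side loop for the new sentinel: alternate between tailing memory and catching up from disk. tailMemory stands in for LogBuffer.LoopProcessLogData and readFromDisk for a made-up persisted-log reader; neither is part of this change.

package main

import (
	"errors"
	"fmt"
	"time"
)

var ResumeFromDiskError = errors.New("resumeFromDisk")

// subscribe re-enters the in-memory tail loop after every disk catch-up.
func subscribe(cursor time.Time,
	tailMemory, readFromDisk func(time.Time) (time.Time, error)) error {
	for {
		var err error
		if cursor, err = tailMemory(cursor); err != ResumeFromDiskError {
			return err // nil on clean shutdown, or a real failure
		}
		if cursor, err = readFromDisk(cursor); err != nil {
			return err
		}
	}
}

func main() {
	calls := 0
	tail := func(t time.Time) (time.Time, error) {
		if calls++; calls == 1 {
			return t, ResumeFromDiskError // buffer already flushed past t
		}
		return time.Now(), nil // pretend we caught up and finished
	}
	disk := func(t time.Time) (time.Time, error) { return time.Now(), nil }
	fmt.Println(subscribe(time.Time{}, tail, disk)) // <nil>
}
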
diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go
index 773da0191..271baa132 100644
--- a/weed/wdclient/vid_map.go
+++ b/weed/wdclient/vid_map.go
@@ -15,6 +15,12 @@ const (
maxCursorIndex = 4096
)
+type HasLookupFileIdFunction interface {
+ GetLookupFileIdFunction() LookupFileIdFunctionType
+}
+
+type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)
+
type Location struct {
Url string `json:"url,omitempty"`
PublicUrl string `json:"publicUrl,omitempty"`
@@ -67,6 +73,10 @@ func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err er
return
}
+func (vc *vidMap) GetLookupFileIdFunction() LookupFileIdFunctionType {
+ return vc.LookupFileId
+}
+
func (vc *vidMap) LookupFileId(fileId string) (fullUrls []string, err error) {
parts := strings.Split(fileId, ",")
if len(parts) != 2 {
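
The new HasLookupFileIdFunction interface lets higher layers accept anything that can hand out a lookup function, not just the concrete vidMap. A self-contained sketch with a stub client; fetchFirst is a made-up helper for illustration.

package main

import "fmt"

type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)

type HasLookupFileIdFunction interface {
	GetLookupFileIdFunction() LookupFileIdFunctionType
}

// fetchFirst resolves a file id through whatever client it is given.
func fetchFirst(c HasLookupFileIdFunction, fileId string) (string, error) {
	urls, err := c.GetLookupFileIdFunction()(fileId)
	if err != nil || len(urls) == 0 {
		return "", fmt.Errorf("lookup %s: %v", fileId, err)
	}
	return urls[0], nil
}

// a stub client standing in for a vidMap-backed implementation
type stubClient struct{}

func (stubClient) GetLookupFileIdFunction() LookupFileIdFunctionType {
	return func(fileId string) ([]string, error) {
		return []string{"http://127.0.0.1:8080/" + fileId}, nil
	}
}

func main() {
	url, _ := fetchFirst(stubClient{}, "3,01637037d6")
	fmt.Println(url)
}
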