diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml
new file mode 100644
index 000000000..1d8454bf7
--- /dev/null
+++ b/other/java/client/pom_debug.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.github.chrislusf</groupId>
+    <artifactId>seaweedfs-client</artifactId>
+    <version>1.2.9</version>
+
+    <parent>
+        <groupId>org.sonatype.oss</groupId>
+        <artifactId>oss-parent</artifactId>
+        <version>9</version>
+    </parent>
+
+    <properties>
+        <protobuf.version>3.9.1</protobuf.version>
+        <!-- follow https://github.com/grpc/grpc-java -->
+        <grpc.version>1.23.0</grpc.version>
+        <guava.version>28.0-jre</guava.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.moandjiezana.toml</groupId>
+            <artifactId>toml4j</artifactId>
+            <version>0.7.2</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+            <version>${protobuf.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>${guava.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty-shaded</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.7.25</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpmime</artifactId>
+            <version>4.5.6</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.12</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <extensions>
+            <extension>
+                <groupId>kr.motd.maven</groupId>
+                <artifactId>os-maven-plugin</artifactId>
+                <version>1.6.2</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>8</source>
+                    <target>8</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.xolstice.maven.plugins</groupId>
+                <artifactId>protobuf-maven-plugin</artifactId>
+                <version>0.6.1</version>
+                <configuration>
+                    <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>
+                    <pluginId>grpc-java</pluginId>
+                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>compile</goal>
+                            <goal>compile-custom</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+                <version>2.2.1</version>
+                <executions>
+                    <execution>
+                        <id>attach-sources</id>
+                        <goals>
+                            <goal>jar-no-fork</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <version>2.9.1</version>
+                <executions>
+                    <execution>
+                        <id>attach-javadocs</id>
+                        <goals>
+                            <goal>jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
diff --git a/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java
new file mode 100644
index 000000000..e249d4524
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java
@@ -0,0 +1,27 @@
+package seaweedfs.client;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+
+import java.util.concurrent.TimeUnit;
+
+public class ChunkCache {
+
+ private final Cache<String, byte[]> cache;
+
+ public ChunkCache(int maxEntries) {
+ this.cache = CacheBuilder.newBuilder()
+ .maximumSize(maxEntries)
+ .expireAfterAccess(1, TimeUnit.HOURS)
+ .build();
+ }
+
+ public byte[] getChunk(String fileId) {
+ return this.cache.getIfPresent(fileId);
+ }
+
+ public void setChunk(String fileId, byte[] data) {
+ this.cache.put(fileId, data);
+ }
+
+}
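
A minimal usage sketch of the new cache (the file ids here are hypothetical examples; in this PR the cache is populated internally by SeaweedRead and SeaweedWrite):

    ChunkCache cache = new ChunkCache(16);              // keep up to 16 chunks, each for at most 1 hour
    cache.setChunk("3,01637037d6", "hello".getBytes()); // key is a SeaweedFS file id
    byte[] hit = cache.getChunk("3,01637037d6");        // returns the cached bytes
    byte[] miss = cache.getChunk("7,deadbeef00");       // returns null when absent
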
diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
index 63d0d8320..ef32c7e9a 100644
--- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
+++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
@@ -7,13 +7,14 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Iterator;
import java.util.List;
public class FilerClient {
private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);
- private FilerGrpcClient filerGrpcClient;
+ private final FilerGrpcClient filerGrpcClient;
public FilerClient(String host, int grpcPort) {
filerGrpcClient = new FilerGrpcClient(host, grpcPort);
@@ -34,13 +35,12 @@ public class FilerClient {
public boolean mkdirs(String path, int mode, int uid, int gid, String userName, String[] groupNames) {
- Path pathObject = Paths.get(path);
- String parent = pathObject.getParent().toString();
- String name = pathObject.getFileName().toString();
-
if ("/".equals(path)) {
return true;
}
+ Path pathObject = Paths.get(path);
+ String parent = pathObject.getParent().toString();
+ String name = pathObject.getFileName().toString();
mkdirs(parent, mode, uid, gid, userName, groupNames);
@@ -51,23 +51,38 @@ public class FilerClient {
}
return createEntry(
- parent,
- newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build()
+ parent,
+ newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build()
);
}
- public boolean rm(String path, boolean isRecursive) {
+ public boolean mv(String oldPath, String newPath) {
+
+ Path oldPathObject = Paths.get(oldPath);
+ String oldParent = oldPathObject.getParent().toString();
+ String oldName = oldPathObject.getFileName().toString();
+
+ Path newPathObject = Paths.get(newPath);
+ String newParent = newPathObject.getParent().toString();
+ String newName = newPathObject.getFileName().toString();
+
+ return atomicRenameEntry(oldParent, oldName, newParent, newName);
+
+ }
+
+ public boolean rm(String path, boolean isRecursive, boolean ignoreRecursiveError) {
Path pathObject = Paths.get(path);
String parent = pathObject.getParent().toString();
String name = pathObject.getFileName().toString();
return deleteEntry(
- parent,
- name,
- true,
- isRecursive);
+ parent,
+ name,
+ true,
+ isRecursive,
+ ignoreRecursiveError);
}
public boolean touch(String path, int mode) {
@@ -84,18 +99,18 @@ public class FilerClient {
FilerProto.Entry entry = lookupEntry(parent, name);
if (entry == null) {
return createEntry(
- parent,
- newFileEntry(name, mode, uid, gid, userName, groupNames).build()
+ parent,
+ newFileEntry(name, mode, uid, gid, userName, groupNames).build()
);
}
long now = System.currentTimeMillis() / 1000L;
FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder()
- .setMtime(now)
- .setUid(uid)
- .setGid(gid)
- .setUserName(userName)
- .clearGroupName()
- .addAllGroupName(Arrays.asList(groupNames));
+ .setMtime(now)
+ .setUid(uid)
+ .setGid(gid)
+ .setUserName(userName)
+ .clearGroupName()
+ .addAllGroupName(Arrays.asList(groupNames));
return updateEntry(parent, entry.toBuilder().setAttributes(attr).build());
}
@@ -105,17 +120,17 @@ public class FilerClient {
long now = System.currentTimeMillis() / 1000L;
return FilerProto.Entry.newBuilder()
- .setName(name)
- .setIsDirectory(true)
- .setAttributes(FilerProto.FuseAttributes.newBuilder()
- .setMtime(now)
- .setCrtime(now)
- .setUid(uid)
- .setGid(gid)
- .setFileMode(mode | 1 << 31)
- .setUserName(userName)
- .clearGroupName()
- .addAllGroupName(Arrays.asList(groupNames)));
+ .setName(name)
+ .setIsDirectory(true)
+ .setAttributes(FilerProto.FuseAttributes.newBuilder()
+ .setMtime(now)
+ .setCrtime(now)
+ .setUid(uid)
+ .setGid(gid)
+ .setFileMode(mode | 1 << 31)
+ .setUserName(userName)
+ .clearGroupName()
+ .addAllGroupName(Arrays.asList(groupNames)));
}
public FilerProto.Entry.Builder newFileEntry(String name, int mode,
@@ -124,17 +139,17 @@ public class FilerClient {
long now = System.currentTimeMillis() / 1000L;
return FilerProto.Entry.newBuilder()
- .setName(name)
- .setIsDirectory(false)
- .setAttributes(FilerProto.FuseAttributes.newBuilder()
- .setMtime(now)
- .setCrtime(now)
- .setUid(uid)
- .setGid(gid)
- .setFileMode(mode)
- .setUserName(userName)
- .clearGroupName()
- .addAllGroupName(Arrays.asList(groupNames)));
+ .setName(name)
+ .setIsDirectory(false)
+ .setAttributes(FilerProto.FuseAttributes.newBuilder()
+ .setMtime(now)
+ .setCrtime(now)
+ .setUid(uid)
+ .setGid(gid)
+ .setFileMode(mode)
+ .setUserName(userName)
+ .clearGroupName()
+ .addAllGroupName(Arrays.asList(groupNames)));
}
public List<FilerProto.Entry> listEntries(String path) {
@@ -159,22 +174,35 @@ public class FilerClient {
}
public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit) {
- return filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
- .setDirectory(path)
- .setPrefix(entryPrefix)
- .setStartFromFileName(lastEntryName)
- .setLimit(limit)
- .build()).getEntriesList();
+ Iterator<FilerProto.ListEntriesResponse> iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
+ .setDirectory(path)
+ .setPrefix(entryPrefix)
+ .setStartFromFileName(lastEntryName)
+ .setLimit(limit)
+ .build());
+ List<FilerProto.Entry> entries = new ArrayList<>();
+ while (iter.hasNext()) {
+ FilerProto.ListEntriesResponse resp = iter.next();
+ entries.add(fixEntryAfterReading(resp.getEntry()));
+ }
+ return entries;
}
public FilerProto.Entry lookupEntry(String directory, String entryName) {
try {
- return filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
- FilerProto.LookupDirectoryEntryRequest.newBuilder()
- .setDirectory(directory)
- .setName(entryName)
- .build()).getEntry();
+ FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
+ FilerProto.LookupDirectoryEntryRequest.newBuilder()
+ .setDirectory(directory)
+ .setName(entryName)
+ .build()).getEntry();
+ if (entry == null) {
+ return null;
+ }
+ return fixEntryAfterReading(entry);
} catch (Exception e) {
+ if (e.getMessage() != null && e.getMessage().contains("filer: no entry is found in filer store")) {
+ return null;
+ }
LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e);
return null;
}
@@ -184,9 +212,9 @@ public class FilerClient {
public boolean createEntry(String parent, FilerProto.Entry entry) {
try {
filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
- .setDirectory(parent)
- .setEntry(entry)
- .build());
+ .setDirectory(parent)
+ .setEntry(entry)
+ .build());
} catch (Exception e) {
LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
return false;
@@ -197,9 +225,9 @@ public class FilerClient {
public boolean updateEntry(String parent, FilerProto.Entry entry) {
try {
filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
- .setDirectory(parent)
- .setEntry(entry)
- .build());
+ .setDirectory(parent)
+ .setEntry(entry)
+ .build());
} catch (Exception e) {
LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
return false;
@@ -207,14 +235,15 @@ public class FilerClient {
return true;
}
- public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive) {
+ public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecursiveError) {
try {
filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
- .setDirectory(parent)
- .setName(entryName)
- .setIsDeleteData(isDeleteFileChunk)
- .setIsRecursive(isRecursive)
- .build());
+ .setDirectory(parent)
+ .setName(entryName)
+ .setIsDeleteData(isDeleteFileChunk)
+ .setIsRecursive(isRecursive)
+ .setIgnoreRecursiveError(ignoreRecursiveError)
+ .build());
} catch (Exception e) {
LOG.warn("deleteEntry {}/{}: {}", parent, entryName, e);
return false;
@@ -222,4 +251,39 @@ public class FilerClient {
return true;
}
+ public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) {
+ try {
+ filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
+ .setOldDirectory(oldParent)
+ .setOldName(oldName)
+ .setNewDirectory(newParent)
+ .setNewName(newName)
+ .build());
+ } catch (Exception e) {
+ LOG.warn("atomicRenameEntry {}/{} => {}/{}: {}", oldParent, oldName, newParent, newName, e);
+ return false;
+ }
+ return true;
+ }
+
+ private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) {
+ if (entry.getChunksList().size() <= 0) {
+ return entry;
+ }
+ String fileId = entry.getChunks(0).getFileId();
+ if (fileId != null && fileId.length() != 0) {
+ return entry;
+ }
+ FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
+ entryBuilder.clearChunks();
+ for (FilerProto.FileChunk chunk : entry.getChunksList()) {
+ FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
+ FilerProto.FileId fid = chunk.getFid();
+ fileId = String.format("%d,%x%08x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
+ chunkBuilder.setFileId(fileId);
+ entryBuilder.addChunks(chunkBuilder);
+ }
+ return entryBuilder.build();
+ }
+
}
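
A short sketch of the new rename and delete entry points (assumes a filer reachable at localhost:18888, as in SeaweedFilerTest below):

    FilerClient filerClient = new FilerClient("localhost", 18888);
    filerClient.mkdirs("/tmp/demo", 0755);
    filerClient.mv("/tmp/demo", "/tmp/demo2"); // atomic rename via the new AtomicRenameEntry RPC
    filerClient.rm("/tmp/demo2", true, true);  // recursive delete, ignoring recursion errors
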
diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java
index 16b7c3249..3f5d1e8e9 100644
--- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java
+++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java
@@ -2,22 +2,55 @@ package seaweedfs.client;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
+import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
+import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import javax.net.ssl.SSLException;
import java.util.concurrent.TimeUnit;
-import java.util.logging.Logger;
public class FilerGrpcClient {
- private static final Logger logger = Logger.getLogger(FilerGrpcClient.class.getName());
+ private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class);
+ static SslContext sslContext;
+
+ static {
+ try {
+ sslContext = FilerSslContext.loadSslContext();
+ } catch (SSLException e) {
+ logger.warn("failed to load ssl context", e);
+ }
+ }
private final ManagedChannel channel;
private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;
-
+ private boolean cipher = false;
+ private String collection = "";
+ private String replication = "";
public FilerGrpcClient(String host, int grpcPort) {
- this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext());
+ this(host, grpcPort, sslContext);
+ }
+
+ public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {
+
+ this(sslContext == null ?
+ ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() :
+ NettyChannelBuilder.forAddress(host, grpcPort)
+ .negotiationType(NegotiationType.TLS)
+ .sslContext(sslContext));
+
+ FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
+ this.getBlockingStub().getFilerConfiguration(
+ FilerProto.GetFilerConfigurationRequest.newBuilder().build());
+ cipher = filerConfigurationResponse.getCipher();
+ collection = filerConfigurationResponse.getCollection();
+ replication = filerConfigurationResponse.getReplication();
+
}
public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
@@ -27,6 +60,18 @@ public class FilerGrpcClient {
futureStub = SeaweedFilerGrpc.newFutureStub(channel);
}
+ public boolean isCipher() {
+ return cipher;
+ }
+
+ public String getCollection() {
+ return collection;
+ }
+
+ public String getReplication() {
+ return replication;
+ }
+
public void shutdown() throws InterruptedException {
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
}
@@ -42,4 +87,5 @@ public class FilerGrpcClient {
public SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() {
return futureStub;
}
+
}
diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java b/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java
new file mode 100644
index 000000000..5a88c1da3
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java
@@ -0,0 +1,64 @@
+package seaweedfs.client;
+
+import com.google.common.base.Strings;
+import com.moandjiezana.toml.Toml;
+import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder;
+import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.ssl.SSLException;
+import java.io.File;
+
+public class FilerSslContext {
+
+ private static final Logger logger = LoggerFactory.getLogger(FilerSslContext.class);
+
+ public static SslContext loadSslContext() throws SSLException {
+ String securityFileName = "security.toml";
+ String home = System.getProperty("user.home");
+ File f1 = new File("./" + securityFileName);
+ File f2 = new File(home + "/.seaweedfs/" + securityFileName);
+ File f3 = new File(home + "/etc/seaweedfs/" + securityFileName);
+
+ File securityFile = f1.exists() ? f1 : f2.exists() ? f2 : f3.exists() ? f3 : null;
+
+ if (securityFile == null) {
+ return null;
+ }
+
+ Toml toml = new Toml().read(securityFile);
+ logger.debug("reading ssl setup from {}", securityFile);
+
+ String trustCertCollectionFilePath = toml.getString("grpc.ca");
+ logger.debug("loading ca from {}", trustCertCollectionFilePath);
+ String clientCertChainFilePath = toml.getString("grpc.client.cert");
+ logger.debug("loading client ca from {}", clientCertChainFilePath);
+ String clientPrivateKeyFilePath = toml.getString("grpc.client.key");
+ logger.debug("loading client key from {}", clientPrivateKeyFilePath);
+
+ if (Strings.isNullOrEmpty(clientCertChainFilePath) && Strings.isNullOrEmpty(clientPrivateKeyFilePath)) {
+ return null;
+ }
+
+ // possibly fix the format https://netty.io/wiki/sslcontextbuilder-and-private-key.html
+
+ return buildSslContext(trustCertCollectionFilePath, clientCertChainFilePath, clientPrivateKeyFilePath);
+ }
+
+
+ private static SslContext buildSslContext(String trustCertCollectionFilePath,
+ String clientCertChainFilePath,
+ String clientPrivateKeyFilePath) throws SSLException {
+ SslContextBuilder builder = GrpcSslContexts.forClient();
+ if (trustCertCollectionFilePath != null) {
+ builder.trustManager(new File(trustCertCollectionFilePath));
+ }
+ if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) {
+ builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath));
+ }
+ // note: this overrides the trust manager configured above and accepts any server certificate
+ return builder.trustManager(InsecureTrustManagerFactory.INSTANCE).build();
+ }
+}
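
For reference, a hypothetical security.toml matching the keys read above (the paths are placeholders; the file is looked up in ./, ~/.seaweedfs/, and ~/etc/seaweedfs/):

    [grpc]
    ca = "/etc/seaweedfs/certs/ca.pem"

    [grpc.client]
    cert = "/etc/seaweedfs/certs/client.pem"
    key = "/etc/seaweedfs/certs/client.key"
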
diff --git a/other/java/client/src/main/java/seaweedfs/client/Gzip.java b/other/java/client/src/main/java/seaweedfs/client/Gzip.java
new file mode 100644
index 000000000..248285dd3
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/Gzip.java
@@ -0,0 +1,37 @@
+package seaweedfs.client;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+
+public class Gzip {
+ public static byte[] compress(byte[] data) throws IOException {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length);
+ GZIPOutputStream gzip = new GZIPOutputStream(bos);
+ gzip.write(data);
+ gzip.close();
+ byte[] compressed = bos.toByteArray();
+ bos.close();
+ return compressed;
+ }
+
+ public static byte[] decompress(byte[] compressed) throws IOException {
+ ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
+ GZIPInputStream gis = new GZIPInputStream(bis);
+ return readAll(gis);
+ }
+
+ private static byte[] readAll(InputStream input) throws IOException {
+ try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
+ byte[] buffer = new byte[4096];
+ int n;
+ while (-1 != (n = input.read(buffer))) {
+ output.write(buffer, 0, n);
+ }
+ return output.toByteArray();
+ }
+ }
+}
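
A round-trip sketch of the helper:

    byte[] original = "some compressible text".getBytes(StandardCharsets.UTF_8);
    byte[] packed = Gzip.compress(original);
    byte[] unpacked = Gzip.decompress(packed); // Arrays.equals(original, unpacked) == true
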
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java
new file mode 100644
index 000000000..8d0ebd755
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java
@@ -0,0 +1,55 @@
+package seaweedfs.client;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.SecureRandom;
+
+public class SeaweedCipher {
+ // AES-GCM parameters
+ public static final int AES_KEY_SIZE = 256; // in bits
+ public static final int GCM_NONCE_LENGTH = 12; // in bytes
+ public static final int GCM_TAG_LENGTH = 16; // in bytes
+
+ private static SecureRandom random = new SecureRandom();
+
+ public static byte[] genCipherKey() throws Exception {
+ byte[] key = new byte[AES_KEY_SIZE / 8];
+ random.nextBytes(key);
+ return key;
+ }
+
+ public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception {
+ return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey);
+ }
+
+ public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception {
+
+ final byte[] nonce = new byte[GCM_NONCE_LENGTH];
+ random.nextBytes(nonce);
+ GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);
+ SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
+
+ Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
+ AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec);
+
+ byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length);
+
+ byte[] iv = AES_cipherInstance.getIV();
+ // encryptedText already includes the GCM tag, so size the message from it
+ byte[] message = new byte[GCM_NONCE_LENGTH + encryptedText.length];
+ System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH);
+ System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length);
+
+ return message;
+ }
+
+ public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception {
+ final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
+ GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH);
+ SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
+ AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params);
+ byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH);
+ return decryptedText;
+ }
+
+}
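
The message layout is the one the Go side expects (see testSameAsGoImplementation below): a random 12-byte nonce, then the ciphertext with the 16-byte GCM tag that doFinal appends. A quick sketch:

    byte[] key = SeaweedCipher.genCipherKey();
    byte[] sealed = SeaweedCipher.encrypt("hello".getBytes(), key);
    // sealed.length == 12 (nonce) + 5 (ciphertext) + 16 (tag)
    byte[] plain = SeaweedCipher.decrypt(sealed, key);
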
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
index a906a689b..7be39da53 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
@@ -5,25 +5,25 @@ import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.util.EntityUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.Closeable;
import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
public class SeaweedRead {
- // private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
+
+ static ChunkCache chunkCache = new ChunkCache(1000);
// returns bytesRead
public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
final long position, final byte[] buffer, final int bufferOffset,
- final int bufferLength) {
+ final int bufferLength) throws IOException {
List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength);
@@ -34,7 +34,7 @@ public class SeaweedRead {
}
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
- .getBlockingStub().lookupVolume(lookupRequest.build());
+ .getBlockingStub().lookupVolume(lookupRequest.build());
Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
@@ -58,35 +58,64 @@ public class SeaweedRead {
return readCount;
}
- private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) {
- HttpClient client = HttpClientBuilder.create().build();
- HttpGet request = new HttpGet(
- String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+ private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+
+ byte[] chunkData = chunkCache.getChunk(chunkView.fileId);
- if (!chunkView.isFullChunk) {
- request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
- request.setHeader(HttpHeaders.RANGE,
- String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
+ if (chunkData == null) {
+ chunkData = doFetchFullChunkData(chunkView, locations);
}
+ int len = (int) chunkView.size;
+ LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} buffer.length:{} startOffset:{} len:{}",
+ chunkView.fileId, chunkData.length, chunkView.offset, buffer.length, startOffset, len);
+ System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, len);
+
+ chunkCache.setChunk(chunkView.fileId, chunkData);
+
+ return len;
+ }
+
+ private static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+
+ HttpClient client = new DefaultHttpClient();
+ HttpGet request = new HttpGet(
+ String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+
+ request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
+
+ byte[] data = null;
+
try {
HttpResponse response = client.execute(request);
HttpEntity entity = response.getEntity();
- int len = (int) (chunkView.logicOffset - position + chunkView.size);
- OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
- entity.writeTo(outputStream);
- // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
+ data = EntityUtils.toByteArray(entity);
- return len;
+ } finally {
+ if (client instanceof Closeable) {
+ Closeable t = (Closeable) client;
+ t.close();
+ }
+ }
- } catch (IOException e) {
- e.printStackTrace();
+ if (chunkView.isGzipped) {
+ data = Gzip.decompress(data);
}
- return 0;
+
+ if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
+ try {
+ data = SeaweedCipher.decrypt(data, chunkView.cipherKey);
+ } catch (Exception e) {
+ throw new IOException("fail to decrypt", e);
+ }
+ }
+
+ return data;
+
}
- public static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
+ protected static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
List<ChunkView> views = new ArrayList<>();
long stop = offset + size;
@@ -94,11 +123,13 @@ public class SeaweedRead {
if (chunk.start <= offset && offset < chunk.stop && offset < stop) {
boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop;
views.add(new ChunkView(
- chunk.fileId,
- offset - chunk.start,
- Math.min(chunk.stop, stop) - offset,
- offset,
- isFullChunk
+ chunk.fileId,
+ offset - chunk.start,
+ Math.min(chunk.stop, stop) - offset,
+ offset,
+ isFullChunk,
+ chunk.cipherKey,
+ chunk.isGzipped
));
offset = Math.min(chunk.stop, stop);
}
@@ -128,11 +159,13 @@ public class SeaweedRead {
List<VisibleInterval> newVisibles,
FilerProto.FileChunk chunk) {
VisibleInterval newV = new VisibleInterval(
- chunk.getOffset(),
- chunk.getOffset() + chunk.getSize(),
- chunk.getFileId(),
- chunk.getMtime(),
- true
+ chunk.getOffset(),
+ chunk.getOffset() + chunk.getSize(),
+ chunk.getFileId(),
+ chunk.getMtime(),
+ true,
+ chunk.getCipherKey().toByteArray(),
+ chunk.getIsGzipped()
);
// easy cases to speed up
@@ -148,21 +181,25 @@ public class SeaweedRead {
for (VisibleInterval v : visibles) {
if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) {
newVisibles.add(new VisibleInterval(
- v.start,
- chunk.getOffset(),
- v.fileId,
- v.modifiedTime,
- false
+ v.start,
+ chunk.getOffset(),
+ v.fileId,
+ v.modifiedTime,
+ false,
+ v.cipherKey,
+ v.isGzipped
));
}
long chunkStop = chunk.getOffset() + chunk.getSize();
if (v.start < chunkStop && chunkStop < v.stop) {
newVisibles.add(new VisibleInterval(
- chunkStop,
- v.stop,
- v.fileId,
- v.modifiedTime,
- false
+ chunkStop,
+ v.stop,
+ v.fileId,
+ v.modifiedTime,
+ false,
+ v.cipherKey,
+ v.isGzipped
));
}
if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
@@ -209,24 +246,30 @@ public class SeaweedRead {
public final long modifiedTime;
public final String fileId;
public final boolean isFullChunk;
+ public final byte[] cipherKey;
+ public final boolean isGzipped;
- public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) {
+ public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
this.start = start;
this.stop = stop;
this.modifiedTime = modifiedTime;
this.fileId = fileId;
this.isFullChunk = isFullChunk;
+ this.cipherKey = cipherKey;
+ this.isGzipped = isGzipped;
}
@Override
public String toString() {
return "VisibleInterval{" +
- "start=" + start +
- ", stop=" + stop +
- ", modifiedTime=" + modifiedTime +
- ", fileId='" + fileId + '\'' +
- ", isFullChunk=" + isFullChunk +
- '}';
+ "start=" + start +
+ ", stop=" + stop +
+ ", modifiedTime=" + modifiedTime +
+ ", fileId='" + fileId + '\'' +
+ ", isFullChunk=" + isFullChunk +
+ ", cipherKey=" + Arrays.toString(cipherKey) +
+ ", isGzipped=" + isGzipped +
+ '}';
}
}
@@ -236,24 +279,30 @@ public class SeaweedRead {
public final long size;
public final long logicOffset;
public final boolean isFullChunk;
+ public final byte[] cipherKey;
+ public final boolean isGzipped;
- public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) {
+ public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
this.fileId = fileId;
this.offset = offset;
this.size = size;
this.logicOffset = logicOffset;
this.isFullChunk = isFullChunk;
+ this.cipherKey = cipherKey;
+ this.isGzipped = isGzipped;
}
@Override
public String toString() {
return "ChunkView{" +
- "fileId='" + fileId + '\'' +
- ", offset=" + offset +
- ", size=" + size +
- ", logicOffset=" + logicOffset +
- ", isFullChunk=" + isFullChunk +
- '}';
+ "fileId='" + fileId + '\'' +
+ ", offset=" + offset +
+ ", size=" + size +
+ ", logicOffset=" + logicOffset +
+ ", isFullChunk=" + isFullChunk +
+ ", cipherKey=" + Arrays.toString(cipherKey) +
+ ", isGzipped=" + isGzipped +
+ '}';
}
}
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
index a7cede09f..18ec77b76 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
@@ -1,18 +1,23 @@
package seaweedfs.client;
+import com.google.protobuf.ByteString;
import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.mime.HttpMultipartMode;
import org.apache.http.entity.mime.MultipartEntityBuilder;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.client.DefaultHttpClient;
import java.io.ByteArrayInputStream;
+import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
+import java.security.SecureRandom;
public class SeaweedWrite {
+ private static SecureRandom random = new SecureRandom();
+
public static void writeData(FilerProto.Entry.Builder entry,
final String replication,
final FilerGrpcClient filerGrpcClient,
@@ -20,53 +25,83 @@ public class SeaweedWrite {
final byte[] bytes,
final long bytesOffset, final long bytesLength) throws IOException {
FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
- FilerProto.AssignVolumeRequest.newBuilder()
- .setCollection("")
- .setReplication(replication)
- .setDataCenter("")
- .setReplication("")
- .setTtlSec(0)
- .build());
+ FilerProto.AssignVolumeRequest.newBuilder()
+ .setCollection(filerGrpcClient.getCollection())
+ .setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
+ .setDataCenter("")
+ .setTtlSec(0)
+ .build());
String fileId = response.getFileId();
String url = response.getUrl();
+ String auth = response.getAuth();
String targetUrl = String.format("http://%s/%s", url, fileId);
- String etag = multipartUpload(targetUrl, bytes, bytesOffset, bytesLength);
+ ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
+ byte[] cipherKey = null;
+ if (filerGrpcClient.isCipher()) {
+ cipherKey = genCipherKey();
+ cipherKeyString = ByteString.copyFrom(cipherKey);
+ }
+
+ String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);
+
+ synchronized (entry) {
+ entry.addChunks(FilerProto.FileChunk.newBuilder()
+ .setFileId(fileId)
+ .setOffset(offset)
+ .setSize(bytesLength)
+ .setMtime(System.currentTimeMillis() / 10000L)
+ .setETag(etag)
+ .setCipherKey(cipherKeyString)
+ );
+ }
- entry.addChunks(FilerProto.FileChunk.newBuilder()
- .setFileId(fileId)
- .setOffset(offset)
- .setSize(bytesLength)
- .setMtime(System.currentTimeMillis() / 10000L)
- .setETag(etag)
- );
+ // cache fileId ~ bytes
+ SeaweedRead.chunkCache.setChunk(fileId, bytes);
}
public static void writeMeta(final FilerGrpcClient filerGrpcClient,
final String parentDirectory, final FilerProto.Entry.Builder entry) {
- filerGrpcClient.getBlockingStub().createEntry(
- FilerProto.CreateEntryRequest.newBuilder()
- .setDirectory(parentDirectory)
- .setEntry(entry)
- .build()
- );
+ synchronized (entry) {
+ filerGrpcClient.getBlockingStub().createEntry(
+ FilerProto.CreateEntryRequest.newBuilder()
+ .setDirectory(parentDirectory)
+ .setEntry(entry)
+ .build()
+ );
+ }
}
private static String multipartUpload(String targetUrl,
+ String auth,
final byte[] bytes,
- final long bytesOffset, final long bytesLength) throws IOException {
-
- CloseableHttpClient client = HttpClientBuilder.create().setUserAgent("hdfs-client").build();
-
- InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
+ final long bytesOffset, final long bytesLength,
+ byte[] cipherKey) throws IOException {
+
+ HttpClient client = new DefaultHttpClient();
+
+ InputStream inputStream = null;
+ if (cipherKey == null || cipherKey.length == 0) {
+ inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
+ } else {
+ try {
+ byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey);
+ inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length);
+ } catch (Exception e) {
+ throw new IOException("fail to encrypt data", e);
+ }
+ }
HttpPost post = new HttpPost(targetUrl);
+ if (auth != null && auth.length() != 0) {
+ post.addHeader("Authorization", "BEARER " + auth);
+ }
post.setEntity(MultipartEntityBuilder.create()
- .setMode(HttpMultipartMode.BROWSER_COMPATIBLE)
- .addBinaryBody("upload", inputStream)
- .build());
+ .setMode(HttpMultipartMode.BROWSER_COMPATIBLE)
+ .addBinaryBody("upload", inputStream)
+ .build());
try {
HttpResponse response = client.execute(post);
@@ -79,8 +114,17 @@ public class SeaweedWrite {
return etag;
} finally {
- client.close();
+ if (client instanceof Closeable) {
+ Closeable t = (Closeable) client;
+ t.close();
+ }
}
}
+
+ private static byte[] genCipherKey() {
+ byte[] b = new byte[32];
+ random.nextBytes(b);
+ return b;
+ }
}
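
A sketch of the write path (the long offset parameter comes from unchanged context not shown in this hunk, so treat the argument order as an assumption; assumes a filer at localhost:18888):

    FilerGrpcClient client = new FilerGrpcClient("localhost", 18888);
    FilerProto.Entry.Builder entry = FilerProto.Entry.newBuilder().setName("hello.txt");
    byte[] data = "hello".getBytes();
    // passing null replication now falls back to the filer's configured default
    SeaweedWrite.writeData(entry, null, client, 0, data, 0, data.length);
    SeaweedWrite.writeMeta(client, "/tmp", entry);
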
diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto
index 6cd4df6b4..37121f29c 100644
--- a/other/java/client/src/main/proto/filer.proto
+++ b/other/java/client/src/main/proto/filer.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
package filer_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "FilerProto";
@@ -12,7 +13,7 @@ service SeaweedFiler {
rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) {
}
- rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) {
+ rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) {
}
rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
@@ -21,9 +22,15 @@ service SeaweedFiler {
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
+ rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
+ }
+
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
+ rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
+ }
+
rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
}
@@ -36,6 +43,21 @@ service SeaweedFiler {
rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
}
+ rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
+ }
+
+ rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
+ }
+
+ rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -58,7 +80,7 @@ message ListEntriesRequest {
}
message ListEntriesResponse {
- repeated Entry entries = 1;
+ Entry entry = 1;
}
message Entry {
@@ -69,19 +91,36 @@ message Entry {
map<string, bytes> extended = 5;
}
+message FullEntry {
+ string dir = 1;
+ Entry entry = 2;
+}
+
message EventNotification {
Entry old_entry = 1;
Entry new_entry = 2;
bool delete_chunks = 3;
+ string new_parent_path = 4;
+ bool is_from_other_cluster = 5;
}
message FileChunk {
- string file_id = 1;
+ string file_id = 1; // to be deprecated
int64 offset = 2;
uint64 size = 3;
int64 mtime = 4;
string e_tag = 5;
- string source_file_id = 6;
+ string source_file_id = 6; // to be deprecated
+ FileId fid = 7;
+ FileId source_fid = 8;
+ bytes cipher_key = 9;
+ bool is_compressed = 10;
+}
+
+message FileId {
+ uint32 volume_id = 1;
+ uint64 file_key = 2;
+ fixed32 cookie = 3;
}
message FuseAttributes {
@@ -98,32 +137,58 @@ message FuseAttributes {
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
+ bytes md5 = 14;
}
message CreateEntryRequest {
string directory = 1;
Entry entry = 2;
+ bool o_excl = 3;
+ bool is_from_other_cluster = 4;
}
message CreateEntryResponse {
+ string error = 1;
}
message UpdateEntryRequest {
string directory = 1;
Entry entry = 2;
+ bool is_from_other_cluster = 3;
}
message UpdateEntryResponse {
}
+message AppendToEntryRequest {
+ string directory = 1;
+ string entry_name = 2;
+ repeated FileChunk chunks = 3;
+}
+message AppendToEntryResponse {
+}
+
message DeleteEntryRequest {
string directory = 1;
string name = 2;
// bool is_directory = 3;
bool is_delete_data = 4;
bool is_recursive = 5;
+ bool ignore_recursive_error = 6;
+ bool is_from_other_cluster = 7;
}
message DeleteEntryResponse {
+ string error = 1;
+}
+
+message AtomicRenameEntryRequest {
+ string old_directory = 1;
+ string old_name = 2;
+ string new_directory = 3;
+ string new_name = 4;
+}
+
+message AtomicRenameEntryResponse {
}
message AssignVolumeRequest {
@@ -132,6 +197,7 @@ message AssignVolumeRequest {
string replication = 3;
int32 ttl_sec = 4;
string data_center = 5;
+ string parent_path = 6;
}
message AssignVolumeResponse {
@@ -139,6 +205,10 @@ message AssignVolumeResponse {
string url = 2;
string public_url = 3;
int32 count = 4;
+ string auth = 5;
+ string collection = 6;
+ string replication = 7;
+ string error = 8;
}
message LookupVolumeRequest {
@@ -177,3 +247,53 @@ message StatisticsResponse {
uint64 used_size = 5;
uint64 file_count = 6;
}
+
+message GetFilerConfigurationRequest {
+}
+message GetFilerConfigurationResponse {
+ repeated string masters = 1;
+ string replication = 2;
+ string collection = 3;
+ uint32 max_mb = 4;
+ string dir_buckets = 5;
+ bool cipher = 7;
+}
+
+message SubscribeMetadataRequest {
+ string client_name = 1;
+ string path_prefix = 2;
+ int64 since_ns = 3;
+}
+message SubscribeMetadataResponse {
+ string directory = 1;
+ EventNotification event_notification = 2;
+ int64 ts_ns = 3;
+}
+
+message LogEntry {
+ int64 ts_ns = 1;
+ int32 partition_key_hash = 2;
+ bytes data = 3;
+}
+
+message KeepConnectedRequest {
+ string name = 1;
+ uint32 grpc_port = 2;
+ repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+ string resource = 1;
+}
+message LocateBrokerResponse {
+ bool found = 1;
+ // if found, send the exact address
+ // if not found, send the full list of existing brokers
+ message Resource {
+ string grpc_addresses = 1;
+ int32 resource_count = 2;
+ }
+ repeated Resource resources = 2;
+}
diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java
new file mode 100644
index 000000000..7b5e53e19
--- /dev/null
+++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java
@@ -0,0 +1,42 @@
+package seaweedfs.client;
+
+import org.junit.Test;
+
+import java.util.Base64;
+
+import static seaweedfs.client.SeaweedCipher.decrypt;
+import static seaweedfs.client.SeaweedCipher.encrypt;
+
+public class SeaweedCipherTest {
+
+ @Test
+ public void testSameAsGoImplementation() throws Exception {
+ byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes();
+
+ String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";
+
+ System.out.println("Original Text : " + plainText);
+
+ byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
+ System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));
+
+ byte[] decryptedText = decrypt(cipherText, secretKey);
+ System.out.println("DeCrypted Text : " + new String(decryptedText));
+ }
+
+ @Test
+ public void testEncryptDecrypt() throws Exception {
+ byte[] secretKey = SeaweedCipher.genCipherKey();
+
+ String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";
+
+ System.out.println("Original Text : " + plainText);
+
+ byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
+ System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));
+
+ byte[] decryptedText = decrypt(cipherText, secretKey);
+ System.out.println("DeCrypted Text : " + new String(decryptedText));
+ }
+
+}
diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java
new file mode 100644
index 000000000..eaf17e5c6
--- /dev/null
+++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java
@@ -0,0 +1,23 @@
+package seaweedfs.client;
+
+import java.util.List;
+
+public class SeaweedFilerTest {
+ public static void main(String[] args){
+
+ FilerClient filerClient = new FilerClient("localhost", 18888);
+
+ List<FilerProto.Entry> entries = filerClient.listEntries("/");
+
+ for (FilerProto.Entry entry : entries) {
+ System.out.println(entry.toString());
+ }
+
+ filerClient.mkdirs("/new_folder", 0755);
+ filerClient.touch("/new_folder/new_empty_file", 0755);
+ filerClient.touch("/new_folder/new_empty_file2", 0755);
+ filerClient.rm("/new_folder/new_empty_file", false, true);
+ filerClient.rm("/new_folder", true, true);
+
+ }
+}
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
new file mode 100644
index 000000000..53fb62186
--- /dev/null
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>oss-parent</artifactId>
+    <groupId>org.sonatype.oss</groupId>
+    <version>9</version>
+    <relativePath>../pom.xml/pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.github.chrislusf</groupId>
+  <artifactId>seaweedfs-hadoop2-client</artifactId>
+  <version>${seaweedfs.client.version}</version>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>7</source>
+          <target>7</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>3.2.1</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <filters>
+                <filter>
+                  <artifact>*:*</artifact>
+                  <excludes>
+                    <exclude>META-INF/*.SF</exclude>
+                    <exclude>META-INF/*.DSA</exclude>
+                    <exclude>META-INF/*.RSA</exclude>
+                    <exclude>org/slf4j/**</exclude>
+                    <exclude>META-INF/maven/org.slf4j/**</exclude>
+                  </excludes>
+                </filter>
+              </filters>
+              <relocations>
+                <relocation>
+                  <pattern>com.google</pattern>
+                  <shadedPattern>shaded.com.google</shadedPattern>
+                </relocation>
+                <relocation>
+                  <pattern>io.grpc.internal</pattern>
+                  <shadedPattern>shaded.io.grpc.internal</shadedPattern>
+                </relocation>
+                <relocation>
+                  <pattern>org.apache.commons</pattern>
+                  <shadedPattern>shaded.org.apache.commons</shadedPattern>
+                  <excludes>
+                    <exclude>org.apache.hadoop</exclude>
+                    <exclude>org.apache.log4j</exclude>
+                  </excludes>
+                </relocation>
+                <relocation>
+                  <pattern>org.apache.http</pattern>
+                  <shadedPattern>shaded.org.apache.http</shadedPattern>
+                </relocation>
+              </relocations>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-gpg-plugin</artifactId>
+        <version>1.5</version>
+        <executions>
+          <execution>
+            <id>sign-artifacts</id>
+            <phase>verify</phase>
+            <goals>
+              <goal>sign</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.sonatype.plugins</groupId>
+        <artifactId>nexus-staging-maven-plugin</artifactId>
+        <version>1.6.7</version>
+        <extensions>true</extensions>
+        <configuration>
+          <serverId>ossrh</serverId>
+          <nexusUrl>https://oss.sonatype.org/</nexusUrl>
+          <autoReleaseAfterClose>true</autoReleaseAfterClose>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-source-plugin</artifactId>
+        <version>2.2.1</version>
+        <executions>
+          <execution>
+            <id>attach-sources</id>
+            <goals>
+              <goal>jar-no-fork</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <version>2.9.1</version>
+        <executions>
+          <execution>
+            <id>attach-javadocs</id>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <distributionManagement>
+    <snapshotRepository>
+      <id>ossrh</id>
+      <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+    </snapshotRepository>
+  </distributionManagement>
+  <properties>
+    <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
+    <hadoop.version>2.9.2</hadoop.version>
+  </properties>
+</project>
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
new file mode 100644
index 000000000..0d5b138d5
--- /dev/null
+++ b/other/java/hdfs2/pom.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <properties>
+        <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
+        <hadoop.version>2.9.2</hadoop.version>
+    </properties>
+
+    <groupId>com.github.chrislusf</groupId>
+    <artifactId>seaweedfs-hadoop2-client</artifactId>
+    <version>${seaweedfs.client.version}</version>
+
+    <parent>
+        <groupId>org.sonatype.oss</groupId>
+        <artifactId>oss-parent</artifactId>
+        <version>9</version>
+    </parent>
+
+    <distributionManagement>
+        <snapshotRepository>
+            <id>ossrh</id>
+            <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+        </snapshotRepository>
+    </distributionManagement>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>7</source>
+                    <target>7</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>3.2.1</version>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <filters>
+                                <filter>
+                                    <artifact>*:*</artifact>
+                                    <excludes>
+                                        <exclude>META-INF/*.SF</exclude>
+                                        <exclude>META-INF/*.DSA</exclude>
+                                        <exclude>META-INF/*.RSA</exclude>
+                                        <exclude>org/slf4j/**</exclude>
+                                        <exclude>META-INF/maven/org.slf4j/**</exclude>
+                                    </excludes>
+                                </filter>
+                            </filters>
+                            <relocations>
+                                <relocation>
+                                    <pattern>com.google</pattern>
+                                    <shadedPattern>shaded.com.google</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>io.grpc.internal</pattern>
+                                    <shadedPattern>shaded.io.grpc.internal</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.commons</pattern>
+                                    <shadedPattern>shaded.org.apache.commons</shadedPattern>
+                                    <excludes>
+                                        <exclude>org.apache.hadoop</exclude>
+                                        <exclude>org.apache.log4j</exclude>
+                                    </excludes>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.http</pattern>
+                                    <shadedPattern>shaded.org.apache.http</shadedPattern>
+                                </relocation>
+                            </relocations>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+                <version>1.5</version>
+                <executions>
+                    <execution>
+                        <id>sign-artifacts</id>
+                        <phase>verify</phase>
+                        <goals>
+                            <goal>sign</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.sonatype.plugins</groupId>
+                <artifactId>nexus-staging-maven-plugin</artifactId>
+                <version>1.6.7</version>
+                <extensions>true</extensions>
+                <configuration>
+                    <serverId>ossrh</serverId>
+                    <nexusUrl>https://oss.sonatype.org/</nexusUrl>
+                    <autoReleaseAfterClose>true</autoReleaseAfterClose>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+                <version>2.2.1</version>
+                <executions>
+                    <execution>
+                        <id>attach-sources</id>
+                        <goals>
+                            <goal>jar-no-fork</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <version>2.9.1</version>
+                <executions>
+                    <execution>
+                        <id>attach-javadocs</id>
+                        <goals>
+                            <goal>jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.github.chrislusf</groupId>
+            <artifactId>seaweedfs-client</artifactId>
+            <version>${seaweedfs.client.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+    </dependencies>
+
+</project>
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java
similarity index 100%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBuffer.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java
similarity index 100%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferManager.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java
similarity index 100%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferStatus.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java
similarity index 100%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferWorker.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
similarity index 73%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 2a0ef78af..d471d8440 100644
--- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -1,14 +1,7 @@
package seaweed.hdfs;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -73,6 +66,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
this.uri = uri;
seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+
}
@Override
@@ -86,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
return new FSDataInputStream(inputStream);
} catch (Exception ex) {
+ LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
@@ -103,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
return null;
}
}
+ /**
+ * {@inheritDoc}
+ * @throws FileNotFoundException if the parent directory is not present or
+ * is not a directory.
+ */
+ @Override
+ public FSDataOutputStream createNonRecursive(Path path,
+ FsPermission permission,
+ EnumSet<CreateFlag> flags,
+ int bufferSize,
+ short replication,
+ long blockSize,
+ Progressable progress) throws IOException {
+ Path parent = path.getParent();
+ if (parent != null) {
+ // expect this to raise an exception if there is no parent
+ if (!getFileStatus(parent).isDirectory()) {
+ throw new FileAlreadyExistsException("Not a directory: " + parent);
+ }
+ }
+ return create(path, permission,
+ flags.contains(CreateFlag.OVERWRITE), bufferSize,
+ replication, blockSize, progress);
+ }
+
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {
@@ -117,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
@@ -206,8 +228,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
return seaweedFileSystemStore.createDirectory(path, currentUser,
- fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
- FsPermission.getUMask(getConf()));
+ fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
+ FsPermission.getUMask(getConf()));
}
@@ -238,7 +260,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*/
@Override
public void setOwner(Path path, final String owner, final String group)
- throws IOException {
+ throws IOException {
LOG.debug("setOwner path: {}", path);
path = qualify(path);
@@ -271,54 +293,55 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
/**
* Concat existing files together.
- * @param trg the path to the target destination.
+ *
+ * @param trg the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default).
+ * (default).
*/
@Override
- public void concat(final Path trg, final Path [] psrcs) throws IOException {
+ public void concat(final Path trg, final Path[] psrcs) throws IOException {
throw new UnsupportedOperationException("Not implemented by the " +
- getClass().getSimpleName() + " FileSystem implementation");
+ getClass().getSimpleName() + " FileSystem implementation");
}
/**
* Truncate the file in the indicated path to the indicated size.
*
- * Fails if path is a directory.
- * Fails if path does not exist.
- * Fails if path is not closed.
- * Fails if new size is greater than current size.
+ * Fails if path is a directory.
+ * Fails if path does not exist.
+ * Fails if path is not closed.
+ * Fails if new size is greater than current size.
*
- * @param f The path to the file to be truncated
- * @param newLength The size the file is to be truncated to
*
+ * @param f The path to the file to be truncated
+ * @param newLength The size the file is to be truncated to
* @return <code>true</code> if the file has been truncated to the desired
* <code>newLength</code> and is immediately available to be reused for
* write operations such as <code>append</code>, or
* <code>false</code> if a background process of adjusting the length of
* the last block has been started, and clients should wait for it to
* complete before proceeding with further file updates.
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default).
+ * (default).
*/
@Override
public boolean truncate(Path f, long newLength) throws IOException {
throw new UnsupportedOperationException("Not implemented by the " +
- getClass().getSimpleName() + " FileSystem implementation");
+ getClass().getSimpleName() + " FileSystem implementation");
}
@Override
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
- FileAlreadyExistsException, FileNotFoundException,
- ParentNotDirectoryException, UnsupportedFileSystemException,
- IOException {
+ FileAlreadyExistsException, FileNotFoundException,
+ ParentNotDirectoryException, UnsupportedFileSystemException,
+ IOException {
// Supporting filesystems should override this method
throw new UnsupportedOperationException(
- "Filesystem does not support symlinks!");
+ "Filesystem does not support symlinks!");
}
public boolean supportsSymlinks() {
@@ -327,48 +350,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
/**
* Create a snapshot.
- * @param path The directory where snapshots will be taken.
+ *
+ * @param path The directory where snapshots will be taken.
* @param snapshotName The name of the snapshot
* @return the snapshot path.
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
*/
@Override
public Path createSnapshot(Path path, String snapshotName)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support createSnapshot");
+ + " doesn't support createSnapshot");
}
/**
* Rename a snapshot.
- * @param path The directory path where the snapshot was taken
+ *
+ * @param path The directory path where the snapshot was taken
* @param snapshotOldName Old name of the snapshot
* @param snapshotNewName New name of the snapshot
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support renameSnapshot");
+ + " doesn't support renameSnapshot");
}
/**
* Delete a snapshot of a directory.
- * @param path The directory that the to-be-deleted snapshot belongs to
+ *
+ * @param path The directory that the to-be-deleted snapshot belongs to
* @param snapshotName The name of the snapshot
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void deleteSnapshot(Path path, String snapshotName)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support deleteSnapshot");
+ + " doesn't support deleteSnapshot");
}
/**
@@ -377,49 +403,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
- * @param path Path to modify
+ * @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
- * @throws IOException if an ACL could not be modified
+ * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support modifyAclEntries");
+ + " doesn't support modifyAclEntries");
}
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
- * @param path Path to modify
+ * @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
- * @throws IOException if an ACL could not be modified
+ * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support removeAclEntries");
+ + " doesn't support removeAclEntries");
}
/**
* Removes all default ACL entries from files and directories.
*
* @param path Path to modify
- * @throws IOException if an ACL could not be modified
+ * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void removeDefaultAcl(Path path)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support removeDefaultAcl");
+ + " doesn't support removeDefaultAcl");
}
/**
@@ -428,32 +454,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* bits.
*
* @param path Path to modify
- * @throws IOException if an ACL could not be removed
+ * @throws IOException if an ACL could not be removed
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void removeAcl(Path path)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support removeAcl");
+ + " doesn't support removeAcl");
}
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
- * @param path Path to modify
+ * @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, which must include entries
- * for user, group, and others for compatibility with permission bits.
- * @throws IOException if an ACL could not be modified
+ * for user, group, and others for compatibility with permission bits.
+ * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support setAcl");
+ + " doesn't support setAcl");
}
/**
@@ -461,14 +487,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*
* @param path Path to get
* @return AclStatus describing the ACL of the file or directory
- * @throws IOException if an ACL could not be read
+ * @throws IOException if an ACL could not be read
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public AclStatus getAclStatus(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support getAclStatus");
+ + " doesn't support getAclStatus");
}
/**
@@ -478,19 +504,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*
* Refer to the HDFS extended attributes user documentation for details.
*
- * @param path Path to modify
- * @param name xattr name.
+ * @param path Path to modify
+ * @param name xattr name.
* @param value xattr value.
- * @param flag xattr set flag
- * @throws IOException IO failure
+ * @param flag xattr set flag
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support setXAttr");
+ + " doesn't support setXAttr");
}
/**
@@ -503,14 +529,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* @param path Path to get extended attribute
* @param name xattr name.
* @return byte[] xattr value.
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public byte[] getXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support getXAttr");
+ + " doesn't support getXAttr");
}
/**
@@ -522,14 +548,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*
* @param path Path to get extended attributes
* @return Map describing the XAttrs of the file or directory
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support getXAttrs");
+ + " doesn't support getXAttrs");
}
/**
@@ -539,18 +565,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*
* Refer to the HDFS extended attributes user documentation for details.
*
- * @param path Path to get extended attributes
+ * @param path Path to get extended attributes
* @param names XAttr names.
* @return Map describing the XAttrs of the file or directory
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
- throws IOException {
+ throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support getXAttrs");
+ + " doesn't support getXAttrs");
}
/**
@@ -562,14 +588,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*
* @param path Path to get extended attributes
* @return List{@literal <String>} of the XAttr names of the file or directory
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public List<String> listXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support listXAttrs");
+ + " doesn't support listXAttrs");
}
/**
@@ -581,14 +607,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
*
* @param path Path to remove extended attribute
* @param name xattr name
- * @throws IOException IO failure
+ * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported
- * (default outcome).
+ * (default outcome).
*/
@Override
public void removeXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
- + " doesn't support removeXAttr");
+ + " doesn't support removeXAttr");
}
}
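All of the snapshot, ACL, and xattr overrides above fail fast with UnsupportedOperationException rather than silently succeeding. As a caller-side illustration (hypothetical helper, not part of this patch), an application that wants to run against both HDFS and SeaweedFS can probe for the optional xattr capability like this:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class XAttrProbe {

    // Hypothetical helper: returns the xattr value, or null when the
    // filesystem rejects xattrs, as SeaweedFileSystem above does.
    public static byte[] tryGetXAttr(FileSystem fs, Path path, String name) throws IOException {
        try {
            return fs.getXAttr(path, name);
        } catch (UnsupportedOperationException e) {
            return null;
        }
    }
}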
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
similarity index 86%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 27678e615..9617a38be 100644
--- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -64,6 +64,16 @@ public class SeaweedFileSystemStore {
public FileStatus[] listEntries(final Path path) {
LOG.debug("listEntries path: {}", path);
+ FileStatus pathStatus = getFileStatus(path);
+
+ if (pathStatus == null) {
+ return new FileStatus[0];
+ }
+
+ if (!pathStatus.isDirectory()) {
+ return new FileStatus[]{pathStatus};
+ }
+
List<FileStatus> fileStatuses = new ArrayList<>();
List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
@@ -74,7 +84,9 @@ public class SeaweedFileSystemStore {
fileStatuses.add(fileStatus);
}
+ LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
return fileStatuses.toArray(new FileStatus[0]);
+
}
public FileStatus getFileStatus(final Path path) {
@@ -106,7 +118,7 @@ public class SeaweedFileSystemStore {
}
}
- return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive);
+ return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive, true);
}
private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {
@@ -137,41 +149,13 @@ public class SeaweedFileSystemStore {
if (source.isRoot()) {
return;
}
- LOG.warn("rename lookupEntry source: {}", source);
+ LOG.info("rename source: {} destination:{}", source, destination);
FilerProto.Entry entry = lookupEntry(source);
if (entry == null) {
LOG.warn("rename non-existing source: {}", source);
return;
}
- LOG.warn("rename moveEntry source: {}", source);
- moveEntry(source.getParent(), entry, destination);
- }
-
- private boolean moveEntry(Path oldParent, FilerProto.Entry entry, Path destination) {
-
- LOG.debug("moveEntry: {}/{} => {}", oldParent, entry.getName(), destination);
-
- FilerProto.Entry.Builder newEntry = entry.toBuilder().setName(destination.getName());
- boolean isDirectoryCreated = filerClient.createEntry(getParentDirectory(destination), newEntry.build());
-
- if (!isDirectoryCreated) {
- return false;
- }
-
- if (entry.getIsDirectory()) {
- Path entryPath = new Path(oldParent, entry.getName());
- List entries = filerClient.listEntries(entryPath.toUri().getPath());
- for (FilerProto.Entry ent : entries) {
- boolean isSucess = moveEntry(entryPath, ent, new Path(destination, ent.getName()));
- if (!isSucess) {
- return false;
- }
- }
- }
-
- return filerClient.deleteEntry(
- oldParent.toUri().getPath(), entry.getName(), false, false);
-
+ filerClient.mv(source.toUri().getPath(), destination.toUri().getPath());
}
public OutputStream createFile(final Path path,
@@ -199,10 +183,10 @@ public class SeaweedFileSystemStore {
entry = FilerProto.Entry.newBuilder();
entry.mergeFrom(existingEntry);
entry.getAttributesBuilder().setMtime(now);
+ LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
+ writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ replication = existingEntry.getAttributes().getReplication();
}
- LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
- writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
- replication = existingEntry.getAttributes().getReplication();
}
if (entry == null) {
entry = FilerProto.Entry.newBuilder()
@@ -294,4 +278,5 @@ public class SeaweedFileSystemStore {
filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
}
+
}
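The rename change above is the substantive part of this file: the old client-side moveEntry walk (create the new entry, recurse into children, delete the old entry) is replaced with a single filerClient.mv call, turning a directory rename into one filer-side operation instead of one round trip per entry. A minimal sketch of the new call (illustration only; the path names and the filer gRPC port 18888 are assumptions):

import seaweedfs.client.FilerClient;

public class RenameExample {
    public static void main(String[] args) {
        // Host and gRPC port are assumed values for this sketch.
        FilerClient filerClient = new FilerClient("localhost", 18888);
        // One server-side move replaces the old recursive copy-and-delete.
        filerClient.mv("/data/old-name", "/data/new-name");
    }
}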
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java
similarity index 100%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedInputStream.java
rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
new file mode 100644
index 000000000..e08843caa
--- /dev/null
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
@@ -0,0 +1,280 @@
+package seaweed.hdfs;
+
+// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedWrite;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.io.OutputStream;
+import java.util.concurrent.*;
+
+import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
+
+public class SeaweedOutputStream extends OutputStream {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);
+
+ private final FilerGrpcClient filerGrpcClient;
+ private final Path path;
+ private final int bufferSize;
+ private final int maxConcurrentRequestCount;
+ private final ThreadPoolExecutor threadExecutor;
+ private final ExecutorCompletionService<Void> completionService;
+ private FilerProto.Entry.Builder entry;
+ private long position;
+ private boolean closed;
+ private boolean supportFlush = true;
+ private volatile IOException lastError;
+ private long lastFlushOffset;
+ private long lastTotalAppendOffset = 0;
+ private byte[] buffer;
+ private int bufferIndex;
+ private ConcurrentLinkedDeque<WriteOperation> writeOperations;
+ private String replication = "000";
+
+ public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
+ final long position, final int bufferSize, final String replication) {
+ this.filerGrpcClient = filerGrpcClient;
+ this.replication = replication;
+ this.path = path;
+ this.position = position;
+ this.closed = false;
+ this.lastError = null;
+ this.lastFlushOffset = 0;
+ this.bufferSize = bufferSize;
+ this.buffer = new byte[bufferSize];
+ this.bufferIndex = 0;
+ this.writeOperations = new ConcurrentLinkedDeque<>();
+
+ this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors();
+
+ this.threadExecutor
+ = new ThreadPoolExecutor(maxConcurrentRequestCount,
+ maxConcurrentRequestCount,
+ 10L,
+ TimeUnit.SECONDS,
+ new LinkedBlockingQueue<Runnable>());
+ this.completionService = new ExecutorCompletionService<>(this.threadExecutor);
+
+ this.entry = entry;
+
+ }
+
+ private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
+ try {
+ SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+ } catch (Exception ex) {
+ throw new IOException(ex);
+ }
+ this.lastFlushOffset = offset;
+ }
+
+ @Override
+ public void write(final int byteVal) throws IOException {
+ write(new byte[]{(byte) (byteVal & 0xFF)});
+ }
+
+ @Override
+ public synchronized void write(final byte[] data, final int off, final int length)
+ throws IOException {
+ maybeThrowLastError();
+
+ Preconditions.checkArgument(data != null, "null data");
+
+ if (off < 0 || length < 0 || length > data.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ int currentOffset = off;
+ int writableBytes = bufferSize - bufferIndex;
+ int numberOfBytesToWrite = length;
+
+ while (numberOfBytesToWrite > 0) {
+ if (writableBytes <= numberOfBytesToWrite) {
+ System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
+ bufferIndex += writableBytes;
+ writeCurrentBufferToService();
+ currentOffset += writableBytes;
+ numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
+ } else {
+ System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite);
+ bufferIndex += numberOfBytesToWrite;
+ numberOfBytesToWrite = 0;
+ }
+
+ writableBytes = bufferSize - bufferIndex;
+ }
+ }
+
+ /**
+ * Flushes this output stream and forces any buffered output bytes to be
+ * written out. If any data remains in the payload it is committed to the
+ * service. Data is queued for writing and forced out to the service
+ * before the call returns.
+ */
+ @Override
+ public void flush() throws IOException {
+ if (supportFlush) {
+ flushInternalAsync();
+ }
+ }
+
+ /**
+ * Force all data in the output stream to be written to SeaweedFS.
+ * Wait to return until this is complete. Close the access to the stream and
+ * shut down the upload thread pool.
+ * Any error caught in the worker threads and stored will be rethrown here
+ * after cleanup.
+ */
+ @Override
+ public synchronized void close() throws IOException {
+ if (closed) {
+ return;
+ }
+
+ LOG.debug("close path: {}", path);
+ try {
+ flushInternal();
+ threadExecutor.shutdown();
+ } finally {
+ lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ buffer = null;
+ bufferIndex = 0;
+ closed = true;
+ writeOperations.clear();
+ if (!threadExecutor.isShutdown()) {
+ threadExecutor.shutdownNow();
+ }
+ }
+ }
+
+ private synchronized void writeCurrentBufferToService() throws IOException {
+ if (bufferIndex == 0) {
+ return;
+ }
+
+ final byte[] bytes = buffer;
+ final int bytesLength = bufferIndex;
+
+ buffer = new byte[bufferSize];
+ bufferIndex = 0;
+ final long offset = position;
+ position += bytesLength;
+
+ if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
+ waitForTaskToComplete();
+ }
+
+ final Future<Void> job = completionService.submit(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ // originally: client.append(path, offset, bytes, 0, bytesLength);
+ SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength);
+ return null;
+ }
+ });
+
+ writeOperations.add(new WriteOperation(job, offset, bytesLength));
+
+ // Try to shrink the queue
+ shrinkWriteOperationQueue();
+ }
+
+ private void waitForTaskToComplete() throws IOException {
+ boolean completed;
+ for (completed = false; completionService.poll() != null; completed = true) {
+ // keep polling until there is no data
+ }
+
+ if (!completed) {
+ try {
+ completionService.take();
+ } catch (InterruptedException e) {
+ lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e);
+ throw lastError;
+ }
+ }
+ }
+
+ private void maybeThrowLastError() throws IOException {
+ if (lastError != null) {
+ throw lastError;
+ }
+ }
+
+ /**
+ * Try to remove the completed write operations from the beginning of write
+ * operation FIFO queue.
+ */
+ private synchronized void shrinkWriteOperationQueue() throws IOException {
+ try {
+ while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) {
+ writeOperations.peek().task.get();
+ lastTotalAppendOffset += writeOperations.peek().length;
+ writeOperations.remove();
+ }
+ } catch (Exception e) {
+ lastError = new IOException(e);
+ throw lastError;
+ }
+ }
+
+ private synchronized void flushInternal() throws IOException {
+ maybeThrowLastError();
+ writeCurrentBufferToService();
+ flushWrittenBytesToService();
+ }
+
+ private synchronized void flushInternalAsync() throws IOException {
+ maybeThrowLastError();
+ writeCurrentBufferToService();
+ flushWrittenBytesToServiceAsync();
+ }
+
+ private synchronized void flushWrittenBytesToService() throws IOException {
+ for (WriteOperation writeOperation : writeOperations) {
+ try {
+ writeOperation.task.get();
+ } catch (Exception ex) {
+ lastError = new IOException(ex);
+ throw lastError;
+ }
+ }
+ LOG.debug("flushWrittenBytesToService: {} position:{}", path, position);
+ flushWrittenBytesToServiceInternal(position);
+ }
+
+ private synchronized void flushWrittenBytesToServiceAsync() throws IOException {
+ shrinkWriteOperationQueue();
+
+ if (this.lastTotalAppendOffset > this.lastFlushOffset) {
+ this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset);
+ }
+ }
+
+ private static class WriteOperation {
+ private final Future<Void> task;
+ private final long startOffset;
+ private final long length;
+
+ WriteOperation(final Future<Void> task, final long startOffset, final long length) {
+ Preconditions.checkNotNull(task, "task");
+ Preconditions.checkArgument(startOffset >= 0, "startOffset");
+ Preconditions.checkArgument(length >= 0, "length");
+
+ this.task = task;
+ this.startOffset = startOffset;
+ this.length = length;
+ }
+ }
+
+}
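A short usage sketch of the stream above (illustration only; the entry construction, host, and port are assumptions): write() fills the in-memory buffer, each filled buffer is submitted to the thread pool and uploaded as a chunk through SeaweedWrite.writeData, and close() drains pending uploads before committing the chunk list as file metadata:

import org.apache.hadoop.fs.Path;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;

public class WriteExample {
    public static void main(String[] args) throws Exception {
        // Host and gRPC port are assumed values for this sketch.
        FilerGrpcClient client = new FilerGrpcClient("localhost", 18888);
        FilerProto.Entry.Builder entry = FilerProto.Entry.newBuilder().setName("hello.txt");
        SeaweedOutputStream out = new SeaweedOutputStream(
                client, new Path("/tmp/hello.txt"), entry,
                0L,               // starting write position
                8 * 1024 * 1024,  // buffer size: one upload per filled buffer
                "000");           // replication placement
        try {
            out.write("hello, world".getBytes());
        } finally {
            out.close(); // waits for pending uploads, then writes metadata
        }
    }
}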
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
new file mode 100644
index 000000000..f5d14acdd
--- /dev/null
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -0,0 +1,133 @@
+
+
+
+ oss-parent
+ org.sonatype.oss
+ 9
+ ../pom.xml/pom.xml
+
+ 4.0.0
+ com.github.chrislusf
+ seaweedfs-hadoop3-client
+ ${seaweedfs.client.version}
+
+
+
+ maven-compiler-plugin
+
+ 7
+ 7
+
+
+
+ maven-shade-plugin
+ 3.2.1
+
+
+ package
+
+ shade
+
+
+
+
+ *:*
+
+ META-INF/*.SF
+ META-INF/*.DSA
+ META-INF/*.RSA
+ org/slf4j/**
+ META-INF/maven/org.slf4j/**
+
+
+
+
+
+
+
+
+ com.google
+ shaded.com.google
+
+
+ io.grpc.internal
+ shaded.io.grpc.internal
+
+
+ org.apache.commons
+ shaded.org.apache.commons
+
+ org.apache.hadoop
+ org.apache.log4j
+
+
+
+ org.apache.http
+ shaded.org.apache.http
+
+
+
+
+
+
+
+ maven-gpg-plugin
+ 1.5
+
+
+ sign-artifacts
+ verify
+
+ sign
+
+
+
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ 1.6.7
+ true
+
+ ossrh
+ https://oss.sonatype.org/
+ true
+
+
+
+ maven-source-plugin
+ 2.2.1
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ maven-javadoc-plugin
+ 2.9.1
+
+
+ attach-javadocs
+
+ jar
+
+
+
+
+
+
+
+
+ ossrh
+ https://oss.sonatype.org/content/repositories/snapshots
+
+
+
+ 1.2.9
+ 3.1.1
+
+
diff --git a/other/java/hdfs/pom.xml b/other/java/hdfs3/pom.xml
similarity index 94%
rename from other/java/hdfs/pom.xml
rename to other/java/hdfs3/pom.xml
index a0cab8752..8c88b60df 100644
--- a/other/java/hdfs/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,12 +5,12 @@
4.0.0
- 1.0.5
+ 1.2.9
3.1.1
com.github.chrislusf
- seaweedfs-hadoop-client
+ seaweedfs-hadoop3-client
${seaweedfs.client.version}
@@ -79,6 +79,10 @@
org.apache.log4j
+
+ org.apache.http
+ shaded.org.apache.http
+
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java
new file mode 100644
index 000000000..926d0b83b
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweed.hdfs;
+
+import java.util.concurrent.CountDownLatch;
+
+class ReadBuffer {
+
+ private SeaweedInputStream stream;
+ private long offset; // offset within the file for the buffer
+ private int length; // actual length, set after the buffer is filled
+ private int requestedLength; // requested length of the read
+ private byte[] buffer; // the buffer itself
+ private int bufferindex = -1; // index in the buffers array in Buffer manager
+ private ReadBufferStatus status; // status of the buffer
+ private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client
+ // waiting on this buffer gets unblocked
+
+ // fields to help with eviction logic
+ private long timeStamp = 0; // tick at which buffer became available to read
+ private boolean isFirstByteConsumed = false;
+ private boolean isLastByteConsumed = false;
+ private boolean isAnyByteConsumed = false;
+
+ public SeaweedInputStream getStream() {
+ return stream;
+ }
+
+ public void setStream(SeaweedInputStream stream) {
+ this.stream = stream;
+ }
+
+ public long getOffset() {
+ return offset;
+ }
+
+ public void setOffset(long offset) {
+ this.offset = offset;
+ }
+
+ public int getLength() {
+ return length;
+ }
+
+ public void setLength(int length) {
+ this.length = length;
+ }
+
+ public int getRequestedLength() {
+ return requestedLength;
+ }
+
+ public void setRequestedLength(int requestedLength) {
+ this.requestedLength = requestedLength;
+ }
+
+ public byte[] getBuffer() {
+ return buffer;
+ }
+
+ public void setBuffer(byte[] buffer) {
+ this.buffer = buffer;
+ }
+
+ public int getBufferindex() {
+ return bufferindex;
+ }
+
+ public void setBufferindex(int bufferindex) {
+ this.bufferindex = bufferindex;
+ }
+
+ public ReadBufferStatus getStatus() {
+ return status;
+ }
+
+ public void setStatus(ReadBufferStatus status) {
+ this.status = status;
+ }
+
+ public CountDownLatch getLatch() {
+ return latch;
+ }
+
+ public void setLatch(CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ public long getTimeStamp() {
+ return timeStamp;
+ }
+
+ public void setTimeStamp(long timeStamp) {
+ this.timeStamp = timeStamp;
+ }
+
+ public boolean isFirstByteConsumed() {
+ return isFirstByteConsumed;
+ }
+
+ public void setFirstByteConsumed(boolean isFirstByteConsumed) {
+ this.isFirstByteConsumed = isFirstByteConsumed;
+ }
+
+ public boolean isLastByteConsumed() {
+ return isLastByteConsumed;
+ }
+
+ public void setLastByteConsumed(boolean isLastByteConsumed) {
+ this.isLastByteConsumed = isLastByteConsumed;
+ }
+
+ public boolean isAnyByteConsumed() {
+ return isAnyByteConsumed;
+ }
+
+ public void setAnyByteConsumed(boolean isAnyByteConsumed) {
+ this.isAnyByteConsumed = isAnyByteConsumed;
+ }
+
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java
new file mode 100644
index 000000000..5b1e21529
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweed.hdfs;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.Stack;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * The read buffer manager for SeaweedFS read-ahead (adapted from the Azure ABFS client).
+ */
+final class ReadBufferManager {
+ private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class);
+
+ private static final int NUM_BUFFERS = 16;
+ private static final int BLOCK_SIZE = 4 * 1024 * 1024;
+ private static final int NUM_THREADS = 8;
+ private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold
+
+ private Thread[] threads = new Thread[NUM_THREADS];
+ private byte[][] buffers; // array of byte[] buffers, to hold the data that is read
+ private Stack<Integer> freeList = new Stack<>(); // indices in buffers[] array that are available
+
+ private Queue<ReadBuffer> readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet
+ private LinkedList<ReadBuffer> inProgressList = new LinkedList<>(); // requests being processed by worker threads
+ private LinkedList<ReadBuffer> completedReadList = new LinkedList<>(); // buffers available for reading
+ private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block
+
+ static {
+ BUFFER_MANAGER = new ReadBufferManager();
+ BUFFER_MANAGER.init();
+ }
+
+ static ReadBufferManager getBufferManager() {
+ return BUFFER_MANAGER;
+ }
+
+ private void init() {
+ buffers = new byte[NUM_BUFFERS][];
+ for (int i = 0; i < NUM_BUFFERS; i++) {
+ buffers[i] = new byte[BLOCK_SIZE]; // same buffers are reused. The byte array never goes back to GC
+ freeList.add(i);
+ }
+ for (int i = 0; i < NUM_THREADS; i++) {
+ Thread t = new Thread(new ReadBufferWorker(i));
+ t.setDaemon(true);
+ threads[i] = t;
+ t.setName("SeaweedFS-prefetch-" + i);
+ t.start();
+ }
+ ReadBufferWorker.UNLEASH_WORKERS.countDown();
+ }
+
+ // hide instance constructor
+ private ReadBufferManager() {
+ }
+
+
+ /*
+ *
+ * SeaweedInputStream-facing methods
+ *
+ */
+
+
+ /**
+ * {@link SeaweedInputStream} calls this method to queue read-aheads.
+ *
+ * @param stream The {@link SeaweedInputStream} for which to do the read-ahead
+ * @param requestedOffset The offset in the file which should be read
+ * @param requestedLength The length to read
+ */
+ void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) {
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("Start Queueing readAhead for {} offset {} length {}",
+ stream.getPath(), requestedOffset, requestedLength);
+ }
+ ReadBuffer buffer;
+ synchronized (this) {
+ if (isAlreadyQueued(stream, requestedOffset)) {
+ return; // already queued, do not queue again
+ }
+ if (freeList.isEmpty() && !tryEvict()) {
+ return; // no buffers available, cannot queue anything
+ }
+
+ buffer = new ReadBuffer();
+ buffer.setStream(stream);
+ buffer.setOffset(requestedOffset);
+ buffer.setLength(0);
+ buffer.setRequestedLength(requestedLength);
+ buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE);
+ buffer.setLatch(new CountDownLatch(1));
+
+ Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already
+
+ buffer.setBuffer(buffers[bufferIndex]);
+ buffer.setBufferindex(bufferIndex);
+ readAheadQueue.add(buffer);
+ notifyAll();
+ }
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}",
+ stream.getPath(), requestedOffset, buffer.getBufferindex());
+ }
+ }
+
+
+ /**
+ * {@link SeaweedInputStream} calls this method to read any bytes already available in a buffer (thereby saving a
+ * remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading
+ * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
+ * but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because
+ * depending on worker thread availability, the read-ahead may take a while - the calling thread can do its own
+ * read to get the data faster (compared to the read waiting in queue for an indeterminate amount of time).
+ *
+ * @param stream the file to read bytes for
+ * @param position the offset in the file to do a read for
+ * @param length the length to read
+ * @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0.
+ * @return the number of bytes read
+ */
+ int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) {
+ // not synchronized, so have to be careful with locking
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("getBlock for file {} position {} thread {}",
+ stream.getPath(), position, Thread.currentThread().getName());
+ }
+
+ waitForProcess(stream, position);
+
+ int bytesRead = 0;
+ synchronized (this) {
+ bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer);
+ }
+ if (bytesRead > 0) {
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("Done read from Cache for {} position {} length {}",
+ stream.getPath(), position, bytesRead);
+ }
+ return bytesRead;
+ }
+
+ // otherwise, just say we got nothing - calling thread can do its own read
+ return 0;
+ }
+
+ /*
+ *
+ * Internal methods
+ *
+ */
+
+ private void waitForProcess(final SeaweedInputStream stream, final long position) {
+ ReadBuffer readBuf;
+ synchronized (this) {
+ clearFromReadAheadQueue(stream, position);
+ readBuf = getFromList(inProgressList, stream, position);
+ }
+ if (readBuf != null) { // if in in-progress queue, then block for it
+ try {
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}",
+ stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
+ }
+ readBuf.getLatch().await(); // blocking wait on the caller stream's thread
+ // Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
+ // is done processing it (in doneReading). There, the latch is set after removing the buffer from
+ // inProgressList. So this latch is safe to be outside the synchronized block.
+ // Putting it in synchronized would result in a deadlock, since this thread would be holding the lock
+ // while waiting, so no one will be able to change any state. If this becomes more complex in the future,
+ * then the latch can be removed and replaced with wait/notify whenever inProgressList is touched.
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("latch done for file {} buffer idx {} length {}",
+ stream.getPath(), readBuf.getBufferindex(), readBuf.getLength());
+ }
+ }
+ }
+
+ /**
+ * If any buffer in the completed list can be reclaimed then reclaim it and return the buffer to the free list.
+ * The objective is to find just one buffer - there is no advantage to evicting more than one.
+ *
+ * @return whether the eviction succeeded - i.e., were we able to free up one buffer
+ */
+ private synchronized boolean tryEvict() {
+ ReadBuffer nodeToEvict = null;
+ if (completedReadList.size() <= 0) {
+ return false; // there are no evict-able buffers
+ }
+
+ // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
+ for (ReadBuffer buf : completedReadList) {
+ if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
+ nodeToEvict = buf;
+ break;
+ }
+ }
+ if (nodeToEvict != null) {
+ return evict(nodeToEvict);
+ }
+
+ // next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
+ for (ReadBuffer buf : completedReadList) {
+ if (buf.isAnyByteConsumed()) {
+ nodeToEvict = buf;
+ break;
+ }
+ }
+
+ if (nodeToEvict != null) {
+ return evict(nodeToEvict);
+ }
+
+ // next, try any old nodes that have not been consumed
+ long earliestBirthday = Long.MAX_VALUE;
+ for (ReadBuffer buf : completedReadList) {
+ if (buf.getTimeStamp() < earliestBirthday) {
+ nodeToEvict = buf;
+ earliestBirthday = buf.getTimeStamp();
+ }
+ }
+ if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) {
+ return evict(nodeToEvict);
+ }
+
+ // nothing can be evicted
+ return false;
+ }
+
+ private boolean evict(final ReadBuffer buf) {
+ freeList.push(buf.getBufferindex());
+ completedReadList.remove(buf);
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}",
+ buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength());
+ }
+ return true;
+ }
+
+ private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) {
+ // returns true if any part of the buffer is already queued
+ return (isInList(readAheadQueue, stream, requestedOffset)
+ || isInList(inProgressList, stream, requestedOffset)
+ || isInList(completedReadList, stream, requestedOffset));
+ }
+
+ private boolean isInList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
+ return (getFromList(list, stream, requestedOffset) != null);
+ }
+
+ private ReadBuffer getFromList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
+ for (ReadBuffer buffer : list) {
+ if (buffer.getStream() == stream) {
+ if (buffer.getStatus() == ReadBufferStatus.AVAILABLE
+ && requestedOffset >= buffer.getOffset()
+ && requestedOffset < buffer.getOffset() + buffer.getLength()) {
+ return buffer;
+ } else if (requestedOffset >= buffer.getOffset()
+ && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) {
+ return buffer;
+ }
+ }
+ }
+ return null;
+ }
+
+ private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) {
+ ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset);
+ if (buffer != null) {
+ readAheadQueue.remove(buffer);
+ notifyAll(); // lock is held in calling method
+ freeList.push(buffer.getBufferindex());
+ }
+ }
+
+ private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length,
+ final byte[] buffer) {
+ ReadBuffer buf = getFromList(completedReadList, stream, position);
+ if (buf == null || position >= buf.getOffset() + buf.getLength()) {
+ return 0;
+ }
+ int cursor = (int) (position - buf.getOffset());
+ int availableLengthInBuffer = buf.getLength() - cursor;
+ int lengthToCopy = Math.min(length, availableLengthInBuffer);
+ System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy);
+ if (cursor == 0) {
+ buf.setFirstByteConsumed(true);
+ }
+ if (cursor + lengthToCopy == buf.getLength()) {
+ buf.setLastByteConsumed(true);
+ }
+ buf.setAnyByteConsumed(true);
+ return lengthToCopy;
+ }
+
+ /*
+ *
+ * ReadBufferWorker-thread-facing methods
+ *
+ */
+
+ /**
+ * ReadBufferWorker thread calls this to get the next buffer that it should work on.
+ *
+ * @return {@link ReadBuffer}
+ * @throws InterruptedException if thread is interrupted
+ */
+ ReadBuffer getNextBlockToRead() throws InterruptedException {
+ ReadBuffer buffer = null;
+ synchronized (this) {
+ //buffer = readAheadQueue.take(); // blocking method
+ while (readAheadQueue.size() == 0) {
+ wait();
+ }
+ buffer = readAheadQueue.remove();
+ notifyAll();
+ if (buffer == null) {
+ return null; // should never happen
+ }
+ buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
+ inProgressList.add(buffer);
+ }
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
+ buffer.getStream().getPath(), buffer.getOffset());
+ }
+ return buffer;
+ }
+
+ /**
+ * ReadBufferWorker thread calls this method to post completion.
+ *
+ * @param buffer the buffer whose read was completed
+ * @param result the {@link ReadBufferStatus} after the read operation in the worker thread
+ * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read
+ */
+ void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}",
+ buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead);
+ }
+ synchronized (this) {
+ inProgressList.remove(buffer);
+ if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) {
+ buffer.setStatus(ReadBufferStatus.AVAILABLE);
+ buffer.setTimeStamp(currentTimeMillis());
+ buffer.setLength(bytesActuallyRead);
+ completedReadList.add(buffer);
+ } else {
+ freeList.push(buffer.getBufferindex());
+ // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC
+ }
+ }
+ //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results
+ buffer.getLatch().countDown(); // wake up waiting threads (if any)
+ }
+
+ /**
+ * Similar to System.currentTimeMillis, except implemented with System.nanoTime().
+ * System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
+ * making it unsuitable for measuring time intervals. nanotime is strictly monotonically increasing per CPU core.
+ * Note: it is not monotonic across Sockets, and even within a CPU, it's only the
+ * more recent parts which share a clock across all cores.
+ *
+ * @return current time in milliseconds
+ */
+ private long currentTimeMillis() {
+ return System.nanoTime() / 1000 / 1000;
+ }
+}
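To see how the pieces above fit together: a consumer queues read-aheads for blocks it expects to need, then asks getBlock for the bytes, and on a miss falls back to its own remote read. A hedged sketch of such a read path as it might appear inside SeaweedInputStream (hypothetical method; the block size and two-block prefetch depth are assumptions):

// Hypothetical method inside SeaweedInputStream (sketch only).
private int readWithReadAhead(long position, byte[] b) throws java.io.IOException {
    ReadBufferManager manager = ReadBufferManager.getBufferManager();
    final int blockSize = 4 * 1024 * 1024;

    // Ask the background workers to prefetch the next two blocks.
    manager.queueReadAhead(this, position + blockSize, blockSize);
    manager.queueReadAhead(this, position + 2L * blockSize, blockSize);

    // Serve the current read from the prefetch cache when possible.
    int bytesRead = manager.getBlock(this, position, b.length, b);
    if (bytesRead == 0) {
        // Cache miss: do the remote read on the calling thread.
        bytesRead = readRemote(position, b, 0, b.length);
    }
    return bytesRead;
}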
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java
new file mode 100644
index 000000000..d63674977
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweed.hdfs;
+
+/**
+ * The status of a ReadBuffer in the SeaweedFS read-ahead pipeline (adapted from the Azure ABFS client).
+ */
+public enum ReadBufferStatus {
+ NOT_AVAILABLE, // buffers sitting in the readAheadQueue have this status
+ READING_IN_PROGRESS, // reading is in progress on this buffer. Buffer should be in inProgressList
+ AVAILABLE, // data is available in buffer. It should be in completedList
+ READ_FAILED // read completed, but failed.
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java
new file mode 100644
index 000000000..6ffbc4644
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweed.hdfs;
+
+import java.util.concurrent.CountDownLatch;
+
+class ReadBufferWorker implements Runnable {
+
+ protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1);
+ private int id;
+
+ ReadBufferWorker(final int id) {
+ this.id = id;
+ }
+
+ /**
+ * return the ID of ReadBufferWorker.
+ */
+ public int getId() {
+ return this.id;
+ }
+
+ /**
+ * Waits until a buffer becomes available in ReadAheadQueue.
+ * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
+ * Rinse and repeat. Forever.
+ */
+ public void run() {
+ try {
+ UNLEASH_WORKERS.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
+ ReadBuffer buffer;
+ while (true) {
+ try {
+ buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ return;
+ }
+ if (buffer != null) {
+ try {
+ // do the actual read, from the file.
+ int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength());
+ bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager
+ } catch (Exception ex) {
+ bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
+ }
+ }
+ }
+ }
+}
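The UNLEASH_WORKERS latch above is a start-up gate: ReadBufferManager.init() starts every worker thread first and only counts the latch down once its buffers and lists are fully built, so no worker can touch a half-initialized manager. A self-contained sketch of the same pattern (names are illustrative, not from this patch):

import java.util.concurrent.CountDownLatch;

public class StartGateExample {

    private static final CountDownLatch START_GATE = new CountDownLatch(1);

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 4; i++) {
            Thread t = new Thread(() -> {
                try {
                    START_GATE.await(); // block until initialization completes
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                System.out.println(Thread.currentThread().getName() + " released");
            });
            t.setDaemon(true);
            t.start();
        }
        // ... shared initialization would happen here ...
        START_GATE.countDown(); // release all workers at once
        Thread.sleep(100);      // let the daemon threads print before exit
    }
}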
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
new file mode 100644
index 000000000..c12da8261
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -0,0 +1,620 @@
+package seaweed.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
+public class SeaweedFileSystem extends FileSystem {
+
+ public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
+ public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
+ public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+
+ private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
+ private static int BUFFER_SIZE = 16 * 1024 * 1024;
+
+ private URI uri;
+ private Path workingDirectory = new Path("/");
+ private SeaweedFileSystemStore seaweedFileSystemStore;
+
+ public URI getUri() {
+ return uri;
+ }
+
+ public String getScheme() {
+ return "seaweedfs";
+ }
+
+ @Override
+ public void initialize(URI uri, Configuration conf) throws IOException { // get
+ super.initialize(uri, conf);
+
+ // get host information from uri (overrides info in conf)
+ String host = uri.getHost();
+ host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
+ if (host == null) {
+ throw new IOException("Invalid host specified");
+ }
+ conf.set(FS_SEAWEED_FILER_HOST, host);
+
+ // get port information from uri, (overrides info in conf)
+ int port = uri.getPort();
+ port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
+ conf.setInt(FS_SEAWEED_FILER_PORT, port);
+
+ conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE);
+
+ setConf(conf);
+ this.uri = uri;
+
+ seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+
+ }
+
+ @Override
+ public FSDataInputStream open(Path path, int bufferSize) throws IOException {
+
+ LOG.debug("open path: {} bufferSize:{}", path, bufferSize);
+
+ path = qualify(path);
+
+ try {
+ InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
+ return new FSDataInputStream(inputStream);
+ } catch (Exception ex) {
+ LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
+ return null;
+ }
+ }
+
+ @Override
+ public FSDataOutputStream create(Path path, FsPermission permission, final boolean overwrite, final int bufferSize,
+ final short replication, final long blockSize, final Progressable progress) throws IOException {
+
+ LOG.debug("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize);
+
+ path = qualify(path);
+
+ try {
+ String replicaPlacement = String.format("%03d", replication - 1);
+ OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
+ return new FSDataOutputStream(outputStream, statistics);
+ } catch (Exception ex) {
+ LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
+ return null;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * @throws FileNotFoundException if the parent directory is not present -or
+ * is not a directory.
+ */
+ @Override
+ public FSDataOutputStream createNonRecursive(Path path,
+ FsPermission permission,
+ EnumSet<CreateFlag> flags,
+ int bufferSize,
+ short replication,
+ long blockSize,
+ Progressable progress) throws IOException {
+ Path parent = path.getParent();
+ if (parent != null) {
+ // expect this to raise an exception if there is no parent
+ if (!getFileStatus(parent).isDirectory()) {
+ throw new FileAlreadyExistsException("Not a directory: " + parent);
+ }
+ }
+ return create(path, permission,
+ flags.contains(CreateFlag.OVERWRITE), bufferSize,
+ replication, blockSize, progress);
+ }
+
+ @Override
+ public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {
+
+ LOG.debug("append path: {} bufferSize:{}", path, bufferSize);
+
+ path = qualify(path);
+ try {
+ OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
+ return new FSDataOutputStream(outputStream, statistics);
+ } catch (Exception ex) {
+ LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
+ return null;
+ }
+ }
+
+ @Override
+ public boolean rename(Path src, Path dst) {
+
+ LOG.debug("rename path: {} => {}", src, dst);
+
+ if (src.isRoot()) {
+ return false;
+ }
+
+ if (src.equals(dst)) {
+ return true;
+ }
+ FileStatus dstFileStatus = getFileStatus(dst);
+
+ String sourceFileName = src.getName();
+ Path adjustedDst = dst;
+
+ if (dstFileStatus != null) {
+ if (!dstFileStatus.isDirectory()) {
+ return false;
+ }
+ adjustedDst = new Path(dst, sourceFileName);
+ }
+
+ Path qualifiedSrcPath = qualify(src);
+ Path qualifiedDstPath = qualify(adjustedDst);
+
+ seaweedFileSystemStore.rename(qualifiedSrcPath, qualifiedDstPath);
+ return true;
+ }
+
+ @Override
+ public boolean delete(Path path, boolean recursive) {
+
+ LOG.debug("delete path: {} recursive:{}", path, recursive);
+
+ path = qualify(path);
+
+ FileStatus fileStatus = getFileStatus(path);
+
+ if (fileStatus == null) {
+ return true;
+ }
+
+ return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive);
+
+ }
+
+ @Override
+ public FileStatus[] listStatus(Path path) throws IOException {
+
+ LOG.debug("listStatus path: {}", path);
+
+ path = qualify(path);
+
+ return seaweedFileSystemStore.listEntries(path);
+ }
+
+ @Override
+ public Path getWorkingDirectory() {
+ return workingDirectory;
+ }
+
+ @Override
+ public void setWorkingDirectory(Path path) {
+ if (path.isAbsolute()) {
+ workingDirectory = path;
+ } else {
+ workingDirectory = new Path(workingDirectory, path);
+ }
+ }
+
+ @Override
+ public boolean mkdirs(Path path, FsPermission fsPermission) throws IOException {
+
+ LOG.debug("mkdirs path: {}", path);
+
+ path = qualify(path);
+
+ FileStatus fileStatus = getFileStatus(path);
+
+ if (fileStatus == null) {
+
+ UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+ return seaweedFileSystemStore.createDirectory(path, currentUser,
+ fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
+ FsPermission.getUMask(getConf()));
+
+ }
+
+ if (fileStatus.isDirectory()) {
+ return true;
+ } else {
+ throw new FileAlreadyExistsException("Path is a file: " + path);
+ }
+ }
+
+ @Override
+ public FileStatus getFileStatus(Path path) {
+
+ LOG.debug("getFileStatus path: {}", path);
+
+ path = qualify(path);
+
+ return seaweedFileSystemStore.getFileStatus(path);
+ }
+
+ /**
+ * Set owner of a path (i.e. a file or a directory).
+ * The parameters owner and group cannot both be null.
+ *
+ * @param path The path
+ * @param owner If it is null, the original username remains unchanged.
+ * @param group If it is null, the original groupname remains unchanged.
+ */
+ @Override
+ public void setOwner(Path path, final String owner, final String group)
+ throws IOException {
+ LOG.debug("setOwner path: {}", path);
+ path = qualify(path);
+
+ seaweedFileSystemStore.setOwner(path, owner, group);
+ }
+
+
+ /**
+ * Set permission of a path.
+ *
+ * @param path The path
+ * @param permission Access permission
+ */
+ @Override
+ public void setPermission(Path path, final FsPermission permission) throws IOException {
+ LOG.debug("setPermission path: {}", path);
+
+ if (permission == null) {
+ throw new IllegalArgumentException("The permission can't be null");
+ }
+
+ path = qualify(path);
+
+ seaweedFileSystemStore.setPermission(path, permission);
+ }
+
+ Path qualify(Path path) {
+ return path.makeQualified(uri, workingDirectory);
+ }
+
+ /**
+ * Concat existing files together.
+ *
+ * @param trg the path to the target destination.
+ * @param psrcs the paths to the sources to use for the concatenation.
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default).
+ */
+ @Override
+ public void concat(final Path trg, final Path[] psrcs) throws IOException {
+ throw new UnsupportedOperationException("Not implemented by the " +
+ getClass().getSimpleName() + " FileSystem implementation");
+ }
+
+ /**
+ * Truncate the file in the indicated path to the indicated size.
+ *
+ * Fails if path is a directory.
+ * Fails if path does not exist.
+ * Fails if path is not closed.
+ * Fails if new size is greater than current size.
+ *
+ *
+ * @param f The path to the file to be truncated
+ * @param newLength The size the file is to be truncated to
+ * @return <code>true</code> if the file has been truncated to the desired
+ * <code>newLength</code> and is immediately available to be reused for
+ * write operations such as <code>append</code>, or
+ * <code>false</code> if a background process of adjusting the length of
+ * the last block has been started, and clients should wait for it to
+ * complete before proceeding with further file updates.
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default).
+ */
+ @Override
+ public boolean truncate(Path f, long newLength) throws IOException {
+ throw new UnsupportedOperationException("Not implemented by the " +
+ getClass().getSimpleName() + " FileSystem implementation");
+ }
+
+ @Override
+ public void createSymlink(final Path target, final Path link,
+ final boolean createParent) throws AccessControlException,
+ FileAlreadyExistsException, FileNotFoundException,
+ ParentNotDirectoryException, UnsupportedFileSystemException,
+ IOException {
+ // Supporting filesystems should override this method
+ throw new UnsupportedOperationException(
+ "Filesystem does not support symlinks!");
+ }
+
+ public boolean supportsSymlinks() {
+ return false;
+ }
+
+ /**
+ * Create a snapshot.
+ *
+ * @param path The directory where snapshots will be taken.
+ * @param snapshotName The name of the snapshot
+ * @return the snapshot path.
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ */
+ @Override
+ public Path createSnapshot(Path path, String snapshotName)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support createSnapshot");
+ }
+
+ /**
+ * Rename a snapshot.
+ *
+ * @param path The directory path where the snapshot was taken
+ * @param snapshotOldName Old name of the snapshot
+ * @param snapshotNewName New name of the snapshot
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void renameSnapshot(Path path, String snapshotOldName,
+ String snapshotNewName) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support renameSnapshot");
+ }
+
+ /**
+ * Delete a snapshot of a directory.
+ *
+ * @param path The directory that the to-be-deleted snapshot belongs to
+ * @param snapshotName The name of the snapshot
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void deleteSnapshot(Path path, String snapshotName)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support deleteSnapshot");
+ }
+
+ /**
+ * Modifies ACL entries of files and directories. This method can add new ACL
+ * entries or modify the permissions on existing ACL entries. All existing
+ * ACL entries that are not specified in this call are retained without
+ * changes. (Modifications are merged into the current ACL.)
+ *
+ * @param path Path to modify
+ * @param aclSpec List<AclEntry> describing modifications
+ * @throws IOException if an ACL could not be modified
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support modifyAclEntries");
+ }
+
+ /**
+ * Removes ACL entries from files and directories. Other ACL entries are
+ * retained.
+ *
+ * @param path Path to modify
+ * @param aclSpec List describing entries to remove
+ * @throws IOException if an ACL could not be modified
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support removeAclEntries");
+ }
+
+ /**
+ * Removes all default ACL entries from files and directories.
+ *
+ * @param path Path to modify
+ * @throws IOException if an ACL could not be modified
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void removeDefaultAcl(Path path)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support removeDefaultAcl");
+ }
+
+ /**
+ * Removes all but the base ACL entries of files and directories. The entries
+ * for user, group, and others are retained for compatibility with permission
+ * bits.
+ *
+ * @param path Path to modify
+ * @throws IOException if an ACL could not be removed
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void removeAcl(Path path)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support removeAcl");
+ }
+
+ /**
+ * Fully replaces ACL of files and directories, discarding all existing
+ * entries.
+ *
+ * @param path Path to modify
+ * @param aclSpec List describing modifications, which must include entries
+ * for user, group, and others for compatibility with permission bits.
+ * @throws IOException if an ACL could not be modified
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support setAcl");
+ }
+
+ /**
+ * Gets the ACL of a file or directory.
+ *
+ * @param path Path to get
+ * @return AclStatus describing the ACL of the file or directory
+ * @throws IOException if an ACL could not be read
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public AclStatus getAclStatus(Path path) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support getAclStatus");
+ }
+
+ /**
+ * Set an xattr of a file or directory.
+ * The name must be prefixed with the namespace followed by ".". For example,
+ * "user.attr".
+ *
+ * Refer to the HDFS extended attributes user documentation for details.
+ *
+ * @param path Path to modify
+ * @param name xattr name.
+ * @param value xattr value.
+ * @param flag xattr set flag
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void setXAttr(Path path, String name, byte[] value,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support setXAttr");
+ }
+
+ /**
+ * Get an xattr name and value for a file or directory.
+ * The name must be prefixed with the namespace followed by ".". For example,
+ * "user.attr".
+ *
+ * Refer to the HDFS extended attributes user documentation for details.
+ *
+ * @param path Path to get extended attribute
+ * @param name xattr name.
+ * @return byte[] xattr value.
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public byte[] getXAttr(Path path, String name) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support getXAttr");
+ }
+
+ /**
+ * Get all of the xattr name/value pairs for a file or directory.
+ * Only those xattrs which the logged-in user has permissions to view
+ * are returned.
+ *
+ * Refer to the HDFS extended attributes user documentation for details.
+ *
+ * @param path Path to get extended attributes
+ * @return Map describing the XAttrs of the file or directory
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support getXAttrs");
+ }
+
+ /**
+ * Get all of the xattrs name/value pairs for a file or directory.
+ * Only those xattrs which the logged-in user has permissions to view
+ * are returned.
+ *
+ * Refer to the HDFS extended attributes user documentation for details.
+ *
+ * @param path Path to get extended attributes
+ * @param names XAttr names.
+ * @return Map describing the XAttrs of the file or directory
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support getXAttrs");
+ }
+
+ /**
+ * Get all of the xattr names for a file or directory.
+ * Only those xattr names which the logged-in user has permissions to view
+ * are returned.
+ *
+ * Refer to the HDFS extended attributes user documentation for details.
+ *
+ * @param path Path to get extended attributes
+ * @return List{@literal <String>} of the XAttr names of the file or directory
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public List<String> listXAttrs(Path path) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support listXAttrs");
+ }
+
+ /**
+ * Remove an xattr of a file or directory.
+ * The name must be prefixed with the namespace followed by ".". For example,
+ * "user.attr".
+ *
+ * Refer to the HDFS extended attributes user documentation for details.
+ *
+ * @param path Path to remove extended attribute
+ * @param name xattr name
+ * @throws IOException IO failure
+ * @throws UnsupportedOperationException if the operation is unsupported
+ * (default outcome).
+ */
+ @Override
+ public void removeXAttr(Path path, String name) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support removeXAttr");
+ }
+
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
new file mode 100644
index 000000000..9617a38be
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -0,0 +1,282 @@
+package seaweed.hdfs;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedRead;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class SeaweedFileSystemStore {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);
+
+ private FilerGrpcClient filerGrpcClient;
+ private FilerClient filerClient;
+
+ public SeaweedFileSystemStore(String host, int port) {
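+ // by convention, the filer's gRPC port is its HTTP port + 10000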
+ int grpcPort = 10000 + port;
+ filerGrpcClient = new FilerGrpcClient(host, grpcPort);
+ filerClient = new FilerClient(filerGrpcClient);
+ }
+
+ public static String getParentDirectory(Path path) {
+ return path.isRoot() ? "/" : path.getParent().toUri().getPath();
+ }
+
+ static int permissionToMode(FsPermission permission, boolean isDirectory) {
+ int p = permission.toShort();
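+ // directories are marked by setting the highest bit of the mode,
+ // mirroring Go's os.ModeDir bit used by the filer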
+ if (isDirectory) {
+ p = p | 1 << 31;
+ }
+ return p;
+ }
+
+ public boolean createDirectory(final Path path, UserGroupInformation currentUser,
+ final FsPermission permission, final FsPermission umask) {
+
+ LOG.debug("createDirectory path: {} permission: {} umask: {}",
+ path,
+ permission,
+ umask);
+
+ return filerClient.mkdirs(
+ path.toUri().getPath(),
+ permissionToMode(permission, true),
+ currentUser.getUserName(),
+ currentUser.getGroupNames()
+ );
+ }
+
+ public FileStatus[] listEntries(final Path path) {
+ LOG.debug("listEntries path: {}", path);
+
+ FileStatus pathStatus = getFileStatus(path);
+
+ if (pathStatus == null) {
+ return new FileStatus[0];
+ }
+
+ if (!pathStatus.isDirectory()) {
+ return new FileStatus[]{pathStatus};
+ }
+
+ List<FileStatus> fileStatuses = new ArrayList<FileStatus>();
+
+ List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
+
+ for (FilerProto.Entry entry : entries) {
+
+ FileStatus fileStatus = doGetFileStatus(new Path(path, entry.getName()), entry);
+
+ fileStatuses.add(fileStatus);
+ }
+ LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
+ return fileStatuses.toArray(new FileStatus[0]);
+
+ }
+
+ public FileStatus getFileStatus(final Path path) {
+
+ FilerProto.Entry entry = lookupEntry(path);
+ if (entry == null) {
+ return null;
+ }
+ LOG.debug("doGetFileStatus path:{} entry:{}", path, entry);
+
+ FileStatus fileStatus = doGetFileStatus(path, entry);
+ return fileStatus;
+ }
+
+ public boolean deleteEntries(final Path path, boolean isDirectory, boolean recursive) {
+ LOG.debug("deleteEntries path: {} isDirectory {} recursive: {}",
+ path,
+ String.valueOf(isDirectory),
+ String.valueOf(recursive));
+
+ if (path.isRoot()) {
+ return true;
+ }
+
+ if (recursive && isDirectory) {
+ List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
+ for (FilerProto.Entry entry : entries) {
+ deleteEntries(new Path(path, entry.getName()), entry.getIsDirectory(), true);
+ }
+ }
+
+ return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive, true);
+ }
+
+ private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {
+ FilerProto.FuseAttributes attributes = entry.getAttributes();
+ long length = SeaweedRead.totalSize(entry.getChunksList());
+ boolean isDir = entry.getIsDirectory();
+ int block_replication = 1;
+ int blocksize = 512;
+ long modification_time = attributes.getMtime() * 1000; // milliseconds
+ long access_time = 0;
+ FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode());
+ String owner = attributes.getUserName();
+ String group = attributes.getGroupNameCount() > 0 ? attributes.getGroupName(0) : "";
+ return new FileStatus(length, isDir, block_replication, blocksize,
+ modification_time, access_time, permission, owner, group, null, path);
+ }
+
+ private FilerProto.Entry lookupEntry(Path path) {
+
+ return filerClient.lookupEntry(getParentDirectory(path), path.getName());
+
+ }
+
+ public void rename(Path source, Path destination) {
+
+ LOG.debug("rename source: {} destination:{}", source, destination);
+
+ if (source.isRoot()) {
+ return;
+ }
+ LOG.info("rename source: {} destination:{}", source, destination);
+ FilerProto.Entry entry = lookupEntry(source);
+ if (entry == null) {
+ LOG.warn("rename non-existing source: {}", source);
+ return;
+ }
+ filerClient.mv(source.toUri().getPath(), destination.toUri().getPath());
+ }
+
+ public OutputStream createFile(final Path path,
+ final boolean overwrite,
+ FsPermission permission,
+ int bufferSize,
+ String replication) throws IOException {
+
+ permission = permission == null ? FsPermission.getFileDefault() : permission;
+
+ LOG.debug("createFile path: {} overwrite: {} permission: {}",
+ path,
+ overwrite,
+ permission.toString());
+
+ UserGroupInformation userGroupInformation = UserGroupInformation.getCurrentUser();
+ long now = System.currentTimeMillis() / 1000L;
+
+ FilerProto.Entry.Builder entry = null;
+ long writePosition = 0;
+ if (!overwrite) {
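+ // append mode: merge into the existing entry and continue writing at its current end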
+ FilerProto.Entry existingEntry = lookupEntry(path);
+ LOG.debug("createFile merged entry path:{} existingEntry:{}", path, existingEntry);
+ if (existingEntry != null) {
+ entry = FilerProto.Entry.newBuilder();
+ entry.mergeFrom(existingEntry);
+ entry.getAttributesBuilder().setMtime(now);
+ LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
+ writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ replication = existingEntry.getAttributes().getReplication();
+ }
+ }
+ if (entry == null) {
+ entry = FilerProto.Entry.newBuilder()
+ .setName(path.getName())
+ .setIsDirectory(false)
+ .setAttributes(FilerProto.FuseAttributes.newBuilder()
+ .setFileMode(permissionToMode(permission, false))
+ .setReplication(replication)
+ .setCrtime(now)
+ .setMtime(now)
+ .setUserName(userGroupInformation.getUserName())
+ .clearGroupName()
+ .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
+ );
+ }
+
+ return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);
+
+ }
+
+ public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics,
+ int bufferSize) throws IOException {
+
+ LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize);
+
+ int readAheadQueueDepth = 2;
+ FilerProto.Entry entry = lookupEntry(path);
+
+ if (entry == null) {
+ throw new FileNotFoundException("read non-exist file " + path);
+ }
+
+ return new SeaweedInputStream(filerGrpcClient,
+ statistics,
+ path.toUri().getPath(),
+ entry,
+ bufferSize,
+ readAheadQueueDepth);
+ }
+
+ public void setOwner(Path path, String owner, String group) {
+
+ LOG.debug("setOwner path:{} owner:{} group:{}", path, owner, group);
+
+ FilerProto.Entry entry = lookupEntry(path);
+ if (entry == null) {
+ LOG.debug("setOwner path:{} entry:{}", path, entry);
+ return;
+ }
+
+ FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
+ FilerProto.FuseAttributes.Builder attributesBuilder = entry.getAttributes().toBuilder();
+
+ if (owner != null) {
+ attributesBuilder.setUserName(owner);
+ }
+ if (group != null) {
+ attributesBuilder.clearGroupName();
+ attributesBuilder.addGroupName(group);
+ }
+
+ entryBuilder.setAttributes(attributesBuilder);
+
+ LOG.debug("setOwner path:{} entry:{}", path, entryBuilder);
+
+ filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
+
+ }
+
+ public void setPermission(Path path, FsPermission permission) {
+
+ LOG.debug("setPermission path:{} permission:{}", path, permission);
+
+ FilerProto.Entry entry = lookupEntry(path);
+ if (entry == null) {
+ LOG.debug("setPermission path:{} entry:{}", path, entry);
+ return;
+ }
+
+ FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
+ FilerProto.FuseAttributes.Builder attributesBuilder = entry.getAttributes().toBuilder();
+
+ attributesBuilder.setFileMode(permissionToMode(permission, entry.getIsDirectory()));
+
+ entryBuilder.setAttributes(attributesBuilder);
+
+ LOG.debug("setPermission path:{} entry:{}", path, entryBuilder);
+
+ filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
+
+ }
+
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java
new file mode 100644
index 000000000..90c14c772
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java
@@ -0,0 +1,371 @@
+package seaweed.hdfs;
+
+// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedRead;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.util.List;
+
+public class SeaweedInputStream extends FSInputStream {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);
+
+ private final FilerGrpcClient filerGrpcClient;
+ private final Statistics statistics;
+ private final String path;
+ private final FilerProto.Entry entry;
+ private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
+ private final long contentLength;
+ private final int bufferSize; // default buffer size
+ private final int readAheadQueueDepth; // initialized in constructor
+ private final boolean readAheadEnabled; // whether enable readAhead;
+
+ private byte[] buffer = null; // will be initialized on first use
+
+ private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server
+ private long fCursorAfterLastRead = -1;
+ private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer
+ private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1
+ // of valid bytes in buffer)
+ private boolean closed = false;
+
+ public SeaweedInputStream(
+ final FilerGrpcClient filerGrpcClient,
+ final Statistics statistics,
+ final String path,
+ final FilerProto.Entry entry,
+ final int bufferSize,
+ final int readAheadQueueDepth) {
+ this.filerGrpcClient = filerGrpcClient;
+ this.statistics = statistics;
+ this.path = path;
+ this.entry = entry;
+ this.contentLength = SeaweedRead.totalSize(entry.getChunksList());
+ this.bufferSize = bufferSize;
+ this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? readAheadQueueDepth : Runtime.getRuntime().availableProcessors();
+ this.readAheadEnabled = true;
+
+ this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList());
+
+ LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);
+
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ @Override
+ public int read() throws IOException {
+ byte[] b = new byte[1];
+ int numberOfBytesRead = read(b, 0, 1);
+ if (numberOfBytesRead < 0) {
+ return -1;
+ } else {
+ return (b[0] & 0xFF);
+ }
+ }
+
+ @Override
+ public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
+ int currentOff = off;
+ int currentLen = len;
+ int lastReadBytes;
+ int totalReadBytes = 0;
+ do {
+ lastReadBytes = readOneBlock(b, currentOff, currentLen);
+ if (lastReadBytes > 0) {
+ currentOff += lastReadBytes;
+ currentLen -= lastReadBytes;
+ totalReadBytes += lastReadBytes;
+ }
+ if (currentLen <= 0 || currentLen > b.length - currentOff) {
+ break;
+ }
+ } while (lastReadBytes > 0);
+ return totalReadBytes > 0 ? totalReadBytes : lastReadBytes;
+ }
+
+ private int readOneBlock(final byte[] b, final int off, final int len) throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+
+ Preconditions.checkNotNull(b);
+
+ if (len == 0) {
+ return 0;
+ }
+
+ if (this.available() == 0) {
+ return -1;
+ }
+
+ if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ //If buffer is empty, then fill the buffer.
+ if (bCursor == limit) {
+ //If EOF, then return -1
+ if (fCursor >= contentLength) {
+ return -1;
+ }
+
+ long bytesRead = 0;
+ //reset buffer to initial state - i.e., throw away existing data
+ bCursor = 0;
+ limit = 0;
+ if (buffer == null) {
+ buffer = new byte[bufferSize];
+ }
+
+ // Enable readAhead when reading sequentially
+ if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) {
+ bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false);
+ } else {
+ bytesRead = readInternal(fCursor, buffer, 0, b.length, true);
+ }
+
+ if (bytesRead == -1) {
+ return -1;
+ }
+
+ limit += bytesRead;
+ fCursor += bytesRead;
+ fCursorAfterLastRead = fCursor;
+ }
+
+ //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer)
+ //(bytes returned may be less than requested)
+ int bytesRemaining = limit - bCursor;
+ int bytesToRead = Math.min(len, bytesRemaining);
+ System.arraycopy(buffer, bCursor, b, off, bytesToRead);
+ bCursor += bytesToRead;
+ if (statistics != null) {
+ statistics.incrementBytesRead(bytesToRead);
+ }
+ return bytesToRead;
+ }
+
+
+ private int readInternal(final long position, final byte[] b, final int offset, final int length,
+ final boolean bypassReadAhead) throws IOException {
+ if (readAheadEnabled && !bypassReadAhead) {
+ // try reading from read-ahead
+ if (offset != 0) {
+ throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets");
+ }
+ int receivedBytes;
+
+ // queue read-aheads
+ int numReadAheads = this.readAheadQueueDepth;
+ long nextSize;
+ long nextOffset = position;
+ while (numReadAheads > 0 && nextOffset < contentLength) {
+ nextSize = Math.min((long) bufferSize, contentLength - nextOffset);
+ ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize);
+ nextOffset = nextOffset + nextSize;
+ numReadAheads--;
+ }
+
+ // try reading from buffers first
+ receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b);
+ if (receivedBytes > 0) {
+ return receivedBytes;
+ }
+
+ // got nothing from read-ahead, do our own read now
+ receivedBytes = readRemote(position, b, offset, length);
+ return receivedBytes;
+ } else {
+ return readRemote(position, b, offset, length);
+ }
+ }
+
+ int readRemote(long position, byte[] b, int offset, int length) throws IOException {
+ if (position < 0) {
+ throw new IllegalArgumentException("attempting to read from negative offset");
+ }
+ if (position >= contentLength) {
+ return -1; // Hadoop prefers -1 to EOFException
+ }
+ if (b == null) {
+ throw new IllegalArgumentException("null byte array passed in to read() method");
+ }
+ if (offset >= b.length) {
+ throw new IllegalArgumentException("offset greater than length of array");
+ }
+ if (length < 0) {
+ throw new IllegalArgumentException("requested read length is less than zero");
+ }
+ if (length > (b.length - offset)) {
+ throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
+ }
+
+ long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length);
+ if (bytesRead > Integer.MAX_VALUE) {
+ throw new IOException("Unexpected Content-Length");
+ }
+ return (int) bytesRead;
+ }
+
+ /**
+ * Seek to given position in stream.
+ *
+ * @param n position to seek to
+ * @throws IOException if there is an error
+ * @throws EOFException if attempting to seek past end of file
+ */
+ @Override
+ public synchronized void seek(long n) throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+ if (n < 0) {
+ throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+ }
+ if (n > contentLength) {
+ throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+ }
+
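+ // the buffer holds file offsets [fCursor - limit, fCursor); a seek inside
+ // that window only needs to move bCursor, not refetch data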
+ if (n >= fCursor - limit && n <= fCursor) { // within buffer
+ bCursor = (int) (n - (fCursor - limit));
+ return;
+ }
+
+ // next read will read from here
+ fCursor = n;
+
+ //invalidate buffer
+ limit = 0;
+ bCursor = 0;
+ }
+
+ @Override
+ public synchronized long skip(long n) throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+ long currentPos = getPos();
+ if (currentPos == contentLength) {
+ if (n > 0) {
+ throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+ }
+ }
+ long newPos = currentPos + n;
+ if (newPos < 0) {
+ newPos = 0;
+ n = newPos - currentPos;
+ }
+ if (newPos > contentLength) {
+ newPos = contentLength;
+ n = newPos - currentPos;
+ }
+ seek(newPos);
+ return n;
+ }
+
+ /**
+ * Return the size of the remaining available bytes
+ * if the size is less than or equal to {@link Integer#MAX_VALUE},
+ * otherwise, return {@link Integer#MAX_VALUE}.
+ *
+ * This is to match the behavior of DFSInputStream.available(),
+ * which some clients may rely on (HBase write-ahead log reading in
+ * particular).
+ */
+ @Override
+ public synchronized int available() throws IOException {
+ if (closed) {
+ throw new IOException(
+ FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+ final long remaining = this.contentLength - this.getPos();
+ return remaining <= Integer.MAX_VALUE
+ ? (int) remaining : Integer.MAX_VALUE;
+ }
+
+ /**
+ * Returns the length of the file that this stream refers to. Note that the length returned is the length
+ * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
+ * they won't be reflected in the returned length.
+ *
+ * @return length of the file.
+ * @throws IOException if the stream is closed
+ */
+ public long length() throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+ return contentLength;
+ }
+
+ /**
+ * Return the current offset from the start of the file
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public synchronized long getPos() throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+ return fCursor - limit + bCursor;
+ }
+
+ /**
+ * Seeks a different copy of the data. Returns true if
+ * found a new source, false otherwise.
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public boolean seekToNewSource(long l) throws IOException {
+ return false;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ closed = true;
+ buffer = null; // de-reference the buffer so it can be GC'ed sooner
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ *
+ * @param readlimit ignored
+ */
+ @Override
+ public synchronized void mark(int readlimit) {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ */
+ @Override
+ public synchronized void reset() throws IOException {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Gets whether mark and reset are supported by {@code SeaweedInputStream}. Always returns false.
+ *
+ * @return always {@code false}
+ */
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+}
diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
similarity index 99%
rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
rename to other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
index 4f307ff96..96af27fe0 100644
--- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
@@ -78,9 +78,6 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
}
private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
-
- LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);
-
try {
SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
} catch (Exception ex) {
diff --git a/snap/README.md b/snap/README.md
new file mode 100644
index 000000000..5752bd4af
--- /dev/null
+++ b/snap/README.md
@@ -0,0 +1,49 @@
+Hi
+
+This PR adds support for building a snap package of seaweedfs. Snaps are cross-distro Linux software packages. One snap can be installed on all supported Ubuntu LTS and non-LTS releases from 14.04 onward. Additionally, they can be installed on Debian, Manjaro, Fedora, OpenSUSE and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store.
+
+If accepted, you can use snapcraft locally, a CI system such as travis or circle-ci, or our free build system (build.snapcraft.io) to create snaps and upload them to the store (snapcraft.io/store).
+
+To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps.
+
+```
+snap install snapcraft --classic
+git clone https://github.com/popey/seaweedfs
+cd seaweedfs
+git checkout add-snapcraft
+snapcraft
+```
+
+This generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:-
+
+ snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous
+
+(the --dangerous is necessary because we're installing an app which hasn't gone through the snap store review process)
+
+Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an 'alias' so users can use the 'weed' command rather than the namespaced 'seaweedfs.weed'.
+
+- Run the command
+- Create sample config. Snaps are securely confined so their home directory is in a different place
+ mkdir ~/snap/seaweedfs/current/.seaweedfs
+ seaweedfs.weed scaffold > ~/snap/seaweed/current/.seaweedfs/filer.toml
+- Run a server
+ seaweedfs.weed server
+- Run a benchmark
+ seaweedfs.weed benchmark
+
+Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/
+
+If landed, you will need to:-
+
+- Register an account in the snap store https://snapcraft.io/account
+- Register the 'seaweedfs' name in the store
+ - snapcraft login
+ - snapcraft register seaweedfs
+- Upload a built snap to the store
+ - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge
+- Test installing on a clean Ubuntu 16.04 machine
+ - snap install seaweedfs --edge
+
+The store supports multiple risk levels as 'channels', with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally, beta and candidate channels can also be used if needed.
+
+Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there).
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
new file mode 100644
index 000000000..6449e9bfb
--- /dev/null
+++ b/snap/snapcraft.yaml
@@ -0,0 +1,53 @@
+# Name of snap as registered in the store
+name: seaweedfs
+# Automatically derive snap version from git tags
+version: git
+# Short human readable name as seen in 'snap find $SNAPNAME'
+summary: SeaweedFS
+# Longer multi-line description found in 'snap info $SNAPNAME'
+description: |
+ SeaweedFS is a simple and highly scalable distributed file system,
+ to store and serve billions of files fast!
+ SeaweedFS implements an object store with O(1) disk seek,
+ transparent cloud integration, and an optional Filer with POSIX interface,
+ supporting S3 API, Rack-Aware Erasure Coding for warm storage,
+ FUSE mount, Hadoop compatible, WebDAV.
+
+# Grade is stable for snaps expected to land in the stable channel
+grade: stable
+# Uses the strict confinement model and uses interfaces to open up access to
+# resources on the target host
+confinement: strict
+
+# List of parts which comprise the snap
+parts:
+ # The main part which defines how to build the application in the snap
+ seaweedfs:
+ # This part needs a newer version of golang, so we use a separate part
+ # which defines how to get a newer golang during the build
+ after: [go]
+ # The go plugin knows how to build go applications into a snap
+ plugin: go
+ # Snapcraft will look in this location for the source of the application
+ source: .
+ go-importpath: github.com/chrislusf/seaweedfs
+ go:
+ # Defines the version of golang which will be bootstrapped into the snap
+ source-tag: go1.14
+
+# Apps exposes the binaries inside the snap to the host system once installed
+apps:
+ # We expose the weed command.
+ # This differs from the snap name, so it will be namespaced as seaweedfs.weed
+ # An alias can be added to expose this as 'weed' if requested in the snapcraft forum
+ weed:
+ # The path to the binary inside the snap, relative to the $SNAP home
+ command: bin/weed
+ # Plugs connect the snap to resources on the host system. We enable network connectivity
+ # We also add home and removable-media (latter not autoconnected by default)
+ # so users can access files in their home or on removable disks
+ plugs:
+ - network
+ - network-bind
+ - home
+ - removable-media
diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go
index 779580a9b..afe651c4e 100644
--- a/unmaintained/change_superblock/change_superblock.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -8,7 +8,9 @@ import (
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
var (
@@ -46,9 +48,10 @@ func main() {
if err != nil {
glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
}
- defer datFile.Close()
+ datBackend := backend.NewDiskFile(datFile)
+ defer datBackend.Close()
- superBlock, err := storage.ReadSuperBlock(datFile)
+ superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
glog.Fatalf("cannot parse existing super block: %v", err)
@@ -60,7 +63,7 @@ func main() {
hasChange := false
if *targetReplica != "" {
- replica, err := storage.NewReplicaPlacementFromString(*targetReplica)
+ replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)
if err != nil {
glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
@@ -73,7 +76,7 @@ func main() {
}
if *targetTTL != "" {
- ttl, err := storage.ReadTTL(*targetTTL)
+ ttl, err := needle.ReadTTL(*targetTTL)
if err != nil {
glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
diff --git a/unmaintained/check_disk_size/check_disk_size.go b/unmaintained/check_disk_size/check_disk_size.go
new file mode 100644
index 000000000..4a8b92b88
--- /dev/null
+++ b/unmaintained/check_disk_size/check_disk_size.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+var (
+ dir = flag.String("dir", ".", "the directory which uses a disk")
+)
+
+func main() {
+ flag.Parse()
+
+ fillInDiskStatus(*dir)
+
+ fmt.Printf("OS: %v\n", runtime.GOOS)
+ fmt.Printf("Arch: %v\n", runtime.GOARCH)
+
+}
+
+func fillInDiskStatus(dir string) {
+ fs := syscall.Statfs_t{}
+ err := syscall.Statfs(dir, &fs)
+ if err != nil {
+ fmt.Printf("failed to statfs on %s: %v\n", dir, err)
+ return
+ }
+ fmt.Printf("statfs: %+v\n", fs)
+ fmt.Println()
+
+ total := fs.Blocks * uint64(fs.Bsize)
+ free := fs.Bfree * uint64(fs.Bsize)
+ fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total)
+ fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free)
+ fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free)
+ fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100))
+ fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100))
+ return
+}
diff --git a/unmaintained/compact_leveldb/compact_leveldb.go b/unmaintained/compact_leveldb/compact_leveldb.go
new file mode 100644
index 000000000..9be5697de
--- /dev/null
+++ b/unmaintained/compact_leveldb/compact_leveldb.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+ "flag"
+ "log"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ dir = flag.String("dir", ".", "data directory to store leveldb files")
+)
+
+func main() {
+
+ flag.Parse()
+
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10,
+ OpenFilesCacheCapacity: -1,
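+ // a capacity of -1 disables the open-files cache (same effect as opt.NoCacher)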
+ }
+
+ db, err := leveldb.OpenFile(*dir, opts)
+ if errors.IsCorrupted(err) {
+ db, err = leveldb.RecoverFile(*dir, opts)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+ if err := db.CompactRange(util.Range{}); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go
index 9eb64b3b4..d6110d870 100644
--- a/unmaintained/fix_dat/fix_dat.go
+++ b/unmaintained/fix_dat/fix_dat.go
@@ -9,7 +9,9 @@ import (
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -43,11 +45,13 @@ func main() {
glog.Fatalf("Read Volume Index %v", err)
}
defer indexFile.Close()
- datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDONLY, 0644)
+ datFileName := path.Join(*fixVolumePath, fileName+".dat")
+ datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
if err != nil {
glog.Fatalf("Read Volume Data %v", err)
}
- defer datFile.Close()
+ datBackend := backend.NewDiskFile(datFile)
+ defer datBackend.Close()
newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
if err != nil {
@@ -55,21 +59,21 @@ func main() {
}
defer newDatFile.Close()
+ newDatBackend := backend.NewDiskFile(newDatFile)
- superBlock, err := storage.ReadSuperBlock(datFile)
+ superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
glog.Fatalf("Read Volume Data superblock %v", err)
}
newDatFile.Write(superBlock.Bytes())
- iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) {
+ iterateEntries(datBackend, indexFile, func(n *needle.Needle, offset int64) {
fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize)
- _, s, _, e := n.Append(newDatFile, superBlock.Version())
+ _, s, _, e := n.Append(newDatBackend, superBlock.Version)
fmt.Printf("size %d error %v\n", s, e)
})
}
-func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needle, offset int64)) {
+func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) {
// start to read index file
var readerOffset int64
bytes := make([]byte, 16)
@@ -77,14 +81,14 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
readerOffset += int64(count)
// start to read dat file
- superBlock, err := storage.ReadSuperBlock(datFile)
+ superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
fmt.Printf("cannot read dat file super block: %v", err)
return
}
offset := int64(superBlock.BlockSize())
- version := superBlock.Version()
- n, rest, err := storage.ReadNeedleHeader(datFile, version, offset)
+ version := superBlock.Version
+ n, _, rest, err := needle.ReadNeedleHeader(datBackend, version, offset)
if err != nil {
fmt.Printf("cannot read needle header: %v", err)
return
@@ -106,7 +110,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex)
- rest = storage.NeedleBodyLength(sizeFromIndex, version)
+ rest = needle.NeedleBodyLength(sizeFromIndex, version)
func() {
defer func() {
@@ -114,7 +118,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
fmt.Println("Recovered in f", r)
}
}()
- if err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleEntrySize), rest); err != nil {
+ if _, err = n.ReadNeedleBody(datBackend, version, offset+int64(types.NeedleHeaderSize), rest); err != nil {
fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err)
}
}()
@@ -124,9 +128,9 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
}
visitNeedle(n, offset)
- offset += types.NeedleEntrySize + rest
+ offset += types.NeedleHeaderSize + rest
//fmt.Printf("==> new entry offset %d\n", offset)
- if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil {
+ if n, _, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
if err == io.EOF {
return
}
diff --git a/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go b/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go
new file mode 100644
index 000000000..43dfb0e21
--- /dev/null
+++ b/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go
@@ -0,0 +1,155 @@
+package main
+
+import (
+ "crypto/md5"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "math/rand"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+var (
+ dir = flag.String("dir", "./t", "directory to store level db files")
+ useHash = flag.Bool("isHash", false, "hash the path as the key")
+ dbCount = flag.Int("dbCount", 1, "the number of leveldb")
+)
+
+func main() {
+
+ flag.Parse()
+
+ totalTenants := 300
+ totalYears := 3
+
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+
+ var dbs []*leveldb.DB
+ var chans []chan string
+ for d := 0; d < *dbCount; d++ {
+ dbFolder := fmt.Sprintf("%s/%02d", *dir, d)
+ os.MkdirAll(dbFolder, 0755)
+ db, err := leveldb.OpenFile(dbFolder, opts)
+ if err != nil {
+ log.Printf("filer store open dir %s: %v", *dir, err)
+ return
+ }
+ dbs = append(dbs, db)
+ chans = append(chans, make(chan string, 1024))
+ }
+
+ var wg sync.WaitGroup
+ for d := 0; d < *dbCount; d++ {
+ wg.Add(1)
+ go func(d int) {
+ defer wg.Done()
+
+ ch := chans[d]
+ db := dbs[d]
+
+ for p := range ch {
+ if *useHash {
+ insertAsHash(db, p)
+ } else {
+ insertAsFullPath(db, p)
+ }
+ }
+ }(d)
+ }
+
+ counter := int64(0)
+ lastResetTime := time.Now()
+
+ r := rand.New(rand.NewSource(35))
+
+ for y := 0; y < totalYears; y++ {
+ for m := 0; m < 12; m++ {
+ for d := 0; d < 31; d++ {
+ for h := 0; h < 24; h++ {
+ for min := 0; min < 60; min++ {
+ for i := 0; i < totalTenants; i++ {
+ p := fmt.Sprintf("tenent%03d/%4d/%02d/%02d/%02d/%02d", i, 2015+y, 1+m, 1+d, h, min)
+
+ x := r.Intn(*dbCount)
+
+ chans[x] <- p
+
+ counter++
+ }
+
+ t := time.Now()
+ if lastResetTime.Add(time.Second).Before(t) {
+ p := fmt.Sprintf("%4d/%02d/%02d/%02d/%02d", 2015+y, 1+m, 1+d, h, min)
+ fmt.Printf("%s = %4d put/sec\n", p, counter)
+ counter = 0
+ lastResetTime = t
+ }
+ }
+ }
+ }
+ }
+ }
+
+ for d := 0; d < *dbCount; d++ {
+ close(chans[d])
+ }
+
+ wg.Wait()
+
+}
+
+func insertAsFullPath(db *leveldb.DB, p string) {
+ _, getErr := db.Get([]byte(p), nil)
+ if getErr == leveldb.ErrNotFound {
+ putErr := db.Put([]byte(p), []byte(p), nil)
+ if putErr != nil {
+ log.Printf("failed to put %s", p)
+ }
+ }
+}
+
+func insertAsHash(db *leveldb.DB, p string) {
+ key := fmt.Sprintf("%d:%s", hashToLong(p), p)
+ _, getErr := db.Get([]byte(key), nil)
+ if getErr == leveldb.ErrNotFound {
+ putErr := db.Put([]byte(key), []byte(p), nil)
+ if putErr != nil {
+ log.Printf("failed to put %s", p)
+ }
+ }
+}
+
+func hashToLong(dir string) (v int64) {
+ h := md5.New()
+ io.WriteString(h, dir)
+
+ b := h.Sum(nil)
+
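+ // fold the first 8 bytes of the md5 digest, big-endian, into an int64;
+ // effectively int64(binary.BigEndian.Uint64(b[:8]))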
+ v += int64(b[0])
+ v <<= 8
+ v += int64(b[1])
+ v <<= 8
+ v += int64(b[2])
+ v <<= 8
+ v += int64(b[3])
+ v <<= 8
+ v += int64(b[4])
+ v <<= 8
+ v += int64(b[5])
+ v <<= 8
+ v += int64(b[6])
+ v <<= 8
+ v += int64(b[7])
+
+ return
+}
diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
new file mode 100644
index 000000000..84173a663
--- /dev/null
+++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
@@ -0,0 +1,95 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+var (
+ volumePath = flag.String("dir", "/tmp", "data directory to store files")
+ volumeCollection = flag.String("collection", "", "the volume collection name")
+ volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
+)
+
+func Checksum(n *needle.Needle) string {
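+ // a needle counts as a duplicate when both its id and cookie match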
+ return fmt.Sprintf("%s%x", n.Id, n.Cookie)
+}
+
+type VolumeFileScanner4SeeDat struct {
+ version needle.Version
+ block super_block.SuperBlock
+
+ dir string
+ hashes map[string]bool
+ dat *os.File
+ datBackend backend.BackendStorageFile
+}
+
+func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ scanner.version = superBlock.Version
+ scanner.block = superBlock
+ return nil
+
+}
+func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
+ return true
+}
+
+func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+
+ if scanner.datBackend == nil {
+ newFileName := filepath.Join(*volumePath, "dat_fixed")
+ newDatFile, err := os.Create(newFileName)
+ if err != nil {
+ glog.Fatalf("Write New Volume Data %v", err)
+ }
+ scanner.datBackend = backend.NewDiskFile(newDatFile)
+ scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
+ }
+
+ checksum := Checksum(n)
+
+ if scanner.hashes[checksum] {
+ glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
+ return nil
+ }
+ scanner.hashes[checksum] = true
+
+ _, s, _, e := n.Append(scanner.datBackend, scanner.version)
+ fmt.Printf("size %d error %v\n", s, e)
+
+ return nil
+}
+
+func main() {
+ flag.Parse()
+
+ vid := needle.VolumeId(*volumeId)
+
+ outpath, _ := filepath.Abs(filepath.Dir(os.Args[0]))
+
+ scanner := &VolumeFileScanner4SeeDat{
+ dir: filepath.Join(outpath, "out"),
+ hashes: map[string]bool{},
+ }
+
+ if _, err := os.Stat(scanner.dir); err != nil {
+ if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
+ glog.Fatalf("could not create output dir : %s", err)
+ }
+ }
+
+ err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
+ if err != nil {
+ glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+ }
+
+}
diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go
index 7cc583f56..12ac42dbe 100644
--- a/unmaintained/repeated_vacuum/repeated_vacuum.go
+++ b/unmaintained/repeated_vacuum/repeated_vacuum.go
@@ -1,45 +1,73 @@
package main
import (
- "bytes"
"flag"
"fmt"
"log"
"math/rand"
+ "time"
+
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
- master = flag.String("master", "127.0.0.1:9333", "the master server")
- repeat = flag.Int("n", 5, "repeat how many times")
+ master = flag.String("master", "127.0.0.1:9333", "the master server")
+ repeat = flag.Int("n", 5, "repeat how many times")
+ garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold")
+ replication = flag.String("replication", "", "replication 000, 001, 002, etc")
)
func main() {
flag.Parse()
- for i := 0; i < *repeat; i++ {
- assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1})
- if err != nil {
- log.Fatalf("assign: %v", err)
+ util.LoadConfiguration("security", false)
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ genFile(grpcDialOption, 0)
+
+ go func() {
+ for {
+ println("vacuum threshold", *garbageThreshold)
+ _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
+ if err != nil {
+ log.Fatalf("vacuum: %v", err)
+ }
+ time.Sleep(time.Second)
}
+ }()
- data := make([]byte, 1024)
- rand.Read(data)
- reader := bytes.NewReader(data)
+ for i := 0; i < *repeat; i++ {
+ // create 2 files, and delete one of them
- targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+ assignResult, targetUrl := genFile(grpcDialOption, i)
- _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, "")
- if err != nil {
- log.Fatalf("upload: %v", err)
- }
+ util.Delete(targetUrl, string(assignResult.Auth))
- util.Delete(targetUrl, "")
+ }
- util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master))
+}
+func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
+ assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
+ Count: 1,
+ Replication: *replication,
+ })
+ if err != nil {
+ log.Fatalf("assign: %v", err)
}
+ data := make([]byte, 1024)
+ rand.Read(data)
+
+ targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+
+ _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
+ if err != nil {
+ log.Fatalf("upload: %v", err)
+ }
+ return assignResult, targetUrl
}
diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go
index f79c0a6a9..17c494841 100644
--- a/unmaintained/see_dat/see_dat.go
+++ b/unmaintained/see_dat/see_dat.go
@@ -2,8 +2,13 @@ package main
import (
"flag"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "time"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
var (
@@ -13,32 +18,33 @@ var (
)
type VolumeFileScanner4SeeDat struct {
- version storage.Version
+ version needle.Version
}
-func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error {
- scanner.version = superBlock.Version()
+func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ scanner.version = superBlock.Version
return nil
}
func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
- return false
+ return true
}
-func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *storage.Needle, offset int64) error {
- glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie)
+func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+ t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
+ glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
+ *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
return nil
}
func main() {
flag.Parse()
- vid := storage.VolumeId(*volumeId)
+ vid := needle.VolumeId(*volumeId)
scanner := &VolumeFileScanner4SeeDat{}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
if err != nil {
glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
}
-
}
diff --git a/unmaintained/see_dat/see_dat_gzip.go b/unmaintained/see_dat/see_dat_gzip.go
new file mode 100644
index 000000000..cec073e3f
--- /dev/null
+++ b/unmaintained/see_dat/see_dat_gzip.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/md5"
+ "flag"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type VolumeFileScanner4SeeDat struct {
+ version needle.Version
+}
+
+func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ scanner.version = superBlock.Version
+ return nil
+}
+
+func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
+ return true
+}
+
+var (
+ files = int64(0)
+ filebytes = int64(0)
+ diffbytes = int64(0)
+)
+
+func Compression(data []byte) float64 {
+ if len(data) <= 128 {
+ return 100.0
+ }
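+ // ratio of gzip'd size to original for the first 128 bytes:
+ // ~1.0 means incompressible, lower means better compression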
+ compressed, _ := util.GzipData(data[0:128])
+ return float64(len(compressed)*10) / 1280.0
+}
+
+func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+ t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
+ glog.V(0).Info("----------------------------------------------------------------------------------")
+ glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v hasmime[%t] mime[%s] (len: %d)",
+ *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.HasMime(), string(n.Mime), len(n.Mime))
+ r, err := gzip.NewReader(bytes.NewReader(n.Data))
+ if err == nil {
+ buf := bytes.Buffer{}
+ h := md5.New()
+ c, _ := io.Copy(&buf, r)
+ d := buf.Bytes()
+ io.Copy(h, bytes.NewReader(d))
+ diff := (int64(n.DataSize) - int64(c))
+ diffbytes += diff
+ glog.V(0).Infof("was gzip! stored_size: %d orig_size: %d diff: %d(%d) mime:%s compression-of-128: %.2f md5: %x", n.DataSize, c, diff, diffbytes, http.DetectContentType(d), Compresssion(d), h.Sum(nil))
+ } else {
+ glog.V(0).Infof("no gzip!")
+ }
+ return nil
+}
+
+var (
+ _ = ioutil.ReadAll
+ volumePath = flag.String("dir", "/tmp", "data directory to store files")
+ volumeCollection = flag.String("collection", "", "the volume collection name")
+ volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
+)
+
+func main() {
+ flag.Parse()
+ vid := needle.VolumeId(*volumeId)
+ glog.V(0).Info("Starting")
+ scanner := &VolumeFileScanner4SeeDat{}
+ err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
+ if err != nil {
+ glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+ }
+}
diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go
index 23ca04c2e..47cbd291b 100644
--- a/unmaintained/see_idx/see_idx.go
+++ b/unmaintained/see_idx/see_idx.go
@@ -3,12 +3,13 @@ package main
import (
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"os"
"path"
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -35,8 +36,8 @@ func main() {
}
defer indexFile.Close()
- storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
- fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
+ idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
+ fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
return nil
})
diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go
new file mode 100644
index 000000000..34965f6be
--- /dev/null
+++ b/unmaintained/see_log_entry/see_log_entry.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
+)
+
+func main() {
+ flag.Parse()
+
+ dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
+ if err != nil {
+ log.Fatalf("failed to open %s: %v", *logdataFile, err)
+ }
+ defer dst.Close()
+
+ err = walkLogEntryFile(dst)
+ if err != nil {
+ log.Fatalf("failed to visit %s: %v", *logdataFile, err)
+ }
+
+}
+
+func walkLogEntryFile(dst *os.File) error {
+
+ sizeBuf := make([]byte, 4)
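+ // the log file is a stream of records, each framed as a 4-byte length
+ // (decoded by util.BytesToUint32) followed by a protobuf-encoded LogEntry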
+
+ for {
+ if n, err := dst.Read(sizeBuf); n != 4 {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ size := util.BytesToUint32(sizeBuf)
+
+ data := make([]byte, int(size))
+
+ if n, err := dst.Read(data); n != len(data) {
+ return err
+ }
+
+ logEntry := &filer_pb.LogEntry{}
+ err := proto.Unmarshal(data, logEntry)
+ if err != nil {
+ log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+ return nil
+ }
+
+ event := &filer_pb.SubscribeMetadataResponse{}
+ err = proto.Unmarshal(logEntry.Data, event)
+ if err != nil {
+ log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ return nil
+ }
+
+ fmt.Printf("event: %+v\n", event)
+
+ }
+
+}
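The loop above implies the framing: each record is a 4-byte big-endian length prefix (util.BytesToUint32 reads big-endian) followed by a marshaled filer_pb.LogEntry. A sketch of the matching writer side, under that assumption:

```go
package main

import (
	"bufio"
	"encoding/binary"
	"log"
	"os"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// appendLogEntry frames one entry the way walkLogEntryFile expects:
// a 4-byte big-endian length prefix, then the protobuf payload.
func appendLogEntry(w *bufio.Writer, entry *filer_pb.LogEntry) error {
	data, err := proto.Marshal(entry)
	if err != nil {
		return err
	}
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(data)))
	if _, err := w.Write(sizeBuf); err != nil {
		return err
	}
	_, err = w.Write(data)
	return err
}

func main() {
	f, err := os.Create("/tmp/logdata.example")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	defer w.Flush()
	if err := appendLogEntry(w, &filer_pb.LogEntry{TsNs: 1, Data: []byte("payload")}); err != nil {
		log.Fatal(err)
	}
}
```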
diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go
new file mode 100644
index 000000000..452badfd6
--- /dev/null
+++ b/unmaintained/see_meta/see_meta.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ metaFile = flag.String("meta", "", "meta file generated via fs.meta.save")
+)
+
+func main() {
+ flag.Parse()
+
+ dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644)
+ if err != nil {
+ log.Fatalf("failed to open %s: %v", *metaFile, err)
+ }
+ defer dst.Close()
+
+ err = walkMetaFile(dst)
+ if err != nil {
+ log.Fatalf("failed to visit %s: %v", *metaFile, err)
+ }
+
+}
+
+func walkMetaFile(dst *os.File) error {
+
+ sizeBuf := make([]byte, 4)
+
+ for {
+ if _, err := io.ReadFull(dst, sizeBuf); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ size := util.BytesToUint32(sizeBuf)
+
+ data := make([]byte, int(size))
+
+ if _, err := io.ReadFull(dst, data); err != nil {
+ return err
+ }
+
+ fullEntry := &filer_pb.FullEntry{}
+ if err := proto.Unmarshal(data, fullEntry); err != nil {
+ return err
+ }
+
+ fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
+ for i, chunk := range fullEntry.Entry.Chunks {
+ fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String())
+ }
+
+ }
+
+}
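Each FullEntry printed above carries the directory, the attributes, and the chunk list. If the walker also needed the logical file size, a small helper summing the chunks (assuming they do not overlap) could be dropped into walkMetaFile:

```go
// totalChunkSize sums the chunk sizes of one FullEntry, giving a rough
// logical file size under the assumption that chunks do not overlap.
func totalChunkSize(fullEntry *filer_pb.FullEntry) (total uint64) {
	for _, chunk := range fullEntry.Entry.Chunks {
		total += chunk.Size
	}
	return
}
```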
diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
new file mode 100644
index 000000000..b2e4b28c6
--- /dev/null
+++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
@@ -0,0 +1,136 @@
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "mime/multipart"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ size = flag.Int("size", 1024, "file size")
+ concurrency = flag.Int("c", 4, "concurrent number of uploads")
+ times = flag.Int("n", 1024, "repeated number of times")
+ fileCount = flag.Int("fileCount", 1, "number of files to write")
+ destination = flag.String("to", "http://localhost:8888/", "destination directory on filer")
+
+ statsChan = make(chan stat, 8)
+)
+
+type stat struct {
+ size int64
+}
+
+func main() {
+
+ flag.Parse()
+
+ data := make([]byte, *size)
+ println("data len", len(data))
+
+ var wg sync.WaitGroup
+ for x := 0; x < *concurrency; x++ {
+ wg.Add(1)
+
+ go func(x int) {
+ defer wg.Done()
+
+ client := &http.Client{Transport: &http.Transport{
+ MaxConnsPerHost: 1024,
+ MaxIdleConnsPerHost: 1024,
+ }}
+ r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
+
+ for t := 0; t < *times; t++ {
+ for f := 0; f < *fileCount; f++ {
+ fn := r.Intn(*fileCount)
+ if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil {
+ statsChan <- stat{
+ size: size,
+ }
+ } else {
+ log.Fatalf("client %d upload %d times: %v", x, t, err)
+ }
+ }
+ }
+ }(x)
+ }
+
+ go func() {
+ ticker := time.NewTicker(1000 * time.Millisecond)
+
+ var lastTime time.Time
+ var counter, size int64
+ for {
+ select {
+ case stat := <-statsChan:
+ size += stat.size
+ counter++
+ case x := <-ticker.C:
+ if !lastTime.IsZero() {
+ elapsed := x.Sub(lastTime).Seconds()
+ fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
+ float64(counter)/elapsed,
+ float64(size)/1024/1024/elapsed)
+ }
+ lastTime = x
+ size = 0
+ counter = 0
+ }
+ }
+ }()
+
+ wg.Wait()
+
+}
+
+func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) {
+
+ if !strings.HasSuffix(destination, "/") {
+ destination = destination + "/"
+ }
+
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+ part, err := writer.CreateFormFile("file", filename)
+ if err != nil {
+ return 0, fmt.Errorf("fail to create form %v: %v", filename, err)
+ }
+
+ part.Write(data)
+
+ err = writer.Close()
+ if err != nil {
+ return 0, fmt.Errorf("fail to write part %v: %v", filename, err)
+ }
+
+ uri := destination + filename
+
+ request, err := http.NewRequest("POST", uri, body)
+ if err != nil {
+ return 0, fmt.Errorf("create http POST %s: %v", uri, err)
+ }
+ request.Header.Set("Content-Type", writer.FormDataContentType())
+ // request.Close = true // cannot be used here: it disables http connection reuse, which also impacts filer->volume traffic.
+
+ resp, err := client.Do(request)
+ if err != nil {
+ return 0, fmt.Errorf("http POST %s: %v", uri, err)
+ }
+ defer resp.Body.Close()
+ // drain the response so the connection can be reused
+ if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
+ }
+
+ return int64(len(data)), nil
+}
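One detail worth calling out in the reporting goroutine: the byte counter must be converted to float64 before dividing, otherwise an interval that moved less than a full megabyte truncates to 0.00 MB/s. A tiny demonstration:

```go
package main

import "fmt"

func main() {
	var bytesInInterval int64 = 512 * 1024 // half a megabyte
	elapsed := 1.0                         // seconds

	// integer division happens first and truncates to zero
	wrong := float64(bytesInInterval/1024/1024) / elapsed
	// convert first, then divide in floating point
	right := float64(bytesInInterval) / 1024 / 1024 / elapsed

	fmt.Printf("wrong: %.2f MB/s, right: %.2f MB/s\n", wrong, right)
}
```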
diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go
new file mode 100644
index 000000000..8b986b546
--- /dev/null
+++ b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go
@@ -0,0 +1,150 @@
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "mime/multipart"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ dir = flag.String("dir", ".", "upload files under this directory")
+ concurrency = flag.Int("c", 1, "concurrent number of uploads")
+ times = flag.Int("n", 1, "repeated number of times")
+ destination = flag.String("to", "http://localhost:8888/", "destination directory on filer")
+
+ statsChan = make(chan stat, 8)
+)
+
+type stat struct {
+ size int64
+}
+
+func main() {
+
+ flag.Parse()
+
+ var fileNames []string
+
+ files, err := ioutil.ReadDir(*dir)
+ if err != nil {
+ log.Fatalf("fail to read dir %v: %v", *dir, err)
+ }
+
+ for _, file := range files {
+ if file.IsDir() {
+ continue
+ }
+ fileNames = append(fileNames, filepath.Join(*dir, file.Name()))
+ }
+
+ var wg sync.WaitGroup
+ for x := 0; x < *concurrency; x++ {
+ wg.Add(1)
+
+ client := &http.Client{}
+
+ go func(x int) {
+ defer wg.Done()
+ // shuffle a private copy: calling rand.Shuffle on the shared
+ // slice from several goroutines is a data race
+ names := append([]string(nil), fileNames...)
+ r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
+ r.Shuffle(len(names), func(i, j int) {
+ names[i], names[j] = names[j], names[i]
+ })
+ for t := 0; t < *times; t++ {
+ for _, filename := range names {
+ if size, err := uploadFileToFiler(client, filename, *destination); err == nil {
+ statsChan <- stat{
+ size: size,
+ }
+ }
+ }
+ }
+ }(x)
+ }
+
+ go func() {
+ ticker := time.NewTicker(500 * time.Millisecond)
+
+ var lastTime time.Time
+ var counter, size int64
+ for {
+ select {
+ case stat := <-statsChan:
+ size += stat.size
+ counter++
+ case x := <-ticker.C:
+ if !lastTime.IsZero() {
+ elapsed := x.Sub(lastTime).Seconds()
+ fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
+ float64(counter)/elapsed,
+ float64(size)/1024/1024/elapsed)
+ }
+ lastTime = x
+ size = 0
+ counter = 0
+ }
+ }
+ }()
+
+ wg.Wait()
+
+}
+
+func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ panic(err)
+ }
+ defer file.Close()
+
+ fi, err := file.Stat()
+ if err != nil {
+ return 0, fmt.Errorf("stat %v: %v", filename, err)
+ }
+
+ if !strings.HasSuffix(destination, "/") {
+ destination = destination + "/"
+ }
+
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+ part, err := writer.CreateFormFile("file", file.Name())
+ if err != nil {
+ return 0, fmt.Errorf("fail to create form %v: %v", file.Name(), err)
+ }
+ _, err = io.Copy(part, file)
+ if err != nil {
+ return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err)
+ }
+
+ err = writer.Close()
+ if err != nil {
+ return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err)
+ }
+
+ uri := destination + file.Name()
+
+ request, err := http.NewRequest("POST", uri, body)
+ if err != nil {
+ return 0, fmt.Errorf("create http POST %s: %v", uri, err)
+ }
+ request.Header.Set("Content-Type", writer.FormDataContentType())
+
+ resp, err := client.Do(request)
+ if err != nil {
+ return 0, fmt.Errorf("http POST %s: %v", uri, err)
+ }
+ defer resp.Body.Close()
+ // drain the response so the connection can be reused
+ if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
+ }
+
+ return fi.Size(), nil
+}
diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go
new file mode 100644
index 000000000..e93f1cc13
--- /dev/null
+++ b/unmaintained/volume_tailer/volume_tailer.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "flag"
+ "log"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ util2 "github.com/chrislusf/seaweedfs/weed/util"
+ "golang.org/x/tools/godoc/util"
+)
+
+var (
+ master = flag.String("master", "localhost:9333", "master server host and port")
+ volumeId = flag.Int("volumeId", -1, "a volume id")
+ rewindDuration = flag.Duration("rewind", -1, "rewind back in time. -1 means from the first entry. 0 means from now.")
+ timeoutSeconds = flag.Int("timeoutSeconds", 0, "disconnect if no activity after these seconds")
+ showTextFile = flag.Bool("showTextFile", false, "display textual file content")
+)
+
+func main() {
+ flag.Parse()
+
+ util2.LoadConfiguration("security", false)
+ grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client")
+
+ vid := needle.VolumeId(*volumeId)
+
+ var sinceTimeNs int64
+ if *rewindDuration == 0 {
+ sinceTimeNs = time.Now().UnixNano()
+ } else if *rewindDuration == -1 {
+ sinceTimeNs = 0
+ } else if *rewindDuration > 0 {
+ sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
+ }
+
+ err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
+ if n.Size == 0 {
+ println("-", n.String())
+ return nil
+ } else {
+ println("+", n.String())
+ }
+
+ if *showTextFile {
+
+ data := n.Data
+ if n.IsCompressed() {
+ if data, err = util2.DecompressData(data); err != nil {
+ return err
+ }
+ }
+ if util.IsText(data) {
+ println(string(data))
+ }
+
+ println("-", n.String(), "compressed", n.IsCompressed(), "original size", len(data))
+ }
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error VolumeTailSender volume %d: %v", vid, err)
+ }
+
+}
diff --git a/util/gostd b/util/gostd
new file mode 100755
index 000000000..e9fc783d1
--- /dev/null
+++ b/util/gostd
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+############################ GLOBAL VARIABLES
+regex=' '
+branch="master"
+max_length=150
+
+REGEX_SUFFIX_GO=".+\.go$"
+
+############################ FUNCTIONS
+msg() {
+ printf '%b' "$1" >&2
+}
+
+die() {
+ msg "\33[31m[â]\33[0m ${1}${2}"
+ exit 1
+}
+
+succ() {
+ msg "\33[34m[â]\33[0m ${1}${2}"
+}
+
+gostd() {
+ local branch=$1
+ local reg4exclude=$2
+ local max_length=$3
+
+ for file in `git diff $branch --name-only`
+ do
+ if ! [[ $file =~ $REGEX_SUFFIX_GO ]] || [[ $file =~ $reg4exclude ]]; then
+ continue
+ fi
+
+ error=`go fmt $file 2>&1`
+ if ! [ $? -eq 0 ]; then
+ die "go fmt $file:" "$error"
+ fi
+
+ succ "$file\n"
+
+ grep -n -E --color=always ".{$max_length}" $file | awk '{ printf ("%4s %s\n", "", $0) }'
+ done
+}
+
+get_options() {
+ while getopts "b:e:hl:" opts
+ do
+ case $opts in
+ b)
+ branch=$OPTARG
+ ;;
+ e)
+ regex=$OPTARG
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ l)
+ max_length=$OPTARG
+ ;;
+ \?)
+ usage
+ exit 1
+ ;;
+ esac
+ done
+}
+
+usage () {
+ cat << _EOC_
+Usage:
+ gostd [options]
+
+Options:
+ -b Specify the git diff branch or commit.
+ (default: master)
+ -e Regex for excluding files or directories.
+ -h Print this usage.
+ -l Show lines that exceed the length limit.
+ (default: 150)
+
+Examples:
+ gostd
+ gostd -b master -l 100
+ gostd -b 59d532a -e weed/pb -l 100
+_EOC_
+}
+
+main() {
+ get_options "$@"
+
+ gostd "$branch" "$regex" "$max_length"
+}
+
+############################ MAIN()
+main "$@"
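gostd delegates the long-line check to grep -E ".{150}". For illustration, a rough Go equivalent of that check (printLongLines is a hypothetical helper, not part of the repository):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// printLongLines reports lines in a file that meet or exceed limit
// columns, roughly what `grep -E ".{150}"` does in gostd above.
func printLongLines(path string, limit int) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for n := 1; scanner.Scan(); n++ {
		if len(scanner.Text()) >= limit {
			fmt.Printf("%s:%d: %d chars\n", path, n, len(scanner.Text()))
		}
	}
	return scanner.Err()
}

func main() {
	for _, path := range os.Args[1:] {
		if err := printLongLines(path, 150); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
```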
diff --git a/weed/command/backup.go b/weed/command/backup.go
index 072aea75b..615be80cf 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -3,6 +3,11 @@ package command
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/storage"
)
@@ -12,10 +17,12 @@ var (
)
type BackupOptions struct {
- master *string
- collection *string
- dir *string
- volumeId *int
+ master *string
+ collection *string
+ dir *string
+ volumeId *int
+ ttl *string
+ replication *string
}
func init() {
@@ -24,32 +31,45 @@ func init() {
s.collection = cmdBackup.Flag.String("collection", "", "collection name")
s.dir = cmdBackup.Flag.String("dir", ".", "directory to store volume data files")
s.volumeId = cmdBackup.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.")
+ s.ttl = cmdBackup.Flag.String("ttl", "", `backup volume's time to live, format:
+ 3m: 3 minutes
+ 4h: 4 hours
+ 5d: 5 days
+ 6w: 6 weeks
+ 7M: 7 months
+ 8y: 8 years
+ default is the same as the original volume`)
+ s.replication = cmdBackup.Flag.String("replication", "", "backup volume's replication, default is the same as the original volume")
}
var cmdBackup = &Command{
UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333",
Short: "incrementally backup a volume to local folder",
Long: `Incrementally backup volume data.
-
+
It is expected that you use this inside a script, to loop through
all possible volume ids that needs to be backup to local folder.
-
+
The volume id does not need to exist locally or even remotely.
This will help to backup future new volumes.
-
+
Usually backing up is just copying the .dat (and .idx) files.
But it's tricky to incrementally copy the differences.
-
+
The complexity comes when there are multiple addition, deletion and compaction.
- This tool will handle them correctly and efficiently, avoiding unnecessary data transporation.
+ This tool will handle them correctly and efficiently, avoiding unnecessary data transportation.
`,
}
func runBackup(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
if *s.volumeId == -1 {
return false
}
- vid := storage.VolumeId(*s.volumeId)
+ vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info
lookup, err := operation.Lookup(*s.master, vid.String())
@@ -59,29 +79,73 @@ func runBackup(cmd *Command, args []string) bool {
}
volumeServer := lookup.Locations[0].Url
- stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid))
+ stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
if err != nil {
fmt.Printf("Error get volume %d status: %v\n", vid, err)
return true
}
- ttl, err := storage.ReadTTL(stats.Ttl)
- if err != nil {
- fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
- return true
+ var ttl *needle.TTL
+ if *s.ttl != "" {
+ ttl, err = needle.ReadTTL(*s.ttl)
+ if err != nil {
+ fmt.Printf("Error generate volume %d ttl %s: %v\n", vid, *s.ttl, err)
+ return true
+ }
+ } else {
+ ttl, err = needle.ReadTTL(stats.Ttl)
+ if err != nil {
+ fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
+ return true
+ }
}
- replication, err := storage.NewReplicaPlacementFromString(stats.Replication)
- if err != nil {
- fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
- return true
+ var replication *super_block.ReplicaPlacement
+ if *s.replication != "" {
+ replication, err = super_block.NewReplicaPlacementFromString(*s.replication)
+ if err != nil {
+ fmt.Printf("Error generate volume %d replication %s : %v\n", vid, *s.replication, err)
+ return true
+ }
+ } else {
+ replication, err = super_block.NewReplicaPlacementFromString(stats.Replication)
+ if err != nil {
+ fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
+ return true
+ }
}
-
- v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0)
+ v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
if err != nil {
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
return true
}
- if err := v.Synchronize(volumeServer); err != nil {
+ if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
+ if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
+ fmt.Printf("Compact Volume before synchronizing %v\n", err)
+ return true
+ }
+ if err = v.CommitCompact(); err != nil {
+ fmt.Printf("Commit Compact before synchronizing %v\n", err)
+ return true
+ }
+ v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision)
+ v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0)
+ }
+
+ datSize, _, _ := v.FileStat()
+
+ if datSize > stats.TailOffset {
+ // remove the old data
+ v.Destroy()
+ // recreate an empty volume
+ v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+ if err != nil {
+ fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
+ return true
+ }
+ }
+ defer v.Close()
+
+ if err := v.IncrementalBackup(volumeServer, grpcDialOption); err != nil {
fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
return true
}
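The reworked runBackup makes three decisions before tailing the remote volume: compact the local copy when its compaction revision lags the server's, recreate it empty when the local .dat has grown past the remote tail offset, and only then append incrementally. A schematic sketch of that branch logic, where the parameters are illustrative stand-ins for v.SuperBlock.CompactionRevision, stats.CompactRevision, the local .dat size, and stats.TailOffset:

```go
// syncDecision sketches the branch order in runBackup above; it is a
// schematic, not the actual implementation.
func syncDecision(localRev, remoteRev uint16, localSize, remoteTail uint64) string {
	if localRev < remoteRev {
		return "compact the local copy, then adopt the remote revision"
	}
	if localSize > remoteTail {
		return "local copy is ahead of the remote tail: recreate it empty"
	}
	return "incrementally append from the current tail offset"
}
```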
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 60fd88ccd..4a9a9619a 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -2,7 +2,6 @@ package command
import (
"bufio"
- "context"
"fmt"
"io"
"math"
@@ -15,6 +14,8 @@ import (
"sync"
"time"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -33,15 +34,18 @@ type BenchmarkOptions struct {
read *bool
sequentialRead *bool
collection *string
+ replication *string
cpuprofile *string
maxCpu *int
- secretKey *string
+ grpcDialOption grpc.DialOption
+ masterClient *wdclient.MasterClient
+ fsync *bool
}
var (
- b BenchmarkOptions
- sharedBytes []byte
- masterClient *wdclient.MasterClient
+ b BenchmarkOptions
+ sharedBytes []byte
+ isSecure bool
)
func init() {
@@ -57,14 +61,15 @@ func init() {
b.read = cmdBenchmark.Flag.Bool("read", true, "enable read")
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
+ b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
- b.secretKey = cmdBenchmark.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
+ b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
sharedBytes = make([]byte, 1024)
}
var cmdBenchmark = &Command{
- UsageLine: "benchmark -server=localhost:9333 -c=10 -n=100000",
+ UsageLine: "benchmark -master=localhost:9333 -c=10 -n=100000",
Short: "benchmark on writing millions of files and read out",
Long: `benchmark on an empty SeaweedFS file system.
@@ -102,7 +107,11 @@ var (
)
func runBenchmark(cmd *Command, args []string) bool {
- fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+
+ util.LoadConfiguration("security", false)
+ b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if *b.maxCpu < 1 {
*b.maxCpu = runtime.NumCPU()
}
@@ -116,9 +125,9 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
- masterClient = wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ","))
- go masterClient.KeepConnectedToMaster()
- masterClient.WaitUntilConnected()
+ b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, strings.Split(*b.masters, ","))
+ go b.masterClient.KeepConnectedToMaster()
+ b.masterClient.WaitUntilConnected()
if *b.write {
benchWrite()
@@ -188,7 +197,6 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
defer wait.Done()
delayedDeleteChan := make(chan *delayedFile, 100)
var waitForDeletions sync.WaitGroup
- secret := security.Secret(*b.secretKey)
for i := 0; i < 7; i++ {
waitForDeletions.Add(1)
@@ -198,8 +206,11 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
if df.enterTime.After(time.Now()) {
time.Sleep(df.enterTime.Sub(time.Now()))
}
- if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid,
- security.GenJwt(secret, df.fp.Fid)); e == nil {
+ var jwtAuthorization security.EncodedJwt
+ if isSecure {
+ jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), df.fp.Fid)
+ }
+ if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
s.completed++
} else {
s.failed++
@@ -214,17 +225,22 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
start := time.Now()
fileSize := int64(*b.fileSize + random.Intn(64))
fp := &operation.FilePart{
- Reader: &FakeReader{id: uint64(id), size: fileSize},
+ Reader: &FakeReader{id: uint64(id), size: fileSize, random: random},
FileSize: fileSize,
MimeType: "image/bench", // prevent gzip benchmark content
+ Fsync: *b.fsync,
}
ar := &operation.VolumeAssignRequest{
- Count: 1,
- Collection: *b.collection,
+ Count: 1,
+ Collection: *b.collection,
+ Replication: *b.replication,
}
- if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil {
+ if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
- if _, err := fp.Upload(0, masterClient.GetMaster(), secret); err == nil {
+ if !isSecure && assignResult.Auth != "" {
+ isSecure = true
+ }
+ if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -264,19 +280,24 @@ func readFiles(fileIdLineChan chan string, s *stat) {
fmt.Printf("reading file %s\n", fid)
}
start := time.Now()
- url, err := masterClient.LookupFileId(fid)
+ var bytesRead int
+ var err error
+ url, err := b.masterClient.LookupFileId(fid)
if err != nil {
s.failed++
println("!!!! ", fid, " location not found!!!!!")
continue
}
- if bytesRead, err := util.Get(url); err == nil {
+ var bytes []byte
+ bytes, err = util.Get(url)
+ bytesRead = len(bytes)
+ if err == nil {
s.completed++
- s.transferred += int64(len(bytesRead))
+ s.transferred += int64(bytesRead)
readStats.addSample(time.Now().Sub(start))
} else {
s.failed++
- fmt.Printf("Failed to read %s error:%v\n", url, err)
+ fmt.Printf("Failed to read %s error:%v\n", fid, err)
}
}
}
@@ -338,7 +359,7 @@ func readFileIds(fileName string, fileIdLineChan chan string) {
}
const (
- benchResolution = 10000 //0.1 microsecond
+ benchResolution = 10000 // each bucket is 0.1 millisecond
benchBucket = 1000000000 / benchResolution
)
@@ -461,7 +482,7 @@ func (s *stats) printStats() {
fmt.Printf("\nConnection Times (ms)\n")
fmt.Printf(" min avg max std\n")
fmt.Printf("Total: %2.1f %3.1f %3.1f %3.1f\n", float32(min)/10, float32(avg)/10, float32(max)/10, std/10)
- //printing percentiles
+ // printing percentiles
fmt.Printf("\nPercentage of the requests served within a certain time (ms)\n")
percentiles := make([]int, len(percentages))
for i := 0; i < len(percentages); i++ {
@@ -495,8 +516,9 @@ func (s *stats) printStats() {
// a fake reader to generate content to upload
type FakeReader struct {
- id uint64 // an id number
- size int64 // max bytes
+ id uint64 // an id number
+ size int64 // max bytes
+ random *rand.Rand
}
func (l *FakeReader) Read(p []byte) (n int, err error) {
@@ -512,6 +534,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
for i := 0; i < 8; i++ {
p[i] = byte(l.id >> uint(i*8))
}
+ l.random.Read(p[8:])
}
l.size -= int64(n)
return
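The FakeReader change fills everything past the 8-byte id with random bytes, presumably so write benchmarks are not flattered by highly compressible payloads anywhere along the path. A quick sketch of how much that matters:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"math/rand"
)

// gzipLen returns the compressed size of p.
func gzipLen(p []byte) int {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write(p)
	w.Close()
	return buf.Len()
}

func main() {
	repetitive := bytes.Repeat([]byte{0xAB}, 1024)

	random := make([]byte, 1024)
	rand.New(rand.NewSource(1)).Read(random)

	fmt.Printf("repetitive 1KB -> %d bytes gzipped\n", gzipLen(repetitive))
	fmt.Printf("random     1KB -> %d bytes gzipped\n", gzipLen(random))
}
```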
diff --git a/weed/command/command.go b/weed/command/command.go
index 91b9bf3fc..9a41a8a7c 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -12,21 +12,23 @@ var Commands = []*Command{
cmdBackup,
cmdCompact,
cmdCopy,
- cmdFix,
- cmdFilerExport,
+ cmdDownload,
+ cmdExport,
+ cmdFiler,
cmdFilerReplicate,
- cmdServer,
+ cmdFix,
cmdMaster,
- cmdFiler,
+ cmdMount,
cmdS3,
- cmdUpload,
- cmdDownload,
+ cmdMsgBroker,
cmdScaffold,
+ cmdServer,
cmdShell,
+ cmdWatch,
+ cmdUpload,
cmdVersion,
cmdVolume,
- cmdExport,
- cmdMount,
+ cmdWebDav,
}
type Command struct {
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 0dd4efe0e..4e28aa725 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -3,6 +3,7 @@ package command
import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func init() {
@@ -16,6 +17,9 @@ var cmdCompact = &Command{
The compacted .dat file is stored as .cpd file.
The compacted .idx file is stored as .cpx file.
+ For method=0, compaction is based on the .dat file; it works even if the .idx file is corrupted.
+ For method=1, compaction is based on the .idx file; it works when deletions happened but were not yet written to the .dat file.
+
`,
}
@@ -35,18 +39,18 @@ func runCompact(cmd *Command, args []string) bool {
preallocate := *compactVolumePreallocate * (1 << 20)
- vid := storage.VolumeId(*compactVolumeId)
+ vid := needle.VolumeId(*compactVolumeId)
v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid,
- storage.NeedleMapInMemory, nil, nil, preallocate)
+ storage.NeedleMapInMemory, nil, nil, preallocate, 0)
if err != nil {
glog.Fatalf("Load Volume [ERROR] %s\n", err)
}
if *compactMethod == 0 {
- if err = v.Compact(preallocate); err != nil {
+ if err = v.Compact(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {
- if err = v.Compact2(); err != nil {
+ if err = v.Compact2(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
}
diff --git a/weed/command/download.go b/weed/command/download.go
index b3e33defd..be0eb47e5 100644
--- a/weed/command/download.go
+++ b/weed/command/download.go
@@ -71,6 +71,7 @@ func downloadToFile(server, fileId, saveDir string) error {
}
f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
if err != nil {
+ io.Copy(ioutil.Discard, rc)
return err
}
defer f.Close()
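The added io.Copy(ioutil.Discard, rc) before the early return matters because Go's HTTP client only returns a keep-alive connection to the pool once the response body has been fully read and closed. A reusable helper in that spirit (drainAndClose is a hypothetical name, assuming the io and io/ioutil imports):

```go
// drainAndClose consumes any unread payload so the underlying
// keep-alive connection can be reused, then closes the body.
func drainAndClose(rc io.ReadCloser) error {
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		rc.Close()
		return err
	}
	return rc.Close()
}
```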
diff --git a/weed/command/export.go b/weed/command/export.go
index 5c7e064ce..5d304b5a0 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -4,6 +4,7 @@ import (
"archive/tar"
"bytes"
"fmt"
+ "io"
"os"
"path"
"path/filepath"
@@ -14,8 +15,11 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
- "io"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const (
@@ -66,17 +70,17 @@ var (
localLocation, _ = time.LoadLocation("Local")
)
-func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Version, deleted bool) {
- key := storage.NewFileIdFromNeedle(vid, n).String()
+func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) {
+ key := needle.NewFileIdFromNeedle(vid, n).String()
size := n.DataSize
- if version == storage.Version1 {
+ if version == needle.Version1 {
size = n.Size
}
fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n",
key,
n.Name,
size,
- n.IsGzipped(),
+ n.IsCompressed(),
n.Mime,
n.LastModifiedString(),
n.Ttl.String(),
@@ -85,14 +89,14 @@ func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Versio
}
type VolumeFileScanner4Export struct {
- version storage.Version
+ version needle.Version
counter int
- needleMap *storage.NeedleMap
- vid storage.VolumeId
+ needleMap *needle_map.MemDb
+ vid needle.VolumeId
}
-func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error {
- scanner.version = superBlock.Version()
+func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ scanner.version = superBlock.Version
return nil
}
@@ -100,14 +104,14 @@ func (scanner *VolumeFileScanner4Export) ReadNeedleBody() bool {
return true
}
-func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset int64) error {
+func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
needleMap := scanner.needleMap
vid := scanner.vid
nv, ok := needleMap.Get(n.Id)
- glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v",
- n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv)
- if ok && nv.Size > 0 && int64(nv.Offset)*types.NeedlePaddingSize == offset {
+ glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
+ n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
+ if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)
@@ -189,16 +193,13 @@ func runExport(cmd *Command, args []string) bool {
if *export.collection != "" {
fileName = *export.collection + "_" + fileName
}
- vid := storage.VolumeId(*export.volumeId)
- indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644)
- if err != nil {
- glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
- }
- defer indexFile.Close()
+ vid := needle.VolumeId(*export.volumeId)
- needleMap, err := storage.LoadBtreeNeedleMap(indexFile)
- if err != nil {
- glog.Fatalf("cannot load needle map from %s: %s", indexFile.Name(), err)
+ needleMap := needle_map.NewMemDb()
+ defer needleMap.Close()
+
+ if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil {
+ glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
}
volumeFileScanner := &VolumeFileScanner4Export{
@@ -225,8 +226,8 @@ type nameParams struct {
Ext string
}
-func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) {
- key := storage.NewFileIdFromNeedle(vid, n).String()
+func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) {
+ key := needle.NewFileIdFromNeedle(vid, n).String()
fileNameTemplateBuffer.Reset()
if err = fileNameTemplate.Execute(fileNameTemplateBuffer,
nameParams{
@@ -242,8 +243,11 @@ func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) {
fileName := fileNameTemplateBuffer.String()
- if n.IsGzipped() && path.Ext(fileName) != ".gz" {
- fileName = fileName + ".gz"
+ if n.IsCompressed() {
+ if util.IsGzippedContent(n.Data) && path.Ext(fileName) != ".gz" {
+ fileName = fileName + ".gz"
+ }
+ // TODO other compression method
}
tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data))
diff --git a/weed/command/filer.go b/weed/command/filer.go
index 0c1950f96..b52b01149 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -6,11 +6,14 @@ import (
"strings"
"time"
+ "google.golang.org/grpc/reflection"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc/reflection"
)
var (
@@ -20,18 +23,19 @@ var (
type FilerOptions struct {
masters *string
ip *string
+ bindIp *string
port *int
- grpcPort *int
publicPort *int
collection *string
defaultReplicaPlacement *string
- redirectOnRead *bool
disableDirListing *bool
maxMB *int
- secretKey *string
dirListingLimit *int
dataCenter *string
enableNotification *bool
+ disableHttp *bool
+ cipher *bool
+ peers *string
// default leveldb directory, used in "weed server" mode
defaultLevelDbDirectory *string
@@ -41,17 +45,18 @@ func init() {
cmdFiler.Run = runFiler // break init cycle
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
- f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
+ f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
+ f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
- f.grpcPort = cmdFiler.Flag.Int("port.grpc", 0, "filer grpc server listen port, default to http port + 10000")
- f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public")
+ f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
- f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
- f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
+ f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http requests; only gRPC operations are allowed")
+ f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
+ f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
}
var cmdFiler = &Command{
@@ -70,13 +75,15 @@ var cmdFiler = &Command{
The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
- The example filer.toml configuration file can be generated by "weed scaffold filer"
+ The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
`,
}
func runFiler(cmd *Command, args []string) bool {
+ util.LoadConfiguration("security", false)
+
f.startFiler()
return true
@@ -91,30 +98,38 @@ func (fo *FilerOptions) startFiler() {
publicVolumeMux = http.NewServeMux()
}
- defaultLevelDbDirectory := "./filerdb"
+ defaultLevelDbDirectory := "./filerldb2"
if fo.defaultLevelDbDirectory != nil {
- defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerdb"
+ defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2"
+ }
+
+ var peers []string
+ if *fo.peers != "" {
+ peers = strings.Split(*fo.peers, ",")
}
fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
- Masters: strings.Split(*f.masters, ","),
+ Masters: strings.Split(*fo.masters, ","),
Collection: *fo.collection,
DefaultReplication: *fo.defaultReplicaPlacement,
- RedirectOnRead: *fo.redirectOnRead,
DisableDirListing: *fo.disableDirListing,
MaxMB: *fo.maxMB,
- SecretKey: *fo.secretKey,
DirListingLimit: *fo.dirListingLimit,
DataCenter: *fo.dataCenter,
DefaultLevelDbDir: defaultLevelDbDirectory,
+ DisableHttp: *fo.disableHttp,
+ Host: *fo.ip,
+ Port: uint32(*fo.port),
+ Cipher: *fo.cipher,
+ Filers: peers,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
}
if *fo.publicPort != 0 {
- publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort)
- glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress)
+ publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
+ glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
@@ -126,9 +141,9 @@ func (fo *FilerOptions) startFiler() {
}()
}
- glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port)
+ glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
- ":"+strconv.Itoa(*fo.port),
+ *fo.bindIp+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
@@ -136,15 +151,12 @@ func (fo *FilerOptions) startFiler() {
}
// starting grpc server
- grpcPort := *fo.grpcPort
- if grpcPort == 0 {
- grpcPort = *fo.port + 10000
- }
+ grpcPort := *fo.port + 10000
grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
- grpcS := util.NewGrpcServer()
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
reflection.Register(grpcS)
go grpcS.Serve(grpcL)
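With the -port.grpc flag removed, the filer's gRPC port is now fixed by convention at the HTTP port plus 10000, so a filer on 8888 serves gRPC on 18888; filer.copy derives the address the same way below. A client-side helper would simply be:

```go
// filerGrpcAddress derives the conventional gRPC address from a
// filer's HTTP host and port: gRPC listens on httpPort + 10000.
func filerGrpcAddress(host string, httpPort int) string {
	return fmt.Sprintf("%s:%d", host, httpPort+10000)
}
```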
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 3638bcb27..2d6ba94d6 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -1,52 +1,62 @@
package command
import (
+ "context"
"fmt"
+ "io"
"io/ioutil"
+ "net/http"
"net/url"
"os"
"path/filepath"
+ "strconv"
"strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
- "context"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
- "io"
- "net/http"
- "strconv"
- "time"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
)
var (
- copy CopyOptions
+ copy CopyOptions
+ waitGroup sync.WaitGroup
)
type CopyOptions struct {
- filerGrpcPort *int
- master *string
- include *string
- replication *string
- collection *string
- ttl *string
- maxMB *int
- secretKey *string
-
- secret security.Secret
+ include *string
+ replication *string
+ collection *string
+ ttl *string
+ maxMB *int
+ masterClient *wdclient.MasterClient
+ concurrentFiles *int
+ concurrentChunks *int
+ grpcDialOption grpc.DialOption
+ masters []string
+ cipher bool
+ ttlSec int32
}
func init() {
cmdCopy.Run = runCopy // break init cycle
cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
- copy.master = cmdCopy.Flag.String("master", "localhost:9333", "SeaweedFS master location")
copy.include = cmdCopy.Flag.String("include", "", "patterns of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
- copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit")
- copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000")
- copy.secretKey = cmdCopy.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
+ copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
+ copy.concurrentFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
+ copy.concurrentChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
}
var cmdCopy = &Command{
@@ -66,7 +76,9 @@ var cmdCopy = &Command{
}
func runCopy(cmd *Command, args []string) bool {
- copy.secret = security.Secret(*copy.secretKey)
+
+ util.LoadConfiguration("security", false)
+
if len(args) <= 1 {
return false
}
@@ -96,221 +108,380 @@ func runCopy(cmd *Command, args []string) bool {
}
filerGrpcPort := filerPort + 10000
- if *copy.filerGrpcPort != 0 {
- filerGrpcPort = uint64(*copy.filerGrpcPort)
+ filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
+ copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ masters, collection, replication, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
+ if err != nil {
+ fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
+ return false
+ }
+ if *copy.collection == "" {
+ *copy.collection = collection
+ }
+ if *copy.replication == "" {
+ *copy.replication = replication
}
+ if *copy.maxMB == 0 {
+ *copy.maxMB = int(maxMB)
+ }
+ copy.masters = masters
+ copy.cipher = cipher
- filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
+ ttl, err := needle.ReadTTL(*copy.ttl)
+ if err != nil {
+ fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err)
+ return false
+ }
+ copy.ttlSec = int32(ttl.Minutes()) * 60
+
+ if *cmdCopy.IsDebug {
+ grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
+ }
+
+ fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrentFiles)
- for _, fileOrDir := range fileOrDirs {
- if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) {
- return false
+ go func() {
+ defer close(fileCopyTaskChan)
+ for _, fileOrDir := range fileOrDirs {
+ if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil {
+ fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err)
+ break
+ }
}
+ }()
+ for i := 0; i < *copy.concurrenctFiles; i++ {
+ waitGroup.Add(1)
+ go func() {
+ defer waitGroup.Done()
+ worker := FileCopyWorker{
+ options: ©,
+ filerHost: filerUrl.Host,
+ filerGrpcAddress: filerGrpcAddress,
+ }
+ if err := worker.copyFiles(fileCopyTaskChan); err != nil {
+ fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
+ return
+ }
+ }()
}
+ waitGroup.Wait()
+
return true
}
-func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool {
- f, err := os.Open(fileOrDir)
- if err != nil {
- fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err)
- return false
- }
- defer f.Close()
+func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, cipher bool, err error) {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
+ cipher = resp.Cipher
+ return nil
+ })
+ return
+}
+
+func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error {
- fi, err := f.Stat()
+ fi, err := os.Stat(fileOrDir)
if err != nil {
- fmt.Printf("Failed to get stat for file %s: %v\n", fileOrDir, err)
- return false
+ fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err)
+ return nil
}
mode := fi.Mode()
if mode.IsDir() {
files, _ := ioutil.ReadDir(fileOrDir)
for _, subFileOrDir := range files {
- if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") {
- return false
+ if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
+ return err
}
}
- return true
+ return nil
}
+ uid, gid := util.GetFileUidGid(fi)
+
+ fileCopyTaskChan <- FileCopyTask{
+ sourceLocation: fileOrDir,
+ destinationUrlPath: destPath,
+ fileSize: fi.Size(),
+ fileMode: fi.Mode(),
+ uid: uid,
+ gid: gid,
+ }
+
+ return nil
+}
+
+type FileCopyWorker struct {
+ options *CopyOptions
+ filerHost string
+ filerGrpcAddress string
+}
+
+func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
+ for task := range fileCopyTaskChan {
+ if err := worker.doEachCopy(task); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type FileCopyTask struct {
+ sourceLocation string
+ destinationUrlPath string
+ fileSize int64
+ fileMode os.FileMode
+ uid uint32
+ gid uint32
+}
+
+func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
+
+ f, err := os.Open(task.sourceLocation)
+ if err != nil {
+ fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err)
+ if _, ok := err.(*os.PathError); ok {
+ fmt.Printf("skipping %s\n", task.sourceLocation)
+ return nil
+ }
+ return err
+ }
+ defer f.Close()
+
// this is a regular file
- if *copy.include != "" {
- if ok, _ := filepath.Match(*copy.include, filepath.Base(fileOrDir)); !ok {
- return true
+ if *worker.options.include != "" {
+ if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok {
+ return nil
}
}
// find the chunk count
- chunkSize := int64(*copy.maxMB * 1024 * 1024)
+ chunkSize := int64(*worker.options.maxMB * 1024 * 1024)
chunkCount := 1
- if chunkSize > 0 && fi.Size() > chunkSize {
- chunkCount = int(fi.Size()/chunkSize) + 1
+ if chunkSize > 0 && task.fileSize > chunkSize {
+ chunkCount = int(task.fileSize/chunkSize) + 1
}
if chunkCount == 1 {
- return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi)
+ return worker.uploadFileAsOne(task, f)
}
- return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize)
+ return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
}
-func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool {
+func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {
// upload the file content
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
var chunks []*filer_pb.FileChunk
+ var assignResult *filer_pb.AssignVolumeResponse
+ var assignError error
- if fi.Size() > 0 {
+ if task.fileSize > 0 {
// assign a volume
- assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
- Count: 1,
- Replication: *copy.replication,
- Collection: *copy.collection,
- Ttl: *copy.ttl,
+ err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
+ ParentPath: task.destinationUrlPath,
+ }
+
+ assignResult, assignError = client.AssignVolume(context.Background(), request)
+ if assignError != nil {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+ }
+ if assignResult.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+ }
+ return nil
})
if err != nil {
- fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err)
+ return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
- targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
- uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, "")
+ uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
- fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err)
- return false
+ return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
}
if uploadResult.Error != "" {
- fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
- return false
+ return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
}
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
- chunks = append(chunks, &filer_pb.FileChunk{
- FileId: assignResult.Fid,
- Offset: 0,
- Size: uint64(uploadResult.Size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- })
+ chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
- fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName)
+ fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
}
- if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error {
+ if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
- Directory: urlFolder,
+ Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
Name: fileName,
Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
- Gid: uint32(os.Getgid()),
- Uid: uint32(os.Getuid()),
- FileSize: uint64(fi.Size()),
- FileMode: uint32(fi.Mode()),
+ Gid: task.gid,
+ Uid: task.uid,
+ FileSize: uint64(task.fileSize),
+ FileMode: uint32(task.fileMode),
Mime: mimeType,
- Replication: *copy.replication,
- Collection: *copy.collection,
- TtlSec: int32(util.ParseInt(*copy.ttl, 0)),
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
- if _, err := client.CreateEntry(context.Background(), request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
}); err != nil {
- fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err)
- return false
+ return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
}
- return true
+ return nil
}
-func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
+func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
- var chunks []*filer_pb.FileChunk
+ chunksChan := make(chan *filer_pb.FileChunk, chunkCount)
+
+ concurrentChunks := make(chan struct{}, *worker.options.concurrentChunks)
+ var wg sync.WaitGroup
+ var uploadError error
+ var collection, replication string
+
+ fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
+ for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
+ wg.Add(1)
+ concurrentChunks <- struct{}{}
+ go func(i int64) {
+ defer func() {
+ wg.Done()
+ <-concurrentChunks
+ }()
+ // assign a volume
+ var assignResult *filer_pb.AssignVolumeResponse
+ var assignError error
+ err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
+ ParentPath: task.destinationUrlPath,
+ }
+
+ assignResult, assignError = client.AssignVolume(context.Background(), request)
+ if assignError != nil {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+ }
+ if assignResult.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ uploadError = fmt.Errorf("Failed to assign from %v: %v", worker.options.masters, err)
+ return
+ }
- for i := int64(0); i < int64(chunkCount); i++ {
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
+ if collection == "" {
+ collection = assignResult.Collection
+ }
+ if replication == "" {
+ replication = assignResult.Replication
+ }
- // assign a volume
- assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
- Count: 1,
- Replication: *copy.replication,
- Collection: *copy.collection,
- Ttl: *copy.ttl,
- })
- if err != nil {
- fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err)
- }
+ uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
+ if err != nil {
+ uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
+ return
+ }
+ if uploadResult.Error != "" {
+ uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
+ return
+ }
+ chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)
- targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
+ }(i)
+ }
+ wg.Wait()
+ close(chunksChan)
- uploadResult, err := operation.Upload(targetUrl,
- fileName+"-"+strconv.FormatInt(i+1, 10),
- io.LimitReader(f, chunkSize),
- false, "application/octet-stream", nil, "")
- if err != nil {
- fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err)
- return false
- }
- if uploadResult.Error != "" {
- fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
- return false
+ var chunks []*filer_pb.FileChunk
+ for chunk := range chunksChan {
+ chunks = append(chunks, chunk)
+ }
+
+ if uploadError != nil {
+ var fileIds []string
+ for _, chunk := range chunks {
+ fileIds = append(fileIds, chunk.FileId)
}
- chunks = append(chunks, &filer_pb.FileChunk{
- FileId: assignResult.Fid,
- Offset: i * chunkSize,
- Size: uint64(uploadResult.Size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- })
- fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
+ operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds)
+ return uploadError
}
- if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error {
+ if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
- Directory: urlFolder,
+ Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
Name: fileName,
Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
- Gid: uint32(os.Getgid()),
- Uid: uint32(os.Getuid()),
- FileSize: uint64(fi.Size()),
- FileMode: uint32(fi.Mode()),
+ Gid: task.gid,
+ Uid: task.uid,
+ FileSize: uint64(task.fileSize),
+ FileMode: uint32(task.fileMode),
Mime: mimeType,
- Replication: *copy.replication,
- Collection: *copy.collection,
- TtlSec: int32(util.ParseInt(*copy.ttl, 0)),
+ Replication: replication,
+ Collection: collection,
+ TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
- if _, err := client.CreateEntry(context.Background(), request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
}); err != nil {
- fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err)
- return false
+ return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
}
- fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName)
+ fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
- return true
+ return nil
}
func detectMimeType(f *os.File) string {
@@ -322,22 +493,12 @@ func detectMimeType(f *os.File) string {
}
if err != nil {
fmt.Printf("read head of %v: %v\n", f.Name(), err)
- return "application/octet-stream"
+ return ""
}
f.Seek(0, io.SeekStart)
mimeType := http.DetectContentType(head[:n])
- return mimeType
-}
-
-func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error {
-
- grpcConnection, err := util.GrpcDial(filerAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", filerAddress, err)
+ if mimeType == "application/octet-stream" {
+ return ""
}
- defer grpcConnection.Close()
-
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
-
- return fn(client)
+ return mimeType
}
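
The concurrent chunk upload above fans out one goroutine per chunk, funnels the finished chunks through a channel, and, if any worker fails, deletes every chunk that did succeed so no orphans are left on the volume servers. A minimal self-contained sketch of that pattern, with hypothetical uploadChunk/deleteChunks helpers standing in for the operation package:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // uploadChunk and deleteChunks are hypothetical stand-ins for
    // operation.Upload and operation.DeleteFiles above.
    func uploadChunk(i int) (string, error) { return fmt.Sprintf("chunk-%d", i), nil }
    func deleteChunks(ids []string)         { fmt.Println("rolling back", ids) }

    func uploadAll(n int) error {
    	var wg sync.WaitGroup
    	var mu sync.Mutex
    	var uploadError error
    	idsChan := make(chan string, n)
    	for i := 0; i < n; i++ {
    		wg.Add(1)
    		go func(i int) {
    			defer wg.Done()
    			id, err := uploadChunk(i)
    			if err != nil {
    				mu.Lock()
    				uploadError = err // remember the failure, let the other workers drain
    				mu.Unlock()
    				return
    			}
    			idsChan <- id
    		}(i)
    	}
    	wg.Wait()
    	close(idsChan)

    	var ids []string
    	for id := range idsChan {
    		ids = append(ids, id)
    	}
    	if uploadError != nil {
    		deleteChunks(ids) // delete the chunks that did make it, as the change above does
    		return uploadError
    	}
    	return nil
    }

    func main() { fmt.Println(uploadAll(4)) }
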
diff --git a/weed/command/filer_export.go b/weed/command/filer_export.go
deleted file mode 100644
index 7a2e7920a..000000000
--- a/weed/command/filer_export.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package command
-
-import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/notification"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/server"
- "github.com/spf13/viper"
-)
-
-func init() {
- cmdFilerExport.Run = runFilerExport // break init cycle
-}
-
-var cmdFilerExport = &Command{
- UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra",
- Short: "export meta data in filer store",
- Long: `Iterate the file tree and export all metadata out
-
- Both source and target store:
- * should be a store name already specified in filer.toml
- * do not need to be enabled state
-
- If target store is empty, only the directory tree will be listed.
-
- If target store is "notification", the list of entries will be sent to notification.
- This is usually used to bootstrap filer replication to a remote system.
-
- `,
-}
-
-var (
- // filerExportOutputFile = cmdFilerExport.Flag.String("output", "", "the output file. If empty, only list out the directory tree")
- filerExportSourceStore = cmdFilerExport.Flag.String("sourceStore", "", "the source store name in filer.toml, default to currently enabled store")
- filerExportTargetStore = cmdFilerExport.Flag.String("targetStore", "", "the target store name in filer.toml, or \"notification\" to export all files to message queue")
- dir = cmdFilerExport.Flag.String("dir", "/", "only process files under this directory")
- dirListLimit = cmdFilerExport.Flag.Int("dirListLimit", 100000, "limit directory list size")
- dryRun = cmdFilerExport.Flag.Bool("dryRun", false, "not actually moving data")
- verboseFilerExport = cmdFilerExport.Flag.Bool("v", false, "verbose entry details")
-)
-
-type statistics struct {
- directoryCount int
- fileCount int
-}
-
-func runFilerExport(cmd *Command, args []string) bool {
-
- weed_server.LoadConfiguration("filer", true)
- config := viper.GetViper()
-
- var sourceStore, targetStore filer2.FilerStore
-
- for _, store := range filer2.Stores {
- if store.GetName() == *filerExportSourceStore || *filerExportSourceStore == "" && config.GetBool(store.GetName()+".enabled") {
- viperSub := config.Sub(store.GetName())
- if err := store.Initialize(viperSub); err != nil {
- glog.Fatalf("Failed to initialize source store for %s: %+v",
- store.GetName(), err)
- } else {
- sourceStore = store
- }
- break
- }
- }
-
- for _, store := range filer2.Stores {
- if store.GetName() == *filerExportTargetStore {
- viperSub := config.Sub(store.GetName())
- if err := store.Initialize(viperSub); err != nil {
- glog.Fatalf("Failed to initialize target store for %s: %+v",
- store.GetName(), err)
- } else {
- targetStore = store
- }
- break
- }
- }
-
- if sourceStore == nil {
- glog.Errorf("Failed to find source store %s", *filerExportSourceStore)
- println("existing data sources are:")
- for _, store := range filer2.Stores {
- println(" " + store.GetName())
- }
- return false
- }
-
- if targetStore == nil && *filerExportTargetStore != "" && *filerExportTargetStore != "notification" {
- glog.Errorf("Failed to find target store %s", *filerExportTargetStore)
- println("existing data sources are:")
- for _, store := range filer2.Stores {
- println(" " + store.GetName())
- }
- return false
- }
-
- stat := statistics{}
-
- var fn func(level int, entry *filer2.Entry) error
-
- if *filerExportTargetStore == "notification" {
- weed_server.LoadConfiguration("notification", false)
- v := viper.GetViper()
- notification.LoadConfiguration(v.Sub("notification"))
-
- fn = func(level int, entry *filer2.Entry) error {
- printout(level, entry)
- if *dryRun {
- return nil
- }
- return notification.Queue.SendMessage(
- string(entry.FullPath),
- &filer_pb.EventNotification{
- NewEntry: entry.ToProtoEntry(),
- },
- )
- }
- } else if targetStore == nil {
- fn = printout
- } else {
- fn = func(level int, entry *filer2.Entry) error {
- printout(level, entry)
- if *dryRun {
- return nil
- }
- return targetStore.InsertEntry(entry)
- }
- }
-
- doTraverse(&stat, sourceStore, filer2.FullPath(*dir), 0, fn)
-
- glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount)
-
- return true
-}
-
-func doTraverse(stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) {
-
- limit := *dirListLimit
- lastEntryName := ""
- for {
- entries, err := filerStore.ListDirectoryEntries(parentPath, lastEntryName, false, limit)
- if err != nil {
- break
- }
- for _, entry := range entries {
- if fnErr := fn(level, entry); fnErr != nil {
- glog.Errorf("failed to process entry: %s", entry.FullPath)
- }
- if entry.IsDirectory() {
- stat.directoryCount++
- doTraverse(stat, filerStore, entry.FullPath, level+1, fn)
- } else {
- stat.fileCount++
- }
- }
- if len(entries) < limit {
- break
- }
- }
-}
-
-func printout(level int, entry *filer2.Entry) error {
- for i := 0; i < level; i++ {
- if i == level-1 {
- print("+-")
- } else {
- print("| ")
- }
- }
- print(entry.FullPath.Name())
- if *verboseFilerExport {
- for _, chunk := range entry.Chunks {
- print("[")
- print(chunk.FileId)
- print(",")
- print(chunk.Offset)
- print(",")
- print(chunk.Size)
- print(")")
- }
- }
- println()
- return nil
-}
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index 3384e4023..40f2b570b 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -1,6 +1,7 @@
package command
import (
+ "context"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -12,7 +13,7 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub"
- "github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
)
@@ -28,16 +29,17 @@ var cmdFilerReplicate = &Command{
filer.replicate listens on filer notifications. If any file is updated, it will fetch the updated content,
and write to the other destination.
- Run "weed scaffold -config replication" to generate a replication.toml file and customize the parameters.
+ Run "weed scaffold -config=replication" to generate a replication.toml file and customize the parameters.
`,
}
func runFilerReplicate(cmd *Command, args []string) bool {
- weed_server.LoadConfiguration("replication", true)
- weed_server.LoadConfiguration("notification", true)
- config := viper.GetViper()
+ util.LoadConfiguration("security", false)
+ util.LoadConfiguration("replication", true)
+ util.LoadConfiguration("notification", true)
+ config := util.GetViper()
var notificationInput sub.NotificationInput
@@ -45,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
- viperSub := config.Sub("notification." + input.GetName())
- if err := input.Initialize(viperSub); err != nil {
+ if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification input for %s: %+v",
input.GetName(), err)
}
@@ -64,10 +65,9 @@ func runFilerReplicate(cmd *Command, args []string) bool {
// avoid recursive replication
if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") {
- sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer")
- if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") {
- fromDir := sourceConfig.GetString("directory")
- toDir := sinkConfig.GetString("directory")
+ if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") {
+ fromDir := config.GetString("source.filer.directory")
+ toDir := config.GetString("sink.filer.directory")
if strings.HasPrefix(toDir, fromDir) {
glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
}
@@ -77,8 +77,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
var dataSink sink.ReplicationSink
for _, sk := range sink.Sinks {
if config.GetBool("sink." + sk.GetName() + ".enabled") {
- viperSub := config.Sub("sink." + sk.GetName())
- if err := sk.Initialize(viperSub); err != nil {
+ if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize sink for %s: %+v",
sk.GetName(), err)
}
@@ -96,7 +95,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
return true
}
- replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink)
+ replicator := replication.NewReplicator(config, "source.filer.", dataSink)
for {
key, m, err := notificationInput.ReceiveMessage()
@@ -115,14 +114,13 @@ func runFilerReplicate(cmd *Command, args []string) bool {
} else {
glog.V(1).Infof("modify: %s", key)
}
- if err = replicator.Replicate(key, m); err != nil {
+ if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err)
} else {
glog.V(1).Infof("replicated %s", key)
}
}
- return true
}
func validateOneEnabledInput(config *viper.Viper) {
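
The Initialize signature change above swaps viper sub-trees (config.Sub("notification." + name)) for a shared viper instance plus a key prefix. A plausible motivation is that viper's Sub() returns a detached tree that does not see environment-variable overrides, while lookups by full key path do. A hedged sketch of what an input now reads, using a hypothetical kafka input:

    package main

    import "github.com/spf13/viper"

    // kafkaInput is a hypothetical notification input following the new
    // Initialize(config, prefix) shape used in this change.
    type kafkaInput struct{ hosts string }

    func (k *kafkaInput) GetName() string { return "kafka" }

    func (k *kafkaInput) Initialize(config *viper.Viper, prefix string) error {
    	// prefix is e.g. "notification.kafka.", so every lookup uses the full
    	// key path and an override like WEED_NOTIFICATION_KAFKA_HOSTS still applies.
    	k.hosts = config.GetString(prefix + "hosts")
    	return nil
    }
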
diff --git a/weed/command/fix.go b/weed/command/fix.go
index a800978c6..223808f4b 100644
--- a/weed/command/fix.go
+++ b/weed/command/fix.go
@@ -7,6 +7,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -29,12 +32,12 @@ var (
)
type VolumeFileScanner4Fix struct {
- version storage.Version
- nm *storage.NeedleMap
+ version needle.Version
+ nm *needle_map.MemDb
}
-func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock storage.SuperBlock) error {
- scanner.version = superBlock.Version()
+func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ scanner.version = superBlock.Version
return nil
}
@@ -42,14 +45,14 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
return false
}
-func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error {
- glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped())
- if n.Size > 0 {
- pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size)
+func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+ glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
+ if n.Size > 0 && n.Size != types.TombstoneFileSize {
+ pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe)
} else {
glog.V(2).Infof("skipping deleted file ...")
- return scanner.nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize))
+ return scanner.nm.Delete(n.Id)
}
return nil
}
@@ -65,23 +68,22 @@ func runFix(cmd *Command, args []string) bool {
baseFileName = *fixVolumeCollection + "_" + baseFileName
}
indexFileName := path.Join(*fixVolumePath, baseFileName+".idx")
- indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
- }
- defer indexFile.Close()
- nm := storage.NewBtreeNeedleMap(indexFile)
+ nm := needle_map.NewMemDb()
defer nm.Close()
- vid := storage.VolumeId(*fixVolumeId)
+ vid := needle.VolumeId(*fixVolumeId)
scanner := &VolumeFileScanner4Fix{
nm: nm,
}
- err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner)
- if err != nil {
- glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+	if err := storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil {
+		os.Remove(indexFileName) // remove the partial .idx before exiting; glog.Fatalf does not return
+		glog.Fatalf("scan .dat File: %v", err)
+	}
+
+	if err := nm.SaveToIdx(indexFileName); err != nil {
+		os.Remove(indexFileName) // remove the partial .idx before exiting; glog.Fatalf does not return
+		glog.Fatalf("save to .idx File: %v", err)
}
diff --git a/weed/command/master.go b/weed/command/master.go
index bd2267b9e..21c759f4e 100644
--- a/weed/command/master.go
+++ b/weed/command/master.go
@@ -6,120 +6,149 @@ import (
"runtime"
"strconv"
"strings"
- "time"
+
+ "github.com/chrislusf/raft/protobuf"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+ "github.com/gorilla/mux"
+ "google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gorilla/mux"
- "google.golang.org/grpc/reflection"
)
+var (
+ m MasterOptions
+)
+
+type MasterOptions struct {
+ port *int
+ ip *string
+ ipBind *string
+ metaFolder *string
+ peers *string
+ volumeSizeLimitMB *uint
+ volumePreallocate *bool
+ // pulseSeconds *int
+ defaultReplication *string
+ garbageThreshold *float64
+ whiteList *string
+ disableHttp *bool
+ metricsAddress *string
+ metricsIntervalSec *int
+}
+
func init() {
cmdMaster.Run = runMaster // break init cycle
+ m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
+ m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address")
+ m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
+ m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
+ m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
+ m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
+ m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
+ // m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
+ m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.")
+ m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
+ m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
+ m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
+ m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address")
+ m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
}
var cmdMaster = &Command{
UsageLine: "master -port=9333",
Short: "start a master server",
- Long: `start a master server to provide volume=>location mapping service
- and sequence number of file ids
+ Long: `start a master server to provide volume=>location mapping service and sequence number of file ids
+
+ The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
+
+ The example security.toml configuration file can be generated by "weed scaffold -config=security"
`,
}
var (
- mport = cmdMaster.Flag.Int("port", 9333, "http listen port")
- mGrpcPort = cmdMaster.Flag.Int("port.grpc", 0, "grpc server listen port, default to http port + 10000")
- masterIp = cmdMaster.Flag.String("ip", "localhost", "master | address")
- masterBindIp = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
- metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
- masterPeers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
- volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
- volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
- mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
- defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.")
- // mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds")
- mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
- garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
- masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
- masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
- masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file")
- masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file")
-
- masterWhiteList []string
+ masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file")
+ masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file")
)
func runMaster(cmd *Command, args []string) bool {
- if *mMaxCpu < 1 {
- *mMaxCpu = runtime.NumCPU()
- }
- runtime.GOMAXPROCS(*mMaxCpu)
- util.SetupProfiling(*masterCpuProfile, *masterMemProfile)
- if err := util.TestFolderWritable(*metaFolder); err != nil {
- glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *metaFolder, err)
+ util.LoadConfiguration("security", false)
+ util.LoadConfiguration("master", false)
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+ grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
+
+ if err := util.TestFolderWritable(*m.metaFolder); err != nil {
+ glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
}
- if *masterWhiteListOption != "" {
- masterWhiteList = strings.Split(*masterWhiteListOption, ",")
+
+ var masterWhiteList []string
+ if *m.whiteList != "" {
+ masterWhiteList = strings.Split(*m.whiteList, ",")
}
- if *volumeSizeLimitMB > 30*1000 {
+ if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
		glog.Fatalf("volumeSizeLimitMB should be smaller than %d", util.VolumeSizeLimitGB*1000)
}
- r := mux.NewRouter()
- ms := weed_server.NewMasterServer(r, *mport, *metaFolder,
- *volumeSizeLimitMB, *volumePreallocate,
- *mpulse, *defaultReplicaPlacement, *garbageThreshold,
- masterWhiteList, *masterSecureKey,
- )
+ startMaster(m, masterWhiteList)
+
+ return true
+}
- listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport)
+func startMaster(masterOption MasterOptions, masterWhiteList []string) {
- glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", listeningAddress)
+ backend.LoadConfiguration(util.GetViper())
+ myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers)
+
+ r := mux.NewRouter()
+ ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
+ listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
+ glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
}
+ // start raftServer
+ raftServer := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
+ peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, 5)
+ if raftServer == nil {
+ glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder)
+ }
+ ms.SetRaftServer(raftServer)
+ r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
+ // starting grpc server
+ grpcPort := *masterOption.port + 10000
+ grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0)
+ if err != nil {
+ glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
+ }
+ // Create your protocol servers.
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
+ master_pb.RegisterSeaweedServer(grpcS, ms)
+ protobuf.RegisterRaftServer(grpcS, raftServer)
+ reflection.Register(grpcS)
+ glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
+ go grpcS.Serve(grpcL)
- go func() {
- time.Sleep(100 * time.Millisecond)
- myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers)
- raftServer := weed_server.NewRaftServer(r, peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse)
- ms.SetRaftServer(raftServer)
- }()
-
- go func() {
- // starting grpc server
- grpcPort := *mGrpcPort
- if grpcPort == 0 {
- grpcPort = *mport + 10000
- }
- grpcL, err := util.NewListener(*masterBindIp+":"+strconv.Itoa(grpcPort), 0)
- if err != nil {
- glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
- }
- // Create your protocol servers.
- grpcS := util.NewGrpcServer()
- master_pb.RegisterSeaweedServer(grpcS, ms)
- reflection.Register(grpcS)
-
- glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterBindIp, grpcPort)
- grpcS.Serve(grpcL)
- }()
+ go ms.MasterClient.KeepConnectedToMaster()
// start http server
httpS := &http.Server{Handler: r}
- if err := httpS.Serve(masterListener); err != nil {
- glog.Fatalf("master server failed to serve: %v", err)
- }
+ go httpS.Serve(masterListener)
- return true
+ select {}
}
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
+ glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
@@ -133,12 +162,28 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
}
}
- peerCount := len(cleanedPeers)
if !hasSelf {
- peerCount += 1
+ cleanedPeers = append(cleanedPeers, masterAddress)
}
- if peerCount%2 == 0 {
+ if len(cleanedPeers)%2 == 0 {
glog.Fatalf("Only odd number of masters are supported!")
}
return
}
+
+func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
+ return &weed_server.MasterOption{
+ Host: *m.ip,
+ Port: *m.port,
+ MetaFolder: *m.metaFolder,
+ VolumeSizeLimitMB: *m.volumeSizeLimitMB,
+ VolumePreallocate: *m.volumePreallocate,
+ // PulseSeconds: *m.pulseSeconds,
+ DefaultReplicaPlacement: *m.defaultReplication,
+ GarbageThreshold: *m.garbageThreshold,
+ WhiteList: whiteList,
+ DisableHttp: *m.disableHttp,
+ MetricsAddress: *m.metricsAddress,
+ MetricsIntervalSec: *m.metricsIntervalSec,
+ }
+}
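
A quick worked example of checkPeers above, with made-up addresses: the current master is appended when it is missing from -peers, and an even-sized cluster is rejected because Raft needs an odd number of voters.

    // illustrative values only
    self, peers := checkPeers("192.168.1.10", 9333, "192.168.1.11:9333,192.168.1.12:9333")
    // self  == "192.168.1.10:9333"
    // peers == [192.168.1.11:9333 192.168.1.12:9333 192.168.1.10:9333] -> 3 masters, odd, accepted
    // with a single address in -peers the total would be 2 and the master exits via glog.Fatalf
    _, _ = self, peers
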
diff --git a/weed/command/mount.go b/weed/command/mount.go
index e61f16783..440aca8c6 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -1,22 +1,26 @@
package command
import (
- "fmt"
- "strconv"
- "strings"
+ "os"
)
type MountOptions struct {
- filer *string
- filerGrpcPort *int
- filerMountRootPath *string
- dir *string
- dirListingLimit *int
- collection *string
- replication *string
- ttlSec *int
- chunkSizeLimitMB *int
- dataCenter *string
+ filer *string
+ filerMountRootPath *string
+ dir *string
+ dirAutoCreate *bool
+ dirListCacheLimit *int64
+ collection *string
+ replication *string
+ ttlSec *int
+ chunkSizeLimitMB *int
+ cacheDir *string
+ cacheSizeMB *int64
+ dataCenter *string
+ allowOthers *bool
+ umaskString *string
+ nonempty *bool
+ outsideContainerClusterMode *bool
}
var (
@@ -28,17 +32,23 @@ var (
func init() {
cmdMount.Run = runMount // break init cycle
mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
- mountOptions.filerGrpcPort = cmdMount.Flag.Int("filer.grpc.port", 0, "filer grpc server listen port, default to http port + 10000")
mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
- mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size")
+ mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
+ mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
	mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create for the files. If empty, let filer decide.")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
- mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
+ mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
+ mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
+ mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
+ mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
+ mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
+ mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
+	mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "run the mount outside the SeaweedFS container cluster, accessing volume servers through the filer address")
}
var cmdMount = &Command{
@@ -56,24 +66,11 @@ var cmdMount = &Command{
On OS X, it requires OSXFUSE (http://osxfuse.github.com/).
- `,
-}
-
-func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) {
- hostnameAndPort := strings.Split(filer, ":")
- if len(hostnameAndPort) != 2 {
- return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort)
- }
+	If the SeaweedFS system runs in a container cluster, e.g., managed by Kubernetes or Docker Compose,
+ the volume servers are not accessible by their own ip addresses.
+ In "outsideContainerClusterMode", the mount will use the filer ip address instead, assuming:
+ * All volume server containers are accessible through the same hostname or IP address as the filer.
+ * All volume server container ports are open external to the cluster.
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- return "", fmt.Errorf("The filer filer port parse error: %v", parseErr)
- }
-
- filerGrpcPort := int(filerPort) + 10000
- if optionalGrpcPort != 0 {
- filerGrpcPort = optionalGrpcPort
- }
-
- return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
+ `,
}
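
A minimal sketch of what the "outsideContainerClusterMode" described above implies: volume server locations keep their published port but are reached through the filer's host name. This is a hypothetical helper for illustration, not the actual filesys implementation:

    import "strings"

    // rewriteVolumeHost keeps the volume server's port but swaps in the filer
    // host, assuming all volume server ports are reachable on the filer address.
    func rewriteVolumeHost(volumeLocation, filerHost string) string {
    	parts := strings.Split(volumeLocation, ":")
    	if len(parts) != 2 {
    		return volumeLocation
    	}
    	return filerHost + ":" + parts[1] // "10.0.3.7:8080" -> "filer.example.com:8080"
    }
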
diff --git a/weed/command/mount_darwin.go b/weed/command/mount_darwin.go
new file mode 100644
index 000000000..f0a5581e7
--- /dev/null
+++ b/weed/command/mount_darwin.go
@@ -0,0 +1,13 @@
+package command
+
+import (
+ "github.com/seaweedfs/fuse"
+)
+
+func osSpecificMountOptions() []fuse.MountOption {
+ return []fuse.MountOption{}
+}
+
+func checkMountPointAvailable(dir string) bool {
+ return true
+}
diff --git a/weed/command/mount_freebsd.go b/weed/command/mount_freebsd.go
new file mode 100644
index 000000000..f0a5581e7
--- /dev/null
+++ b/weed/command/mount_freebsd.go
@@ -0,0 +1,13 @@
+package command
+
+import (
+ "github.com/seaweedfs/fuse"
+)
+
+func osSpecificMountOptions() []fuse.MountOption {
+ return []fuse.MountOption{}
+}
+
+func checkMountPointAvailable(dir string) bool {
+ return true
+}
diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go
new file mode 100644
index 000000000..25c4f72cf
--- /dev/null
+++ b/weed/command/mount_linux.go
@@ -0,0 +1,155 @@
+package command
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/seaweedfs/fuse"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc//mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func mounted(mountPoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountPoint
+ for _, e := range entries {
+ if e.Mountpoint == mountPoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out []*Info
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+ return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
+
+func osSpecificMountOptions() []fuse.MountOption {
+ return []fuse.MountOption{}
+}
+
+func checkMountPointAvailable(dir string) bool {
+ mountPoint := dir
+ if mountPoint != "/" && strings.HasSuffix(mountPoint, "/") {
+ mountPoint = mountPoint[0 : len(mountPoint)-1]
+ }
+
+ if mounted, err := mounted(mountPoint); err != nil || mounted {
+ return false
+ }
+
+ return true
+}
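
For reference, the parser above can be exercised directly with the sample mountinfo line from the format comment:

    line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
    infos, err := parseInfoFile(strings.NewReader(line))
    if err == nil && len(infos) == 1 {
    	fmt.Println(infos[0].Mountpoint, infos[0].Fstype) // prints: /mnt2 ext3
    }
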
diff --git a/weed/command/mount_notsupported.go b/weed/command/mount_notsupported.go
index 3bf22ddc4..f3c0de3d6 100644
--- a/weed/command/mount_notsupported.go
+++ b/weed/command/mount_notsupported.go
@@ -1,5 +1,6 @@
// +build !linux
// +build !darwin
+// +build !freebsd
package command
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 2937b9ef1..915754166 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -1,11 +1,13 @@
-// +build linux darwin
+// +build linux darwin freebsd
package command
import (
+ "context"
"fmt"
"os"
"os/user"
+ "path"
"runtime"
"strconv"
"strings"
@@ -13,104 +15,185 @@ import (
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
func runMount(cmd *Command, args []string) bool {
- fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
- if *mountOptions.dir == "" {
+
+ grace.SetupProfiling(*mountCpuProfile, *mountMemProfile)
+
+ umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64)
+ if umaskErr != nil {
+		fmt.Printf("cannot parse umask %s\n", *mountOptions.umaskString)
+ return false
+ }
+
+ if len(args) > 0 {
+ return false
+ }
+
+ return RunMount(&mountOptions, os.FileMode(umask))
+}
+
+func RunMount(option *MountOptions, umask os.FileMode) bool {
+
+ filer := *option.filer
+ // parse filer grpc address
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer)
+ if err != nil {
+ glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
+ return true
+ }
+
+ // try to connect to filer, filerBucketsPath may be useful later
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ var cipher bool
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
+ return true
+ }
+
+ filerMountRootPath := *option.filerMountRootPath
+ dir := *option.dir
+	chunkSizeLimitMB := *option.chunkSizeLimitMB
+
+ util.LoadConfiguration("security", false)
+
+ fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
+ if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
return false
}
- if *mountOptions.chunkSizeLimitMB <= 0 {
+ if chunkSizeLimitMB <= 0 {
fmt.Printf("Please specify a reasonable buffer size.")
return false
}
- fuse.Unmount(*mountOptions.dir)
+ fuse.Unmount(dir)
+
+ uid, gid := uint32(0), uint32(0)
// detect mount folder mode
+ if *option.dirAutoCreate {
+ os.MkdirAll(dir, 0755)
+ }
mountMode := os.ModeDir | 0755
- if fileInfo, err := os.Stat(*mountOptions.dir); err == nil {
+ fileInfo, err := os.Stat(dir)
+ if err == nil {
mountMode = os.ModeDir | fileInfo.Mode()
+ uid, gid = util.GetFileUidGid(fileInfo)
+ fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
}
- // detect current user
- uid, gid := uint32(0), uint32(0)
- if u, err := user.Current(); err == nil {
- if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil {
- uid = uint32(parsedId)
- }
- if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil {
- gid = uint32(parsedId)
+ if uid == 0 {
+ if u, err := user.Current(); err == nil {
+ if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil {
+ uid = uint32(parsedId)
+ }
+ if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil {
+ gid = uint32(parsedId)
+ }
+ fmt.Printf("current uid=%d gid=%d\n", uid, gid)
}
}
- util.SetupProfiling(*mountCpuProfile, *mountMemProfile)
+ // Ensure target mount point availability
+ if isValid := checkMountPointAvailable(dir); !isValid {
+		glog.Fatalf("mount point %s is not available (already mounted?), please check!", dir)
+ return true
+ }
- c, err := fuse.Mount(
- *mountOptions.dir,
- fuse.VolumeName("SeaweedFS"),
- fuse.FSName("SeaweedFS"),
- fuse.Subtype("SeaweedFS"),
- fuse.NoAppleDouble(),
+ mountName := path.Base(dir)
+
+ options := []fuse.MountOption{
+ fuse.VolumeName(mountName),
+ fuse.FSName(filer + ":" + filerMountRootPath),
+ fuse.Subtype("seaweedfs"),
+ // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders
fuse.NoAppleXattr(),
fuse.NoBrowse(),
fuse.AutoXattr(),
fuse.ExclCreate(),
fuse.DaemonTimeout("3600"),
- fuse.AllowOther(),
fuse.AllowSUID(),
fuse.DefaultPermissions(),
- fuse.MaxReadahead(1024*128),
+ fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
- )
- if err != nil {
- glog.Fatal(err)
- return false
}
- util.OnInterrupt(func() {
- fuse.Unmount(*mountOptions.dir)
- c.Close()
- })
-
- filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort)
- if err != nil {
- glog.Fatal(err)
- return false
+ options = append(options, osSpecificMountOptions()...)
+ if *option.allowOthers {
+ options = append(options, fuse.AllowOther())
+ }
+ if *option.nonempty {
+ options = append(options, fuse.AllowNonEmptyMount())
}
- mountRoot := *mountOptions.filerMountRootPath
+ // find mount point
+ mountRoot := filerMountRootPath
if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
- err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
- FilerGrpcAddress: filerGrpcAddress,
- FilerMountRootPath: mountRoot,
- Collection: *mountOptions.collection,
- Replication: *mountOptions.replication,
- TtlSec: int32(*mountOptions.ttlSec),
- ChunkSizeLimit: int64(*mountOptions.chunkSizeLimitMB) * 1024 * 1024,
- DataCenter: *mountOptions.dataCenter,
- DirListingLimit: *mountOptions.dirListingLimit,
- EntryCacheTtl: 3 * time.Second,
- MountUid: uid,
- MountGid: gid,
- MountMode: mountMode,
- }))
+ seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
+ FilerGrpcAddress: filerGrpcAddress,
+ GrpcDialOption: grpcDialOption,
+ FilerMountRootPath: mountRoot,
+ Collection: *option.collection,
+ Replication: *option.replication,
+ TtlSec: int32(*option.ttlSec),
+ ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
+ CacheDir: *option.cacheDir,
+ CacheSizeMB: *option.cacheSizeMB,
+ DataCenter: *option.dataCenter,
+ DirListCacheLimit: *option.dirListCacheLimit,
+ EntryCacheTtl: 3 * time.Second,
+ MountUid: uid,
+ MountGid: gid,
+ MountMode: mountMode,
+ MountCtime: fileInfo.ModTime(),
+ MountMtime: time.Now(),
+ Umask: umask,
+		OutsideContainerClusterMode: *option.outsideContainerClusterMode,
+ Cipher: cipher,
+ })
+
+ // mount
+ c, err := fuse.Mount(dir, options...)
if err != nil {
- fuse.Unmount(*mountOptions.dir)
+ glog.V(0).Infof("mount: %v", err)
+ return true
}
+ defer fuse.Unmount(dir)
+
+ grace.OnInterrupt(func() {
+ fuse.Unmount(dir)
+ c.Close()
+ })
+
+ glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
+ err = fs.Serve(c, seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
- glog.Fatal(err)
+ glog.V(0).Infof("mount process: %v", err)
+ return true
}
return true
diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go
new file mode 100644
index 000000000..b4b5855ff
--- /dev/null
+++ b/weed/command/msg_broker.go
@@ -0,0 +1,114 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "google.golang.org/grpc/reflection"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ messageBrokerStandaloneOptions MessageBrokerOptions
+)
+
+type MessageBrokerOptions struct {
+ filer *string
+ ip *string
+ port *int
+ cpuprofile *string
+ memprofile *string
+}
+
+func init() {
+ cmdMsgBroker.Run = runMsgBroker // break init cycle
+ messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address")
+ messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address")
+ messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port")
+ messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file")
+ messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file")
+}
+
+var cmdMsgBroker = &Command{
+ UsageLine: "msgBroker [-port=17777] [-filer=]",
+ Short: "start a message queue broker",
+ Long: `start a message queue broker
+
+ The broker can accept gRPC calls to write or read messages. The messages are stored via filer.
+ The brokers are stateless. To scale up, just add more brokers.
+
+`,
+}
+
+func runMsgBroker(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ return messageBrokerStandaloneOptions.startQueueServer()
+
+}
+
+func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
+
+ grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile)
+
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
+ cipher := false
+
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+			glog.V(0).Infof("waiting to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
+ Filers: []string{*msgBrokerOpt.filer},
+ DefaultReplication: "",
+ MaxMB: 0,
+ Ip: *msgBrokerOpt.ip,
+ Port: *msgBrokerOpt.port,
+ Cipher: cipher,
+	}, grpcDialOption)
+	if err != nil {
+		glog.Fatalf("failed to create message broker: %v", err)
+	}
+
+ // start grpc listener
+ grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
+ if err != nil {
+ glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
+ }
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
+ messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)
+ reflection.Register(grpcS)
+ grpcS.Serve(grpcL)
+
+ return true
+
+}
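
The wait-until-the-filer-answers loop above reappears almost verbatim in the s3 command below. A possible shared helper, sketched here as an editorial suggestion rather than code in this change:

    // waitForFilerConfiguration polls GetFilerConfiguration once per second
    // until the filer responds, then returns the configuration.
    func waitForFilerConfiguration(filerGrpcAddress string, grpcDialOption grpc.DialOption) *filer_pb.GetFilerConfigurationResponse {
    	for {
    		var resp *filer_pb.GetFilerConfigurationResponse
    		err := pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
    			var err error
    			resp, err = client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
    			return err
    		})
    		if err == nil {
    			return resp
    		}
    		glog.V(0).Infof("waiting for filer grpc address %s: %v", filerGrpcAddress, err)
    		time.Sleep(time.Second)
    	}
    }
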
diff --git a/weed/command/s3.go b/weed/command/s3.go
index 16a9490ff..7ebd4fab0 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -1,64 +1,161 @@
package command
import (
+ "context"
+ "fmt"
"net/http"
"time"
- "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+
+ "github.com/gorilla/mux"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gorilla/mux"
)
var (
- s3options S3Options
+ s3StandaloneOptions S3Options
)
type S3Options struct {
- filer *string
- filerGrpcPort *int
- filerBucketsPath *string
- port *int
- domainName *string
- tlsPrivateKey *string
- tlsCertificate *string
+ filer *string
+ port *int
+ config *string
+ domainName *string
+ tlsPrivateKey *string
+ tlsCertificate *string
}
func init() {
cmdS3.Run = runS3 // break init cycle
- s3options.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
- s3options.filerGrpcPort = cmdS3.Flag.Int("filer.grpcPort", 0, "filer server grpc port, default to filer http port plus 10000")
- s3options.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
- s3options.port = cmdS3.Flag.Int("port", 8333, "s3options server http listen port")
- s3options.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
- s3options.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
- s3options.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
+ s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
+ s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
+ s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
+ s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
+ s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
+ s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
}
var cmdS3 = &Command{
- UsageLine: "s3 -port=8333 -filer=",
+ UsageLine: "s3 [-port=8333] [-filer=] [-config=]",
Short: "start a s3 API compatible server that is backed by a filer",
Long: `start a s3 API compatible server that is backed by a filer.
+ By default, you can use any access key and secret key to access the S3 APIs.
+ To enable credential based access, create a config.json file similar to this:
+
+{
+ "identities": [
+ {
+ "name": "some_name",
+ "credentials": [
+ {
+ "accessKey": "some_access_key1",
+ "secretKey": "some_secret_key1"
+ }
+ ],
+ "actions": [
+ "Admin",
+ "Read",
+ "Write"
+ ]
+ },
+ {
+ "name": "some_read_only_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key2",
+ "secretKey": "some_secret_key2"
+ }
+ ],
+ "actions": [
+ "Read"
+ ]
+ },
+ {
+ "name": "some_normal_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key3",
+ "secretKey": "some_secret_key3"
+ }
+ ],
+ "actions": [
+ "Read",
+ "Write"
+ ]
+ },
+ {
+ "name": "user_limited_to_bucket1",
+ "credentials": [
+ {
+ "accessKey": "some_access_key4",
+ "secretKey": "some_secret_key4"
+ }
+ ],
+ "actions": [
+ "Read:bucket1",
+ "Write:bucket1"
+ ]
+ }
+ ]
+}
+
`,
}
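
With the identities above, any standard S3 client can talk to the gateway. A hedged example using aws-sdk-go v1, taking the access key from the sample config and assuming the default -port=8333 on localhost:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/credentials"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession(&aws.Config{
    		Endpoint:         aws.String("http://localhost:8333"), // assumed default -port
    		Region:           aws.String("us-east-1"),             // any value; required by the SDK
    		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
    		S3ForcePathStyle: aws.Bool(true), // path-style, since no domainName is configured
    	}))
    	svc := s3.New(sess)
    	buckets, err := svc.ListBuckets(&s3.ListBucketsInput{})
    	fmt.Println(buckets, err)
    }
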
func runS3(cmd *Command, args []string) bool {
- filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort)
+ util.LoadConfiguration("security", false)
+
+ return s3StandaloneOptions.startS3Server()
+
+}
+
+func (s3opt *S3Options) startS3Server() bool {
+
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer)
if err != nil {
glog.Fatal(err)
return false
}
+ filerBucketsPath := "/buckets"
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ filerBucketsPath = resp.DirBuckets
+ glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
+ return nil
+ })
+ if err != nil {
+			glog.V(0).Infof("waiting to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
router := mux.NewRouter().SkipClean(true)
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
- Filer: *s3options.filer,
+ Filer: *s3opt.filer,
FilerGrpcAddress: filerGrpcAddress,
- DomainName: *s3options.domainName,
- BucketsPath: *s3options.filerBucketsPath,
+ Config: *s3opt.config,
+ DomainName: *s3opt.domainName,
+ BucketsPath: filerBucketsPath,
+ GrpcDialOption: grpcDialOption,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
@@ -66,22 +163,22 @@ func runS3(cmd *Command, args []string) bool {
httpS := &http.Server{Handler: router}
- listenAddress := fmt.Sprintf(":%d", *s3options.port)
+ listenAddress := fmt.Sprintf(":%d", *s3opt.port)
s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
}
- if *s3options.tlsPrivateKey != "" {
- if err = httpS.ServeTLS(s3ApiListener, *s3options.tlsCertificate, *s3options.tlsPrivateKey); err != nil {
+ if *s3opt.tlsPrivateKey != "" {
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
+ if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
- glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3options.port)
} else {
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
- glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3options.port)
}
return true
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index ec0723859..b199f2d2d 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -10,16 +10,24 @@ func init() {
}
var cmdScaffold = &Command{
- UsageLine: "scaffold [filer]",
+ UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
Short: "generate basic configuration files",
Long: `Generate filer.toml with all possible configurations for you to customize.
+ The options can also be overwritten by environment variables.
+	For example, the filer.toml mysql password can be overwritten by the environment variable
+ export WEED_MYSQL_PASSWORD=some_password
+ Environment variable rules:
+ * Prefix the variable name with "WEED_"
+	* Uppercase the rest of the variable name.
+ * Replace '.' with '_'
+
`,
}
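
The environment-variable rules above match viper's standard env binding. A minimal sketch of the assumed wiring, with "strings" and "github.com/spf13/viper" imported (the actual loading happens inside util.LoadConfiguration):

    v := viper.New()
    v.SetEnvPrefix("weed")                             // rule 1: "WEED_" prefix
    v.AutomaticEnv()                                   // rule 2: upper-cased lookup
    v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // rule 3: '.' becomes '_'
    // now WEED_MYSQL_PASSWORD overrides the "mysql.password" key from filer.toml
    password := v.GetString("mysql.password")
    _ = password
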
var (
outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
- config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication] the configuration file to generate")
+ config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
)
func runScaffold(cmd *Command, args []string) bool {
@@ -32,6 +40,10 @@ func runScaffold(cmd *Command, args []string) bool {
content = NOTIFICATION_TOML_EXAMPLE
case "replication":
content = REPLICATION_TOML_EXAMPLE
+ case "security":
+ content = SECURITY_TOML_EXAMPLE
+ case "master":
+ content = MASTER_TOML_EXAMPLE
}
if content == "" {
println("need a valid -config option")
@@ -55,27 +67,39 @@ const (
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
-[memory]
-# local in memory, mostly for testing purpose
-enabled = false
+####################################################
+# Customizable filer server options
+####################################################
+[filer.options]
+# with http DELETE, by default the filer would check whether a folder is empty.
+# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
+recursive_delete = false
+# directories under this folder will automatically become separate buckets
+buckets_folder = "/buckets"
+buckets_fsync = [ # a list of buckets with all write requests fsync=true
+ "important_bucket",
+ "should_always_fsync",
+]
+
+####################################################
+# The following are filer store options
+####################################################
-[leveldb]
+[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
+# faster than previous leveldb, recommended.
enabled = true
dir = "." # directory to store level db files
-####################################################
-# multiple filers on shared storage, fairly scalable
-####################################################
-
-[mysql]
+[mysql] # or tidb
# CREATE TABLE IF NOT EXISTS filemeta (
-# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
-# name VARCHAR(1000) COMMENT 'directory or file name',
-# directory VARCHAR(4096) COMMENT 'full path to parent directory',
-# meta BLOB,
+# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
+# name VARCHAR(1000) COMMENT 'directory or file name',
+# directory TEXT COMMENT 'full path to parent directory',
+# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
+
enabled = false
hostname = "localhost"
port = 3306
@@ -84,12 +108,13 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
+interpolateParams = false
-[postgres]
+[postgres] # or cockroachdb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
-# name VARCHAR(1000),
-# directory VARCHAR(4096),
+# name VARCHAR(65535),
+# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
@@ -116,13 +141,13 @@ hosts=[
"localhost:9042",
]
-[redis]
+[redis2]
enabled = false
address = "localhost:6379"
password = ""
-db = 0
+database = 0
-[redis_cluster]
+[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
@@ -132,7 +157,22 @@ addresses = [
"localhost:30005",
"localhost:30006",
]
+password = ""
+# allows reads from slave servers or the master, but all writes still go to the master
+readOnly = true
+# automatically use the closest Redis server for reads
+routeByLatency = true
+
+[etcd]
+enabled = false
+servers = "localhost:2379"
+timeout = "3s"
+[mongodb]
+enabled = false
+uri = "mongodb://localhost:27017"
+option_pool_size = 0
+database = "seaweedfs"
`
NOTIFICATION_TOML_EXAMPLE = `
@@ -178,6 +218,17 @@ google_application_credentials = "/path/to/x.json" # path to json credential fil
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto-created if it does not exist
+[notification.gocdk_pub_sub]
+# The Go Cloud Development Kit (https://gocloud.dev).
+# PubSub API (https://godoc.org/gocloud.dev/pubsub).
+# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
+enabled = false
+# This URL will Dial the RabbitMQ server at the URL in the environment
+# variable RABBIT_SERVER_URL and open the exchange "myexchange".
+# The exchange must have already been created by some other means, like
+# the RabbitMQ management plugin.
+topic_url = "rabbit://myexchange"
+sub_url = "rabbit://myqueue"
`
REPLICATION_TOML_EXAMPLE = `
@@ -194,28 +245,29 @@ grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
-directory = "/buckets"
+directory = "/buckets"
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
-# this is not a directory on your hard drive, but on your filer.
+# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
-directory = "/backup"
+directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
-# default loads credentials from the shared credentials file (~/.aws/credentials).
+# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
+endpoint = ""
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
@@ -239,5 +291,128 @@ b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
+`
+
+ SECURITY_TOML_EXAMPLE = `
+# Put this file in one of these locations, in descending priority:
+# ./security.toml
+# $HOME/.seaweedfs/security.toml
+# /etc/seaweedfs/security.toml
+# this file is read by master, volume server, and filer
+
+# the jwt signing key is read by master and volume server.
+# a jwt expires after 10 seconds by default.
+[jwt.signing]
+key = ""
+expires_after_seconds = 10 # seconds
+
+# jwt for read is only supported with master+volume setup. Filer does not support this mode.
+[jwt.signing.read]
+key = ""
+expires_after_seconds = 10 # seconds
+
+# all grpc tls authentications are mutual
+# the values for the following ca, cert, and key are paths to the PEM files.
+# the host name is not checked, so the PEM files can be shared.
+[grpc]
+ca = ""
+
+[grpc.volume]
+cert = ""
+key = ""
+
+[grpc.master]
+cert = ""
+key = ""
+
+[grpc.filer]
+cert = ""
+key = ""
+
+[grpc.msg_broker]
+cert = ""
+key = ""
+
+# use this for any place that needs a grpc client
+# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
+[grpc.client]
+cert = ""
+key = ""
+
+
+# volume server https options
+# Note: work in progress!
+# this does not yet work with other clients, e.g., "weed filer|mount".
+[https.client]
+enabled = true
+[https.volume]
+cert = ""
+key = ""
+
+
+`
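For context on the [grpc.*] sections: the ca/cert/key triples are standard mutual-TLS material. The sketch below shows how such client credentials could be assembled with stock grpc-go; it is an illustration under that assumption, not the actual security.LoadClientTLS implementation.

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "io/ioutil"
        "log"

        "google.golang.org/grpc/credentials"
    )

    func clientTLS(caFile, certFile, keyFile string) credentials.TransportCredentials {
        cert, err := tls.LoadX509KeyPair(certFile, keyFile)
        if err != nil {
            log.Fatal(err)
        }
        caPEM, err := ioutil.ReadFile(caFile)
        if err != nil {
            log.Fatal(err)
        }
        pool := x509.NewCertPool()
        pool.AppendCertsFromPEM(caPEM)
        return credentials.NewTLS(&tls.Config{
            Certificates:       []tls.Certificate{cert},
            RootCAs:            pool,
            InsecureSkipVerify: true, // mirrors "the host name is not checked" above; also skips chain checks
        })
    }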
+
+ MASTER_TOML_EXAMPLE = `
+# Put this file in one of these locations, in descending priority:
+# ./master.toml
+# $HOME/.seaweedfs/master.toml
+# /etc/seaweedfs/master.toml
+# this file is read by master
+
+[master.maintenance]
+# these scripts are run periodically; they are the same as running them from 'weed shell'
+scripts = """
+ lock
+ ec.encode -fullPercent=95 -quietFor=1h
+ ec.rebuild -force
+ ec.balance -force
+ volume.balance -force
+ volume.fix.replication
+ unlock
+"""
+sleep_minutes = 17 # sleep minutes between each script execution
+
+[master.filer]
+default = "localhost:8888" # used by maintenance scripts if the scripts need to use fs-related commands
+
+
+[master.sequencer]
+type = "memory" # Choose [memory|etcd] type for storing the file id sequence
+# when sequencer.type = etcd, set the client listen urls of the etcd cluster that stores the file id sequence
+# example: http://127.0.0.1:2379,http://127.0.0.1:2389
+sequencer_etcd_urls = "http://127.0.0.1:2379"
+
+
+# configurations for tiered cloud storage
+# old volumes are transparently moved to the cloud for cost efficiency
+[storage.backend]
+ [storage.backend.s3.default]
+ enabled = false
+ aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
+ aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
+ region = "us-east-2"
+ bucket = "your_bucket_name" # an existing bucket
+ endpoint = ""
+
+# create this number of logical volumes if there are no more writable volumes
+# count_x means how many copies of the data.
+# e.g.:
+# 000 has only one copy, copy_1
+# 010 and 001 have two copies, copy_2
+# 011 has three copies, copy_3
+[master.volume_growth]
+copy_1 = 7 # create 1 x 7 = 7 actual volumes
+copy_2 = 6 # create 2 x 6 = 12 actual volumes
+copy_3 = 3 # create 3 x 3 = 9 actual volumes
+copy_other = 1 # create n x 1 = n actual volumes
+
+# configuration flags for replication
+[master.replication]
+# any replication counts should be considered minimums. If you specify 010 and
+# have 3 different racks, that's still considered writable. Writes will still
+# try to replicate to all available volumes. You should only use this option
+# if you are doing your own replication or periodic sync of volumes.
+treat_replication_as_minimums = false
+
`
)
diff --git a/weed/command/scaffold_test.go b/weed/command/scaffold_test.go
new file mode 100644
index 000000000..423dacc32
--- /dev/null
+++ b/weed/command/scaffold_test.go
@@ -0,0 +1,44 @@
+package command
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/spf13/viper"
+)
+
+func TestReadingTomlConfiguration(t *testing.T) {
+
+ viper.SetConfigType("toml")
+
+ // any approach to load this configuration into your program will work.
+ var tomlExample = []byte(`
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+`)
+
+ if err := viper.ReadConfig(bytes.NewBuffer(tomlExample)); err != nil {
+ t.Fatalf("failed to read TOML config: %v", err)
+ }
+
+ fmt.Printf("database is %v\n", viper.Get("database"))
+ fmt.Printf("servers is %v\n", viper.GetStringMap("servers"))
+
+ alpha := viper.Sub("servers.alpha")
+
+ fmt.Printf("alpha ip is %v\n", alpha.GetString("ip"))
+}
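The test above only prints; if it should ever fail the build, here is a possible assertion-based variant using the same viper calls (the test name and expected values are hypothetical, lifted from the inline TOML).

    func TestAlphaIP(t *testing.T) {
        viper.SetConfigType("toml")
        conf := []byte("[servers.alpha]\nip = \"10.0.0.1\"\n")
        if err := viper.ReadConfig(bytes.NewBuffer(conf)); err != nil {
            t.Fatal(err)
        }
        // Sub scopes the lookup to the [servers.alpha] table, as in the test above.
        if got := viper.Sub("servers.alpha").GetString("ip"); got != "10.0.0.1" {
            t.Errorf("alpha ip = %q, want %q", got, "10.0.0.1")
        }
    }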
diff --git a/weed/command/server.go b/weed/command/server.go
index ba5305a97..443f041c5 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -1,21 +1,15 @@
package command
import (
- "net/http"
+ "fmt"
"os"
"runtime"
"runtime/pprof"
- "strconv"
"strings"
- "sync"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gorilla/mux"
- "google.golang.org/grpc/reflection"
)
type ServerOptions struct {
@@ -24,8 +18,11 @@ type ServerOptions struct {
}
var (
- serverOptions ServerOptions
- filerOptions FilerOptions
+ serverOptions ServerOptions
+ masterOptions MasterOptions
+ filerOptions FilerOptions
+ s3Options S3Options
+ msgBrokerOptions MessageBrokerOptions
)
func init() {
@@ -33,70 +30,90 @@ func init() {
}
var cmdServer = &Command{
- UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name",
- Short: "start a server, including volume server, and automatically elect a master server",
+ UsageLine: "server -dir=/tmp -volume.max=5 -ip=server_name",
+ Short: "start a master server, a volume server, and optionally a filer and an S3 gateway",
Long: `start both a volume server to provide storage spaces
and a master server to provide volume=>location mapping service and sequence number of file ids
This is provided as a convenient way to start both volume server and master server.
- The servers are exactly the same as starting them separately.
+ The servers act exactly the same as when started separately.
+ So other volume servers can also connect to this master server.
- So other volume servers can use this embedded master server also.
-
- Optionally, one filer server can be started. Logically, filer servers should not be in a cluster.
- They run with meta data on disk, not shared. So each filer server is different.
+ Optionally, a filer server can be started.
+ Also optionally, an S3 gateway can be started.
`,
}
var (
- serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name")
- serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
- serverMaxCpu = cmdServer.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
- serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds")
- serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name")
- serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name")
- serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
- serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list")
- serverSecureKey = cmdServer.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
- serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
- masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
- masterGrpcPort = cmdServer.Flag.Int("master.port.grpc", 0, "master grpc server listen port, default to http port + 10000")
- masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
- masterVolumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
- masterVolumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.")
- masterDefaultReplicaPlacement = cmdServer.Flag.String("master.defaultReplicaPlacement", "000", "Default replication type if not specified.")
- volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
- volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...")
- pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
- isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
+ serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
+ serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
+ serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds")
+ serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name")
+ serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name")
+ serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
+ serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
+ volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
+ volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.")
+ volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (defaults to 1%). Low disk space will mark all volumes as ReadOnly.")
+
+ // pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
+ isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
+ isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
+ isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")
serverWhiteList []string
+
+ False = false
)
func init() {
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
+
+ masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
+ masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
+ masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list")
+ masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
+ masterOptions.volumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.")
+ masterOptions.defaultReplication = cmdServer.Flag.String("master.defaultReplication", "000", "Default replication type if not specified.")
+ masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
+ masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address")
+ masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
+
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
- filerOptions.grpcPort = cmdServer.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to http port + 10000")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
- filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
+ filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
+ filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
- serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.")
- serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
+ serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
+ serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
+ serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
+ serverOptions.v.pprof = &False
+
+ s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
+ s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
+ s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
+ s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
+ s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
+
+ msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
}
func runServer(cmd *Command, args []string) bool {
- filerOptions.secretKey = serverSecureKey
+
+ util.LoadConfiguration("security", false)
+ util.LoadConfiguration("master", false)
+
if *serverOptions.cpuprofile != "" {
f, err := os.Create(*serverOptions.cpuprofile)
if err != nil {
@@ -106,45 +123,62 @@ func runServer(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
- if *filerOptions.redirectOnRead {
+ if *isStartingS3 {
+ *isStartingFiler = true
+ }
+ if *isStartingMsgBroker {
*isStartingFiler = true
}
- master := *serverIp + ":" + strconv.Itoa(*masterPort)
+ _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)
+ peers := strings.Join(peerList, ",")
+ masterOptions.peers = &peers
+
+ masterOptions.ip = serverIp
+ masterOptions.ipBind = serverBindIp
+ filerOptions.masters = &peers
filerOptions.ip = serverIp
+ filerOptions.bindIp = serverBindIp
serverOptions.v.ip = serverIp
serverOptions.v.bindIp = serverBindIp
- serverOptions.v.masters = &master
+ serverOptions.v.masters = &peers
serverOptions.v.idleConnectionTimeout = serverTimeout
- serverOptions.v.maxCpu = serverMaxCpu
serverOptions.v.dataCenter = serverDataCenter
serverOptions.v.rack = serverRack
- serverOptions.v.pulseSeconds = pulseSeconds
+ msgBrokerOptions.ip = serverIp
+
+ // serverOptions.v.pulseSeconds = pulseSeconds
+ // masterOptions.pulseSeconds = pulseSeconds
+
+ masterOptions.whiteList = serverWhiteListOption
filerOptions.dataCenter = serverDataCenter
+ filerOptions.disableHttp = serverDisableHttp
+ masterOptions.disableHttp = serverDisableHttp
+
+ filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
+ s3Options.filer = &filerAddress
+ msgBrokerOptions.filer = &filerAddress
if *filerOptions.defaultReplicaPlacement == "" {
- *filerOptions.defaultReplicaPlacement = *masterDefaultReplicaPlacement
+ *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication
}
- if *serverMaxCpu < 1 {
- *serverMaxCpu = runtime.NumCPU()
- }
- runtime.GOMAXPROCS(*serverMaxCpu)
+ runtime.GOMAXPROCS(runtime.NumCPU())
folders := strings.Split(*volumeDataFolders, ",")
- if *masterVolumeSizeLimitMB > 30*1000 {
+ if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
}
- if *masterMetaFolder == "" {
- *masterMetaFolder = folders[0]
+ if *masterOptions.metaFolder == "" {
+ *masterOptions.metaFolder = folders[0]
}
- if err := util.TestFolderWritable(*masterMetaFolder); err != nil {
- glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterMetaFolder, err)
+ if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil {
+ glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
}
- filerOptions.defaultLevelDbDirectory = masterMetaFolder
+ filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder
if *serverWhiteListOption != "" {
serverWhiteList = strings.Split(*serverWhiteListOption, ",")
@@ -159,68 +193,29 @@ func runServer(cmd *Command, args []string) bool {
}()
}
- var raftWaitForMaster sync.WaitGroup
- var volumeWait sync.WaitGroup
-
- raftWaitForMaster.Add(1)
- volumeWait.Add(1)
-
- go func() {
- r := mux.NewRouter()
- ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder,
- *masterVolumeSizeLimitMB, *masterVolumePreallocate,
- *pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold,
- serverWhiteList, *serverSecureKey,
- )
+ if *isStartingS3 {
+ go func() {
+ time.Sleep(2 * time.Second)
- glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort)
- masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterPort), 0)
- if e != nil {
- glog.Fatalf("Master startup error: %v", e)
- }
+ s3Options.startS3Server()
- go func() {
- // starting grpc server
- grpcPort := *masterGrpcPort
- if grpcPort == 0 {
- grpcPort = *masterPort + 10000
- }
- grpcL, err := util.NewListener(*serverIp+":"+strconv.Itoa(grpcPort), 0)
- if err != nil {
- glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
- }
- // Create your protocol servers.
- grpcS := util.NewGrpcServer()
- master_pb.RegisterSeaweedServer(grpcS, ms)
- reflection.Register(grpcS)
-
- glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *serverIp, grpcPort)
- grpcS.Serve(grpcL)
}()
+ }
+ if *isStartingMsgBroker {
go func() {
- raftWaitForMaster.Wait()
- time.Sleep(100 * time.Millisecond)
- myAddress, peers := checkPeers(*serverIp, *masterPort, *serverPeers)
- raftServer := weed_server.NewRaftServer(r, peers, myAddress, *masterMetaFolder, ms.Topo, *pulseSeconds)
- ms.SetRaftServer(raftServer)
- volumeWait.Done()
+ time.Sleep(2 * time.Second)
+ msgBrokerOptions.startQueueServer()
}()
+ }
- raftWaitForMaster.Done()
-
- // start http server
- httpS := &http.Server{Handler: r}
- if err := httpS.Serve(masterListener); err != nil {
- glog.Fatalf("master server failed to serve: %v", err)
- }
-
- }()
+ // start volume server
+ {
+ go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent)
- volumeWait.Wait()
- time.Sleep(100 * time.Millisecond)
+ }
- serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption)
+ startMaster(masterOptions, serverWhiteList)
return true
}
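The reworked runServer reduces to one ordering rule: optional services (S3, message broker) sleep briefly so the filer is reachable first, the volume server runs in a goroutine, and the master runs last in the foreground and keeps the process alive. A stripped-down sketch of that shape, with placeholder functions standing in for the real option methods:

    package main

    import "time"

    func startS3()     {}            // stands in for s3Options.startS3Server()
    func startBroker() {}            // stands in for msgBrokerOptions.startQueueServer()
    func startVolume() {}            // stands in for serverOptions.v.startVolumeServer(...)
    func startMaster() { select {} } // blocks forever, like the real startMaster

    // startAll mirrors the ordering in runServer: dependents are delayed,
    // the volume server is backgrounded, the master owns the foreground.
    func startAll(withS3, withBroker bool) {
        if withS3 {
            go func() { time.Sleep(2 * time.Second); startS3() }()
        }
        if withBroker {
            go func() { time.Sleep(2 * time.Second); startBroker() }()
        }
        go startVolume()
        startMaster()
    }

    func main() { startAll(true, false) }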
diff --git a/weed/command/shell.go b/weed/command/shell.go
index 19c5049c5..6dd768f47 100644
--- a/weed/command/shell.go
+++ b/weed/command/shell.go
@@ -1,61 +1,47 @@
package command
import (
- "bufio"
"fmt"
- "os"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/shell"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ shellOptions shell.ShellOptions
+ shellInitialFiler *string
)
func init() {
cmdShell.Run = runShell // break init cycle
+ shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
+ shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
}
var cmdShell = &Command{
UsageLine: "shell",
- Short: "run interactive commands, now just echo",
- Long: `run interactive commands.
+ Short: "run interactive administrative commands",
+ Long: `run interactive administrative commands.
`,
}
-var ()
-
func runShell(command *Command, args []string) bool {
- r := bufio.NewReader(os.Stdin)
- o := bufio.NewWriter(os.Stdout)
- e := bufio.NewWriter(os.Stderr)
- prompt := func() {
- var err error
- if _, err = o.WriteString("> "); err != nil {
- glog.V(0).Infoln("error writing to stdout:", err)
- }
- if err = o.Flush(); err != nil {
- glog.V(0).Infoln("error flushing stdout:", err)
- }
- }
- readLine := func() string {
- ret, err := r.ReadString('\n')
- if err != nil {
- fmt.Fprint(e, err)
- os.Exit(1)
- }
- return ret
- }
- execCmd := func(cmd string) int {
- if cmd != "" {
- if _, err := o.WriteString(cmd); err != nil {
- glog.V(0).Infoln("error writing to stdout:", err)
- }
- }
- return 0
- }
- cmd := ""
- for {
- prompt()
- cmd = readLine()
- execCmd(cmd)
+ util.LoadConfiguration("security", false)
+ shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ var err error
+ shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
+ if err != nil {
+ fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
+ return false
}
+ shellOptions.Directory = "/"
+
+ shell.RunShell(shellOptions)
+
+ return true
+
}
diff --git a/weed/command/upload.go b/weed/command/upload.go
index f664c0e3a..358897aee 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -8,6 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
var (
@@ -15,15 +16,15 @@ var (
)
type UploadOptions struct {
- master *string
- dir *string
- include *string
- replication *string
- collection *string
- dataCenter *string
- ttl *string
- maxMB *int
- secretKey *string
+ master *string
+ dir *string
+ include *string
+ replication *string
+ collection *string
+ dataCenter *string
+ ttl *string
+ maxMB *int
+ usePublicUrl *bool
}
func init() {
@@ -36,8 +37,8 @@ func init() {
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
- upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit")
- upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
+ upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
+ upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
}
var cmdUpload = &Command{
@@ -53,14 +54,17 @@ var cmdUpload = &Command{
All files under the folder and subfolders will be uploaded, each with its own file key.
Optional parameter "-include" allows you to specify the file name patterns.
- If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separatedly.
+ If "maxMB" is set to a positive number, files larger than the limit will be split into chunks and uploaded separately.
The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned.
`,
}
func runUpload(cmd *Command, args []string) bool {
- secret := security.Secret(*upload.secretKey)
+
+ util.LoadConfiguration("security", false)
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
if len(args) == 0 {
if *upload.dir == "" {
return false
@@ -77,9 +81,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
- results, e := operation.SubmitFiles(*upload.master, parts,
- *upload.replication, *upload.collection, *upload.dataCenter,
- *upload.ttl, *upload.maxMB, secret)
+ results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@@ -96,9 +98,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
- results, _ := operation.SubmitFiles(*upload.master, parts,
- *upload.replication, *upload.collection, *upload.dataCenter,
- *upload.ttl, *upload.maxMB, secret)
+ results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}
diff --git a/weed/command/version.go b/weed/command/version.go
index 8fdd68ec8..9caf7dc4e 100644
--- a/weed/command/version.go
+++ b/weed/command/version.go
@@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool {
cmd.Usage()
}
- fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
return true
}
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 27a075b5b..0a7d52049 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -1,7 +1,9 @@
package command
import (
+ "fmt"
"net/http"
+ httppprof "net/http/pprof"
"os"
"runtime"
"runtime/pprof"
@@ -9,12 +11,22 @@ import (
"strings"
"time"
+ "github.com/spf13/viper"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util/httpdown"
+
+ "google.golang.org/grpc/reflection"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc/reflection"
)
var (
@@ -30,37 +42,40 @@ type VolumeServerOptions struct {
publicUrl *string
bindIp *string
masters *string
- pulseSeconds *int
idleConnectionTimeout *int
- maxCpu *int
dataCenter *string
rack *string
whiteList []string
indexType *string
- fixJpgOrientation *bool
readRedirect *bool
cpuProfile *string
memProfile *string
+ compactionMBPerSecond *int
+ fileSizeLimitMB *int
+ minFreeSpacePercents []float32
+ pprof *bool
+ // pulseSeconds *int
}
func init() {
cmdVolume.Run = runVolume // break init cycle
v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
- v.ip = cmdVolume.Flag.String("ip", "", "ip or server name")
+ v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
- v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
+ // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
- v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
- v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.")
- v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
+ v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
+ v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
+ v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+ v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
}
var cmdVolume = &Command{
@@ -73,26 +88,39 @@ var cmdVolume = &Command{
var (
volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
- maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...")
+ maxVolumeCounts = cmdVolume.Flag.String("max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.")
volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
+ minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (defaults to 1%). Low disk space will mark all volumes as ReadOnly.")
)
func runVolume(cmd *Command, args []string) bool {
- if *v.maxCpu < 1 {
- *v.maxCpu = runtime.NumCPU()
+
+ util.LoadConfiguration("security", false)
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ // If --pprof is set we assume the caller wants to be able to collect
+ // cpu and memory profiles via go tool pprof
+ if !*v.pprof {
+ grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
}
- runtime.GOMAXPROCS(*v.maxCpu)
- util.SetupProfiling(*v.cpuProfile, *v.memProfile)
- v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption)
+ v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent)
return true
}
-func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) {
+func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) {
- //Set multiple folders and each folder's max volume count limit'
+ // Set multiple folders and each folder's max volume count limit
v.folders = strings.Split(volumeFolders, ",")
+ for _, folder := range v.folders {
+ if err := util.TestFolderWritable(folder); err != nil {
+ glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
+ }
+ }
+
+ // set max
maxCountStrings := strings.Split(maxVolumeCounts, ",")
for _, maxString := range maxCountStrings {
if max, e := strconv.Atoi(maxString); e == nil {
@@ -104,19 +132,33 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if len(v.folders) != len(v.folderMaxLimits) {
glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
}
- for _, folder := range v.folders {
- if err := util.TestFolderWritable(folder); err != nil {
- glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
+
+ // set minFreeSpacePercent
+ minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",")
+ for _, freeString := range minFreeSpacePercentStrings {
+ if value, e := strconv.ParseFloat(freeString, 32); e == nil {
+ v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
+ } else {
+ glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
+ }
+ }
+ if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
+ for i := 0; i < len(v.folders)-1; i++ {
+ v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0])
}
}
+ if len(v.folders) != len(v.minFreeSpacePercents) {
+ glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
+ }
- //security related white list configuration
+ // security related white list configuration
if volumeWhiteListOption != "" {
v.whiteList = strings.Split(volumeWhiteListOption, ",")
}
if *v.ip == "" {
- *v.ip = "127.0.0.1"
+ *v.ip = util.DetectedHostAddress()
+ glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
}
if *v.publicPort == 0 {
@@ -125,73 +167,169 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if *v.publicUrl == "" {
*v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort)
}
- isSeperatedPublicPort := *v.publicPort != *v.port
volumeMux := http.NewServeMux()
publicVolumeMux := volumeMux
- if isSeperatedPublicPort {
+ if v.isSeparatedPublicPort() {
publicVolumeMux = http.NewServeMux()
}
+ if *v.pprof {
+ volumeMux.HandleFunc("/debug/pprof/", httppprof.Index)
+ volumeMux.HandleFunc("/debug/pprof/cmdline", httppprof.Cmdline)
+ volumeMux.HandleFunc("/debug/pprof/profile", httppprof.Profile)
+ volumeMux.HandleFunc("/debug/pprof/symbol", httppprof.Symbol)
+ volumeMux.HandleFunc("/debug/pprof/trace", httppprof.Trace)
+ }
+
volumeNeedleMapKind := storage.NeedleMapInMemory
switch *v.indexType {
case "leveldb":
volumeNeedleMapKind = storage.NeedleMapLevelDb
- case "boltdb":
- volumeNeedleMapKind = storage.NeedleMapBoltDb
- case "btree":
- volumeNeedleMapKind = storage.NeedleMapBtree
+ case "leveldbMedium":
+ volumeNeedleMapKind = storage.NeedleMapLevelDbMedium
+ case "leveldbLarge":
+ volumeNeedleMapKind = storage.NeedleMapLevelDbLarge
}
masters := *v.masters
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
- v.folders, v.folderMaxLimits,
+ v.folders, v.folderMaxLimits, v.minFreeSpacePercents,
volumeNeedleMapKind,
- strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
+ strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
v.whiteList,
- *v.fixJpgOrientation, *v.readRedirect,
+ *v.readRedirect,
+ *v.compactionMBPerSecond,
+ *v.fileSizeLimitMB,
)
- listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
- glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
- listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
- if e != nil {
- glog.Fatalf("Volume server listener error:%v", e)
- }
- if isSeperatedPublicPort {
- publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
- glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
- publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
- if e != nil {
- glog.Fatalf("Volume server listener error:%v", e)
+ // starting grpc server
+ grpcS := v.startGrpcService(volumeServer)
+
+ // starting public http server
+ var publicHttpDown httpdown.Server
+ if v.isSeparatedPublicPort() {
+ publicHttpDown = v.startPublicHttpService(publicVolumeMux)
+ if publicHttpDown == nil {
+ glog.Fatalf("start public http service failed")
}
- go func() {
- if e := http.Serve(publicListener, publicVolumeMux); e != nil {
- glog.Fatalf("Volume server fail to serve public: %v", e)
- }
- }()
}
- util.OnInterrupt(func() {
+ // starting the cluster http server
+ clusterHttpServer := v.startClusterHttpService(volumeMux)
+
+ stopChain := make(chan struct{})
+ grace.OnInterrupt(func() {
+ fmt.Println("volume server has be killed")
+ var startTime time.Time
+
+ // first, stop the public http service so that no new user requests are accepted
+ if publicHttpDown != nil {
+ startTime = time.Now()
+ if err := publicHttpDown.Stop(); err != nil {
+ glog.Warningf("stop the public http server failed, %v", err)
+ }
+ delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
+ glog.V(0).Infof("stop public http server, elapsed %dms", delta)
+ }
+
+ startTime = time.Now()
+ if err := clusterHttpServer.Stop(); err != nil {
+ glog.Warningf("stop the cluster http server failed, %v", err)
+ }
+ delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
+ glog.V(0).Infof("graceful stop cluster http server, elapsed [%d]", delta)
+
+ startTime = time.Now()
+ grpcS.GracefulStop()
+ delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
+ glog.V(0).Infof("graceful stop gRPC, elapsed [%d]", delta)
+
+ startTime = time.Now()
volumeServer.Shutdown()
+ delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
+ glog.V(0).Infof("stop volume server, elapsed [%d]", delta)
+
pprof.StopCPUProfile()
+
+ close(stopChain) // notify exit
})
- // starting grpc server
+ <-stopChain // block until the interrupt handler finishes cleanup
+ glog.Warningf("the volume server exited.")
+}
+
+// check whether a separate public port is configured
+func (v VolumeServerOptions) isSeparatedPublicPort() bool {
+ return *v.publicPort != *v.port
+}
+
+func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
grpcPort := *v.port + 10000
grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
- grpcS := util.NewGrpcServer()
- volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer)
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
+ volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS)
- go grpcS.Serve(grpcL)
+ go func() {
+ if err := grpcS.Serve(grpcL); err != nil {
+ glog.Fatalf("start gRPC service failed, %s", err)
+ }
+ }()
+ return grpcS
+}
- if e := http.Serve(listener, volumeMux); e != nil {
- glog.Fatalf("Volume server fail to serve: %v", e)
+func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
+ publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
+ glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
+ publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
+ if e != nil {
+ glog.Fatalf("Volume server listener error:%v", e)
+ }
+
+ pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
+ publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
+ go func() {
+ if err := publicHttpDown.Wait(); err != nil {
+ glog.Errorf("public http down wait failed, %v", err)
+ }
+ }()
+
+ return publicHttpDown
+}
+
+func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpdown.Server {
+ var (
+ certFile, keyFile string
+ )
+ if viper.GetString("https.volume.key") != "" {
+ certFile = viper.GetString("https.volume.cert")
+ keyFile = viper.GetString("https.volume.key")
+ }
+
+ listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
+ glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
+ listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
+ if e != nil {
+ glog.Fatalf("Volume server listener error:%v", e)
}
+ httpDown := httpdown.HTTP{
+ KillTimeout: 5 * time.Minute,
+ StopTimeout: 5 * time.Minute,
+ CertFile: certFile,
+ KeyFile: keyFile}
+ clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener)
+ go func() {
+ if e := clusterHttpServer.Wait(); e != nil {
+ glog.Fatalf("Volume server fail to serve: %v", e)
+ }
+ }()
+ return clusterHttpServer
}
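The interrupt handler above drains in a fixed order: public HTTP first (stop accepting new traffic), then cluster HTTP, then gRPC, then the storage layer itself. grace.OnInterrupt is SeaweedFS-internal, so the standalone sketch below uses os/signal to illustrate the same register-cleanup-then-block pattern; the println steps stand in for the real Stop calls.

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        stop := make(chan struct{})
        sigs := make(chan os.Signal, 1)
        signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

        go func() {
            <-sigs
            // drain in dependency order, mirroring the volume server:
            // public http -> cluster http -> gRPC -> storage engine
            fmt.Println("stopping public http")
            fmt.Println("stopping cluster http")
            fmt.Println("stopping gRPC")
            fmt.Println("shutting down storage")
            close(stop) // notify main to exit
        }()

        <-stop
        fmt.Println("exited cleanly")
    }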
diff --git a/weed/command/watch.go b/weed/command/watch.go
new file mode 100644
index 000000000..b46707a62
--- /dev/null
+++ b/weed/command/watch.go
@@ -0,0 +1,65 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ cmdWatch.Run = runWatch // break init cycle
+}
+
+var cmdWatch = &Command{
+ UsageLine: "watch [-filer=localhost:8888] [-target=/]",
+ Short: "see recent changes on a filer",
+ Long: `See recent changes on a filer.
+
+ `,
+}
+
+var (
+ watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
+ watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
+ watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+)
+
+func runWatch(cmd *Command, args []string) bool {
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ClientName: "watch",
+ PathPrefix: *watchTarget,
+ SinceNs: time.Now().Add(-*watchStart).UnixNano(),
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+ fmt.Printf("events: %+v\n", resp.EventNotification)
+ }
+
+ })
+ if watchErr != nil {
+ fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
+ }
+
+ return true
+}
diff --git a/weed/command/webdav.go b/weed/command/webdav.go
new file mode 100644
index 000000000..b9676c909
--- /dev/null
+++ b/weed/command/webdav.go
@@ -0,0 +1,142 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "os"
+ "os/user"
+ "strconv"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ webDavStandaloneOptions WebDavOption
+)
+
+type WebDavOption struct {
+ filer *string
+ port *int
+ collection *string
+ tlsPrivateKey *string
+ tlsCertificate *string
+ cacheDir *string
+ cacheSizeMB *int64
+}
+
+func init() {
+ cmdWebDav.Run = runWebDav // break init cycle
+ webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address")
+ webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port")
+ webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
+ webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
+ webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
+ webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
+ webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB")
+}
+
+var cmdWebDav = &Command{
+ UsageLine: "webdav -port=7333 -filer=",
+ Short: "start a webdav server that is backed by a filer",
+ Long: `start a webdav server that is backed by a filer.
+
+`,
+}
+
+func runWebDav(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
+
+ return webDavStandaloneOptions.startWebDav()
+
+}
+
+func (wo *WebDavOption) startWebDav() bool {
+
+ // detect current user
+ uid, gid := uint32(0), uint32(0)
+ if u, err := user.Current(); err == nil {
+ if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil {
+ uid = uint32(parsedId)
+ }
+ if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil {
+ gid = uint32(parsedId)
+ }
+ }
+
+ // parse filer grpc address
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ var cipher bool
+ // connect to filer
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ ws, err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
+ Filer: *wo.filer,
+ FilerGrpcAddress: filerGrpcAddress,
+ GrpcDialOption: grpcDialOption,
+ Collection: *wo.collection,
+ Uid: uid,
+ Gid: gid,
+ Cipher: cipher,
+ CacheDir: *wo.cacheDir,
+ CacheSizeMB: *wo.cacheSizeMB,
+ })
+ if err != nil {
+ glog.Fatalf("WebDav Server startup error: %v", err)
+ }
+
+ httpS := &http.Server{Handler: ws.Handler}
+
+ listenAddress := fmt.Sprintf(":%d", *wo.port)
+ webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
+ if err != nil {
+ glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
+ }
+
+ if *wo.tlsPrivateKey != "" {
+ glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
+ if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
+ glog.Fatalf("WebDav Server Fail to serve: %v", err)
+ }
+ } else {
+ glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
+ if err = httpS.Serve(webDavListener); err != nil {
+ glog.Fatalf("WebDav Server Fail to serve: %v", err)
+ }
+ }
+
+ return true
+
+}
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go
index 5f2990475..5ade18960 100644
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ b/weed/filer2/abstract_sql/abstract_sql_store.go
@@ -1,24 +1,65 @@
package abstract_sql
import (
+ "context"
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type AbstractSqlStore struct {
- DB *sql.DB
- SqlInsert string
- SqlUpdate string
- SqlFind string
- SqlDelete string
- SqlListExclusive string
- SqlListInclusive string
+ DB *sql.DB
+ SqlInsert string
+ SqlUpdate string
+ SqlFind string
+ SqlDelete string
+ SqlDeleteFolderChildren string
+ SqlListExclusive string
+ SqlListInclusive string
}
-func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
+type TxOrDB interface {
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+ QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+}
+
+func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
+ Isolation: sql.LevelReadCommitted,
+ ReadOnly: false,
+ })
+ if err != nil {
+ return ctx, err
+ }
+
+ return context.WithValue(ctx, "tx", tx), nil
+}
+func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx.Commit()
+ }
+ return nil
+}
+func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx.Rollback()
+ }
+ return nil
+}
+
+func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx
+ }
+ return store.DB
+}
+
+func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
@@ -26,7 +67,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
- res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta)
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
if err != nil {
return fmt.Errorf("insert %s: %s", entry.FullPath, err)
}
@@ -38,7 +79,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
@@ -46,7 +87,7 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
- res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir)
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("update %s: %s", entry.FullPath, err)
}
@@ -58,13 +99,13 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) {
+func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) {
dir, name := fullpath.DirAndName()
- row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir)
+ row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
entry := &filer2.Entry{
@@ -77,11 +118,11 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
return entry, nil
}
-func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
+func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
- res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir)
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("delete %s: %s", fullpath, err)
}
@@ -94,14 +135,29 @@ func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
return nil
}
-func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
+ if err != nil {
+ return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
sqlText := store.SqlListExclusive
if inclusive {
sqlText = store.SqlListInclusive
}
- rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit)
+ rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit)
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
@@ -116,7 +172,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, st
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(data); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
@@ -128,3 +184,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, st
return entries, nil
}
+
+func (store *AbstractSqlStore) Shutdown() {
+ store.DB.Close()
+}
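The new Begin/Commit/Rollback trio stashes the *sql.Tx in the context under the "tx" key, and getTxOrDB transparently routes every query through it. A hedged sketch of the intended calling pattern (the helper name is hypothetical; the method signatures match the ones added above):

    package abstract_sql

    import (
        "context"

        "github.com/chrislusf/seaweedfs/weed/filer2"
    )

    // insertTwoAtomically illustrates the calling pattern; it is not part of
    // this patch. Both inserts resolve to the same *sql.Tx because
    // getTxOrDB finds it under the "tx" context key.
    func insertTwoAtomically(ctx context.Context, store *AbstractSqlStore, a, b *filer2.Entry) error {
        txCtx, err := store.BeginTransaction(ctx)
        if err != nil {
            return err
        }
        if err := store.InsertEntry(txCtx, a); err != nil {
            store.RollbackTransaction(txCtx)
            return err
        }
        if err := store.InsertEntry(txCtx, b); err != nil {
            store.RollbackTransaction(txCtx)
            return err
        }
        return store.CommitTransaction(txCtx)
    }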
diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go
deleted file mode 100644
index 5c982c537..000000000
--- a/weed/filer2/abstract_sql/hashing.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package abstract_sql
-
-import (
- "crypto/md5"
- "io"
-)
-
-// returns a 64 bit big int
-func hashToLong(dir string) (v int64) {
- h := md5.New()
- io.WriteString(h, dir)
-
- b := h.Sum(nil)
-
- v += int64(b[0])
- v <<= 8
- v += int64(b[1])
- v <<= 8
- v += int64(b[2])
- v <<= 8
- v += int64(b[3])
- v <<= 8
- v += int64(b[4])
- v <<= 8
- v += int64(b[5])
- v <<= 8
- v += int64(b[6])
- v <<= 8
- v += int64(b[7])
-
- return
-}
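The deleted shift-and-add loop is just a big-endian read of the first eight MD5 bytes, reinterpreted as a signed int64 (Go's signed shifts and adds wrap deterministically, so the two forms agree). This patch swaps call sites over to util.HashStringToLong, which the diff implies computes the same value but does not show; a compact standalone equivalent, under that assumption:

    package main

    import (
        "crypto/md5"
        "encoding/binary"
        "fmt"
    )

    // hashToLong reproduces the deleted helper: the first 8 bytes of the
    // MD5 digest, read big-endian, reinterpreted as a signed int64.
    func hashToLong(dir string) int64 {
        sum := md5.Sum([]byte(dir))
        return int64(binary.BigEndian.Uint64(sum[:8]))
    }

    func main() {
        fmt.Println(hashToLong("/some/directory")) // matches the old dirhash column value
    }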
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go
index 2c1f03182..5dd7d8036 100644
--- a/weed/filer2/cassandra/cassandra_store.go
+++ b/weed/filer2/cassandra/cassandra_store.go
@@ -1,11 +1,15 @@
package cassandra
import (
+ "context"
"fmt"
+
+ "github.com/gocql/gocql"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gocql/gocql"
)
func init() {
@@ -21,10 +25,10 @@ func (store *CassandraStore) GetName() string {
return "cassandra"
}
-func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) {
+func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("keyspace"),
- configuration.GetStringSlice("hosts"),
+ configuration.GetString(prefix+"keyspace"),
+ configuration.GetStringSlice(prefix+"hosts"),
)
}
@@ -39,7 +43,17 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er
return
}
-func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) {
+func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *CassandraStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
@@ -56,12 +70,12 @@ func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return store.InsertEntry(entry)
+ return store.InsertEntry(ctx, entry)
}
-func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
var data []byte
@@ -69,12 +83,12 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
}
if len(data) == 0 {
- return nil, fmt.Errorf("not found: %s", fullpath)
+ return nil, filer_pb.ErrNotFound
}
entry = &filer2.Entry{
@@ -88,7 +102,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
return entry, nil
}
-func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error {
+func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
@@ -101,7 +115,18 @@ func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error {
return nil
}
-func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ if err := store.session.Query(
+ "DELETE FROM filemeta WHERE directory=?",
+ fullpath).Exec(); err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
@@ -114,7 +139,7 @@ func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, star
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
for iter.Scan(&name, &data) {
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(fullpath), name),
}
if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
err = decodeErr
@@ -129,3 +154,7 @@ func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, star
return entries, err
}
+
+func (store *CassandraStore) Shutdown() {
+ store.session.Close()
+}
diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go
index 7b05b53dc..a174117ea 100644
--- a/weed/filer2/configuration.go
+++ b/weed/filer2/configuration.go
@@ -17,8 +17,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
for _, store := range Stores {
if config.GetBool(store.GetName() + ".enabled") {
- viperSub := config.Sub(store.GetName())
- if err := store.Initialize(viperSub); err != nil {
+ if err := store.Initialize(config, store.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize store for %s: %+v",
store.GetName(), err)
}
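
With viper.Sub gone, each store receives the whole configuration plus its own name as a key prefix, so a store enabled as "leveldb" reads "leveldb.dir" rather than "dir" on a sub-tree. A sketch of the convention against a toy Configuration (only the GetString method of the real interface is modeled here):

package main

import "fmt"

// Configuration models the lookup method the stores call in this diff;
// the real util.Configuration also offers GetStringSlice, GetBool, etc.
type Configuration interface {
	GetString(key string) string
}

type mapConfig map[string]string

func (m mapConfig) GetString(key string) string { return m[key] }

// initialize shows the prefix convention: LoadConfiguration passes
// store.GetName()+".", so "dir" resolves as "leveldb.dir".
func initialize(config Configuration, prefix string) {
	dir := config.GetString(prefix + "dir")
	fmt.Printf("store dir: %s\n", dir)
}

func main() {
	config := mapConfig{"leveldb.dir": "/tmp/filerldb"}
	initialize(config, "leveldb.")
}
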
diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go
index f17a11727..00b9b132d 100644
--- a/weed/filer2/entry.go
+++ b/weed/filer2/entry.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Attr struct {
@@ -20,6 +21,7 @@ type Attr struct {
UserName string
GroupNames []string
SymlinkTarget string
+ Md5 []byte
}
func (attr Attr) IsDirectory() bool {
@@ -27,9 +29,10 @@ func (attr Attr) IsDirectory() bool {
}
type Entry struct {
- FullPath
+ util.FullPath
Attr
+ Extended map[string][]byte
// the following is for files
Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
@@ -52,9 +55,29 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
return nil
}
return &filer_pb.Entry{
- Name: string(entry.FullPath),
+ Name: entry.FullPath.Name(),
IsDirectory: entry.IsDirectory(),
Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks,
+ Extended: entry.Extended,
+ }
+}
+
+func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
+ if entry == nil {
+ return nil
+ }
+ dir, _ := entry.FullPath.DirAndName()
+ return &filer_pb.FullEntry{
+ Dir: dir,
+ Entry: entry.ToProtoEntry(),
+ }
+}
+
+func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
+ return &Entry{
+ FullPath: util.NewFullPath(dir, entry.Name),
+ Attr: PbToEntryAttribute(entry.Attributes),
+ Chunks: entry.Chunks,
}
}
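
FromPbEntry and ToProtoFullEntry added here convert between the wire types and the in-memory Entry; note that FromPbEntry copies attributes and chunks but not the Extended map. A round-trip sketch, assuming the repo packages build as declared in this diff:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	entry := &filer2.Entry{
		FullPath: util.NewFullPath("/some/dir", "file.txt"),
	}

	// ToProtoFullEntry splits the full path back into (Dir, Entry.Name).
	fullEntry := entry.ToProtoFullEntry()
	fmt.Println(fullEntry.Dir, fullEntry.Entry.Name) // /some/dir file.txt

	// FromPbEntry rebuilds the in-memory Entry from the wire form.
	roundTripped := filer2.FromPbEntry(fullEntry.Dir, fullEntry.Entry)
	fmt.Println(roundTripped.FullPath) // /some/dir/file.txt
}
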
diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go
index e50b3fa9a..47c911011 100644
--- a/weed/filer2/entry_codec.go
+++ b/weed/filer2/entry_codec.go
@@ -1,18 +1,21 @@
package filer2
import (
+ "bytes"
+ "fmt"
"os"
"time"
- "fmt"
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gogo/protobuf/proto"
)
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
message := &filer_pb.Entry{
Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks,
+ Extended: entry.Extended,
}
return proto.Marshal(message)
}
@@ -27,6 +30,8 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error {
entry.Attr = PbToEntryAttribute(message.Attributes)
+ entry.Extended = message.Extended
+
entry.Chunks = message.Chunks
return nil
@@ -47,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
+ Md5: entry.Attr.Md5,
}
}
@@ -66,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget
+ t.Md5 = attr.Md5
return t
}
@@ -84,6 +91,14 @@ func EqualEntry(a, b *Entry) bool {
return false
}
+ if !eq(a.Extended, b.Extended) {
+ return false
+ }
+
+ if !bytes.Equal(a.Md5, b.Md5) {
+ return false
+ }
+
for i := 0; i < len(a.Chunks); i++ {
if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
return false
@@ -91,3 +106,17 @@ func EqualEntry(a, b *Entry) bool {
}
return true
}
+
+func eq(a, b map[string][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for k, v := range a {
+ if w, ok := b[k]; !ok || !bytes.Equal(v, w) {
+ return false
+ }
+ }
+
+ return true
+}
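
The new eq helper gives EqualEntry a byte-wise comparison of the Extended map, so two entries that differ only in an extended attribute value now compare unequal. A small sketch, again assuming the packages resolve as in this diff:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	a := &filer2.Entry{
		FullPath: util.FullPath("/x"),
		Extended: map[string][]byte{"xattr-key": []byte("v1")},
	}
	b := &filer2.Entry{
		FullPath: util.FullPath("/x"),
		Extended: map[string][]byte{"xattr-key": []byte("v2")},
	}

	// Same attributes and chunks, but the Extended values differ byte-wise.
	fmt.Println(filer2.EqualEntry(a, b)) // false
}
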
diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go
new file mode 100644
index 000000000..2ef65b4a0
--- /dev/null
+++ b/weed/filer2/etcd/etcd_store.go
@@ -0,0 +1,202 @@
+package etcd
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/clientv3"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DIR_FILE_SEPARATOR = byte(0x00)
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &EtcdStore{})
+}
+
+type EtcdStore struct {
+ client *clientv3.Client
+}
+
+func (store *EtcdStore) GetName() string {
+ return "etcd"
+}
+
+func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ servers := configuration.GetString(prefix + "servers")
+ if servers == "" {
+ servers = "localhost:2379"
+ }
+
+ timeout := configuration.GetString(prefix + "timeout")
+ if timeout == "" {
+ timeout = "3s"
+ }
+
+ return store.initialize(servers, timeout)
+}
+
+func (store *EtcdStore) initialize(servers string, timeout string) (err error) {
+ glog.Infof("filer store etcd: %s", servers)
+
+ to, err := time.ParseDuration(timeout)
+ if err != nil {
+ return fmt.Errorf("parse timeout %s: %s", timeout, err)
+ }
+
+ store.client, err = clientv3.New(clientv3.Config{
+ Endpoints: strings.Split(servers, ","),
+ DialTimeout: to,
+ })
+ if err != nil {
+ return fmt.Errorf("connect to etcd %s: %s", servers, err)
+ }
+
+ return
+}
+
+func (store *EtcdStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *EtcdStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *EtcdStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ key := genKey(entry.DirAndName())
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if _, err := store.client.Put(ctx, string(key), string(value)); err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+ key := genKey(fullpath.DirAndName())
+
+ resp, err := store.client.Get(ctx, string(key))
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ }
+
+ if len(resp.Kvs) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ key := genKey(fullpath.DirAndName())
+
+ if _, err := store.client.Delete(ctx, string(key)); err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+
+ if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil {
+ return fmt.Errorf("deleteFolderChildren %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) ListDirectoryEntries(
+ ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
+) (entries []*filer2.Entry, err error) {
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+
+ resp, err := store.client.Get(ctx, string(directoryPrefix),
+ clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
+ if err != nil {
+ return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ }
+
+ for _, kv := range resp.Kvs {
+ fileName := getNameFromKey(kv.Key)
+ if fileName == "" {
+ continue
+ }
+ if fileName == startFileName && !inclusive {
+ continue
+ }
+ limit--
+ if limit < 0 {
+ break
+ }
+ entry := &filer2.Entry{
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ entries = append(entries, entry)
+ }
+
+ return entries, err
+}
+
+func genKey(dirPath, fileName string) (key []byte) {
+ key = []byte(dirPath)
+ key = append(key, DIR_FILE_SEPARATOR)
+ key = append(key, []byte(fileName)...)
+ return key
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
+ keyPrefix = []byte(string(fullpath))
+ keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix
+}
+
+func getNameFromKey(key []byte) string {
+ sepIndex := len(key) - 1
+ for sepIndex >= 0 && key[sepIndex] != DIR_FILE_SEPARATOR {
+ sepIndex--
+ }
+
+ return string(key[sepIndex+1:])
+}
+
+func (store *EtcdStore) Shutdown() {
+ store.client.Close()
+}
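
Keys in the etcd store are directory + 0x00 + file name, so a prefix scan on directory + 0x00 enumerates exactly one directory's children. A quick round trip of genKey and getNameFromKey as defined above:

package main

import "fmt"

const dirFileSeparator = byte(0x00)

// genKey joins directory and file name with a NUL byte, exactly as the
// etcd store does, so sibling files share a scannable key prefix.
func genKey(dirPath, fileName string) []byte {
	key := []byte(dirPath)
	key = append(key, dirFileSeparator)
	key = append(key, []byte(fileName)...)
	return key
}

// getNameFromKey walks back from the end to the last separator.
func getNameFromKey(key []byte) string {
	sepIndex := len(key) - 1
	for sepIndex >= 0 && key[sepIndex] != dirFileSeparator {
		sepIndex--
	}
	return string(key[sepIndex+1:])
}

func main() {
	key := genKey("/buckets/photos", "cat.jpg")
	fmt.Printf("%q -> %q\n", key, getNameFromKey(key)) // the name round-trips
}
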
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go
index 6c3157e6c..6832d0f31 100644
--- a/weed/filer2/filechunks.go
+++ b/weed/filer2/filechunks.go
@@ -3,6 +3,7 @@ package filer2
import (
"fmt"
"hash/fnv"
+ "math"
"sort"
"sync"
@@ -19,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
return
}
-func ETag(chunks []*filer_pb.FileChunk) (etag string) {
+func ETag(entry *filer_pb.Entry) (etag string) {
+ if entry.Attributes == nil || entry.Attributes.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attributes.Md5)
+}
+
+func ETagEntry(entry *Entry) (etag string) {
+ if entry.Attr.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attr.Md5)
+}
+
+func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
if len(chunks) == 1 {
return chunks[0].ETag
}
@@ -40,7 +55,7 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
fileIds[interval.fileId] = true
}
for _, chunk := range chunks {
- if found := fileIds[chunk.FileId]; found {
+ if _, found := fileIds[chunk.GetFileIdString()]; found {
compacted = append(compacted, chunk)
} else {
garbage = append(garbage, chunk)
@@ -50,15 +65,15 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
return
}
-func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) {
+func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
fileIds := make(map[string]bool)
- for _, interval := range newChunks {
- fileIds[interval.FileId] = true
+ for _, interval := range bs {
+ fileIds[interval.GetFileIdString()] = true
}
- for _, chunk := range oldChunks {
- if found := fileIds[chunk.FileId]; !found {
- unused = append(unused, chunk)
+ for _, chunk := range as {
+ if _, found := fileIds[chunk.GetFileIdString()]; !found {
+ delta = append(delta, chunk)
}
}
@@ -70,10 +85,16 @@ type ChunkView struct {
Offset int64
Size uint64
LogicOffset int64
- IsFullChunk bool
+ ChunkSize uint64
+ CipherKey []byte
+ IsGzipped bool
}
-func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
+func (cv *ChunkView) IsFullChunk() bool {
+ return cv.Size == cv.ChunkSize
+}
+
+func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles := NonOverlappingVisibleIntervals(chunks)
@@ -81,19 +102,27 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
}
-func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) {
+func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
- stop := offset + int64(size)
+ stop := offset + size
+ if size == math.MaxInt64 {
+ stop = math.MaxInt64
+ }
+ if stop < offset {
+ stop = math.MaxInt64
+ }
for _, chunk := range visibles {
+
if chunk.start <= offset && offset < chunk.stop && offset < stop {
- isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop
views = append(views, &ChunkView{
FileId: chunk.fileId,
Offset: offset - chunk.start, // offset is the data starting location in this file id
Size: uint64(min(chunk.stop, stop) - offset),
LogicOffset: offset,
- IsFullChunk: isFullChunk,
+ ChunkSize: chunk.chunkSize,
+ CipherKey: chunk.cipherKey,
+ IsGzipped: chunk.isGzipped,
})
offset = min(chunk.stop, stop)
}
@@ -120,13 +149,7 @@ var bufPool = sync.Pool{
func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {
- newV := newVisibleInterval(
- chunk.Offset,
- chunk.Offset+int64(chunk.Size),
- chunk.FileId,
- chunk.Mtime,
- true,
- )
+ newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
length := len(visibles)
if length == 0 {
@@ -140,23 +163,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
logPrintf(" before", visibles)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- v.start,
- chunk.Offset,
- v.fileId,
- v.modifiedTime,
- false,
- ))
+ newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkSize, v.cipherKey, v.isGzipped))
}
chunkStop := chunk.Offset + int64(chunk.Size)
if v.start < chunkStop && chunkStop < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- chunkStop,
- v.stop,
- v.fileId,
- v.modifiedTime,
- false,
- ))
+ newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkSize, v.cipherKey, v.isGzipped))
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
@@ -187,6 +198,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi
var newVisibles []VisibleInterval
for _, chunk := range chunks {
+
newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk)
t := visibles[:0]
visibles = newVisibles
@@ -207,16 +219,20 @@ type VisibleInterval struct {
stop int64
modifiedTime int64
fileId string
- isFullChunk bool
+ chunkSize uint64
+ cipherKey []byte
+ isGzipped bool
}
-func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval {
+func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
return VisibleInterval{
start: start,
stop: stop,
fileId: fileId,
modifiedTime: modifiedTime,
- isFullChunk: isFullChunk,
+ chunkSize: chunkSize,
+ cipherKey: cipherKey,
+ isGzipped: isGzipped,
}
}
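
The size parameter widens to int64 so callers can pass math.MaxInt64 to mean "read to the end of the file"; the two stop guards above clamp both that explicit sentinel and any offset+size overflow back to MaxInt64. The clamping in isolation:

package main

import (
	"fmt"
	"math"
)

// clampStop reproduces the guard in ViewFromVisibleIntervals: a
// MaxInt64 size means "to end of file", and an overflowed offset+size
// (which wraps negative) is clamped the same way.
func clampStop(offset, size int64) int64 {
	stop := offset + size
	if size == math.MaxInt64 {
		stop = math.MaxInt64
	}
	if stop < offset {
		stop = math.MaxInt64
	}
	return stop
}

func main() {
	fmt.Println(clampStop(100, 50))            // 150
	fmt.Println(clampStop(100, math.MaxInt64)) // MaxInt64, not a wrapped negative
}
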
diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go
index e75e60753..7b1133b85 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer2/filechunks_test.go
@@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
- Size int
+ Size int64
Expected []*ChunkView
}{
// case 0: normal
@@ -331,6 +331,42 @@ func TestChunksReading(t *testing.T) {
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
},
},
+ // case 8: edge cases
+ {
+ Chunks: []*filer_pb.FileChunk{
+ {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+ {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134},
+ {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353},
+ },
+ Offset: 0,
+ Size: 300,
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0},
+ {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90},
+ {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190},
+ },
+ },
+ // case 9: edge cases
+ {
+ Chunks: []*filer_pb.FileChunk{
+ {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1},
+ {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2},
+ {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3},
+ {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4},
+ {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5},
+ {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6},
+ },
+ Offset: 0,
+ Size: 153578836,
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0},
+ {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936},
+ {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760},
+ {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736},
+ {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168},
+ {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248},
+ },
+ },
}
for i, testcase := range testcases {
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go
index 1ee2f5ede..06c997f36 100644
--- a/weed/filer2/filer.go
+++ b/weed/filer2/filer.go
@@ -3,35 +3,53 @@ package filer2
import (
"context"
"fmt"
- "math"
"os"
- "path/filepath"
"strings"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/karlseguin/ccache"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
"github.com/chrislusf/seaweedfs/weed/wdclient"
- "github.com/karlseguin/ccache"
)
+const PaginationSize = 1024 * 256
+
var (
OS_UID = uint32(os.Getuid())
OS_GID = uint32(os.Getgid())
)
type Filer struct {
- store FilerStore
- directoryCache *ccache.Cache
- MasterClient *wdclient.MasterClient
- fileIdDeletionChan chan string
+ store *FilerStoreWrapper
+ directoryCache *ccache.Cache
+ MasterClient *wdclient.MasterClient
+ fileIdDeletionQueue *util.UnboundedQueue
+ GrpcDialOption grpc.DialOption
+ DirBucketsPath string
+ FsyncBuckets []string
+ buckets *FilerBuckets
+ Cipher bool
+ LocalMetaLogBuffer *log_buffer.LogBuffer
+ metaLogCollection string
+ metaLogReplication string
}
-func NewFiler(masters []string) *Filer {
+func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
f := &Filer{
- directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
- MasterClient: wdclient.NewMasterClient(context.Background(), "filer", masters),
- fileIdDeletionChan: make(chan string, 4096),
+ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
+ MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
+ fileIdDeletionQueue: util.NewUnboundedQueue(),
+ GrpcDialOption: grpcDialOption,
}
+ f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
+ f.metaLogCollection = collection
+ f.metaLogReplication = replication
go f.loopProcessingDeletion()
@@ -39,7 +57,11 @@ func NewFiler(masters []string) *Filer {
}
func (f *Filer) SetStore(store FilerStore) {
- f.store = store
+ f.store = NewFilerStoreWrapper(store)
+}
+
+func (f *Filer) GetStore() (store FilerStore) {
+ return f.store
}
func (f *Filer) DisableDirectoryCache() {
@@ -54,7 +76,19 @@ func (fs *Filer) KeepConnectedToMaster() {
fs.MasterClient.KeepConnectedToMaster()
}
-func (f *Filer) CreateEntry(entry *Entry) error {
+func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return f.store.BeginTransaction(ctx)
+}
+
+func (f *Filer) CommitTransaction(ctx context.Context) error {
+ return f.store.CommitTransaction(ctx)
+}
+
+func (f *Filer) RollbackTransaction(ctx context.Context) error {
+ return f.store.RollbackTransaction(ctx)
+}
+
+func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool) error {
if string(entry.FullPath) == "/" {
return nil
@@ -67,7 +101,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
var lastDirectoryEntry *Entry
for i := 1; i < len(dirParts); i++ {
- dirPath := "/" + filepath.Join(dirParts[:i]...)
+ dirPath := "/" + util.Join(dirParts[:i]...)
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// first check local cache
@@ -76,9 +110,9 @@ func (f *Filer) CreateEntry(entry *Entry) error {
// not found, check the store directly
if dirEntry == nil {
glog.V(4).Infof("find uncached directory: %s", dirPath)
- dirEntry, _ = f.FindEntry(FullPath(dirPath))
+ dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath))
} else {
- glog.V(4).Infof("found cached directory: %s", dirPath)
+ // glog.V(4).Infof("found cached directory: %s", dirPath)
}
// no such existing directory
@@ -88,27 +122,34 @@ func (f *Filer) CreateEntry(entry *Entry) error {
now := time.Now()
dirEntry = &Entry{
- FullPath: FullPath(dirPath),
+ FullPath: util.FullPath(dirPath),
Attr: Attr{
- Mtime: now,
- Crtime: now,
- Mode: os.ModeDir | 0770,
- Uid: entry.Uid,
- Gid: entry.Gid,
+ Mtime: now,
+ Crtime: now,
+ Mode: os.ModeDir | entry.Mode | 0110,
+ Uid: entry.Uid,
+ Gid: entry.Gid,
+ Collection: entry.Collection,
+ Replication: entry.Replication,
+ UserName: entry.UserName,
+ GroupNames: entry.GroupNames,
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
- mkdirErr := f.store.InsertEntry(dirEntry)
+ mkdirErr := f.store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
- if _, err := f.FindEntry(FullPath(dirPath)); err == ErrNotFound {
+ if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
+ glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
- f.NotifyUpdateEvent(nil, dirEntry, false)
+ f.maybeAddBucket(dirEntry)
+ f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster)
}
} else if !dirEntry.IsDirectory() {
+ glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}
@@ -123,6 +164,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
}
if lastDirectoryEntry == nil {
+ glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
return fmt.Errorf("parent folder not found: %v", entry.FullPath)
}
@@ -134,38 +176,50 @@ func (f *Filer) CreateEntry(entry *Entry) error {
}
*/
- oldEntry, _ := f.FindEntry(entry.FullPath)
+ oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
+ glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl)
if oldEntry == nil {
- if err := f.store.InsertEntry(entry); err != nil {
+ if err := f.store.InsertEntry(ctx, entry); err != nil {
+ glog.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
- if err := f.UpdateEntry(oldEntry, entry); err != nil {
+ if o_excl {
+ glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+ return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
+ }
+ if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
+ glog.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
- f.NotifyUpdateEvent(oldEntry, entry, true)
+ f.maybeAddBucket(entry)
+ f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster)
f.deleteChunksIfNotNew(oldEntry, entry)
+ glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+
return nil
}
-func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) {
+func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
if oldEntry != nil {
if oldEntry.IsDirectory() && !entry.IsDirectory() {
+ glog.Errorf("existing %s is a directory", entry.FullPath)
return fmt.Errorf("existing %s is a directory", entry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
+ glog.Errorf("existing %s is a file", entry.FullPath)
return fmt.Errorf("existing %s is a file", entry.FullPath)
}
}
- return f.store.UpdateEntry(entry)
+ return f.store.UpdateEntry(ctx, entry)
}
-func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
+func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {
now := time.Now()
@@ -181,74 +235,59 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
},
}, nil
}
- return f.store.FindEntry(p)
-}
-
-func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) {
- entry, err := f.FindEntry(p)
- if err != nil {
- return err
- }
-
- if entry.IsDirectory() {
- limit := int(1)
- if isRecursive {
- limit = math.MaxInt32
- }
- lastFileName := ""
- includeLastFile := false
- for limit > 0 {
- entries, err := f.ListDirectoryEntries(p, lastFileName, includeLastFile, 1024)
- if err != nil {
- return fmt.Errorf("list folder %s: %v", p, err)
- }
- if len(entries) == 0 {
- break
- } else {
- if isRecursive {
- for _, sub := range entries {
- lastFileName = sub.Name()
- f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks)
- limit--
- if limit <= 0 {
- break
- }
- }
- } else {
- if len(entries) > 0 {
- return fmt.Errorf("folder %s is not empty", p)
- }
- }
- f.cacheDelDirectory(string(p))
- if len(entries) < 1024 {
- break
- }
- }
+ entry, err = f.store.FindEntry(ctx, p)
+ if entry != nil && entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.store.DeleteEntry(ctx, p.Child(entry.Name()))
+ return nil, filer_pb.ErrNotFound
}
}
+ return
- if shouldDeleteChunks {
- f.DeleteChunks(entry.Chunks)
- }
+}
- if p == "/" {
- return nil
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
+ if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+ p = p[0 : len(p)-1]
}
- glog.V(3).Infof("deleting entry %v", p)
- f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
+ var makeupEntries []*Entry
+ entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+ for expiredCount > 0 && err == nil {
+ makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
+ if err == nil {
+ entries = append(entries, makeupEntries...)
+ }
+ }
- return f.store.DeleteEntry(p)
+ return entries, err
}
-func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
- if strings.HasSuffix(string(p), "/") && len(p) > 1 {
- p = p[0 : len(p)-1]
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
+ listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+ if listErr != nil {
+ return listedEntries, expiredCount, "", listErr
}
- return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit)
+ for _, entry := range listedEntries {
+ lastFileName = entry.Name()
+ if entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.store.DeleteEntry(ctx, p.Child(entry.Name()))
+ expiredCount++
+ continue
+ }
+ }
+ entries = append(entries, entry)
+ }
+ return
}
func (f *Filer) cacheDelDirectory(dirpath string) {
+
+ if dirpath == "/" {
+ return
+ }
+
if f.directoryCache == nil {
return
}
@@ -257,6 +296,7 @@ func (f *Filer) cacheDelDirectory(dirpath string) {
}
func (f *Filer) cacheGetDirectory(dirpath string) *Entry {
+
if f.directoryCache == nil {
return nil
}
@@ -280,3 +320,8 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
+
+func (f *Filer) Shutdown() {
+ f.LocalMetaLogBuffer.Shutdown()
+ f.store.Shutdown()
+}
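
Both FindEntry and doListDirectoryEntries now handle TTL lazily: an entry whose Crtime plus TtlSec has passed is deleted from the store at read time and reported as missing, and the listing loop re-fetches to make up for the expired rows it skipped. The expiry test in isolation:

package main

import (
	"fmt"
	"time"
)

// hasExpired mirrors the filer's lazy TTL check: a TtlSec of zero
// means no expiry; otherwise the entry dies TtlSec seconds after its
// creation time.
func hasExpired(crtime time.Time, ttlSec int32) bool {
	if ttlSec <= 0 {
		return false
	}
	return crtime.Add(time.Duration(ttlSec) * time.Second).Before(time.Now())
}

func main() {
	created := time.Now().Add(-2 * time.Hour)
	fmt.Println(hasExpired(created, 3600)) // true: the one-hour TTL elapsed
	fmt.Println(hasExpired(created, 0))    // false: no TTL set
}
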
diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go
new file mode 100644
index 000000000..7a57e7ee1
--- /dev/null
+++ b/weed/filer2/filer_buckets.go
@@ -0,0 +1,121 @@
+package filer2
+
+import (
+ "context"
+ "math"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type BucketName string
+type BucketOption struct {
+ Name BucketName
+ Replication string
+ fsync bool
+}
+type FilerBuckets struct {
+ dirBucketsPath string
+ buckets map[BucketName]*BucketOption
+ sync.RWMutex
+}
+
+func (f *Filer) LoadBuckets() {
+
+ f.buckets = &FilerBuckets{
+ buckets: make(map[BucketName]*BucketOption),
+ }
+
+ limit := math.MaxInt32
+
+ entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
+
+ if err != nil {
+ glog.V(1).Infof("no buckets found: %v", err)
+ return
+ }
+
+ shouldFsyncMap := make(map[string]bool)
+ for _, bucket := range f.FsyncBuckets {
+ shouldFsyncMap[bucket] = true
+ }
+
+ glog.V(1).Infof("buckets found: %d", len(entries))
+
+ f.buckets.Lock()
+ for _, entry := range entries {
+ _, shouldFsync := shouldFsyncMap[entry.Name()]
+ f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
+ Name: BucketName(entry.Name()),
+ Replication: entry.Replication,
+ fsync: shouldFsync,
+ }
+ }
+ f.buckets.Unlock()
+
+}
+
+func (f *Filer) ReadBucketOption(bucketName string) (replication string, fsync bool) {
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ option, found := f.buckets.buckets[BucketName(bucketName)]
+
+ if !found {
+ return "", false
+ }
+ return option.Replication, option.fsync
+
+}
+
+func (f *Filer) isBucket(entry *Entry) bool {
+ if !entry.IsDirectory() {
+ return false
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return false
+ }
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ _, found := f.buckets.buckets[BucketName(dirName)]
+
+ return found
+
+}
+
+func (f *Filer) maybeAddBucket(entry *Entry) {
+ if !entry.IsDirectory() {
+ return
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return
+ }
+ f.addBucket(dirName, &BucketOption{
+ Name: BucketName(dirName),
+ Replication: entry.Replication,
+ })
+}
+
+func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ f.buckets.buckets[BucketName(bucketName)] = bucketOption
+
+}
+
+func (f *Filer) deleteBucket(bucketName string) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ delete(f.buckets.buckets, BucketName(bucketName))
+
+}
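
Bucket options are cached in memory behind a RWMutex; readers go through ReadBucketOption, which falls back to zero values for unknown buckets. Since the fields are unexported, here is a self-contained sketch of the same read-through pattern rather than a direct call into the struct:

package main

import (
	"fmt"
	"sync"
)

type bucketOption struct {
	replication string
	fsync       bool
}

// bucketRegistry mirrors FilerBuckets: a map guarded by a RWMutex so
// per-request reads do not contend with the occasional create/delete.
type bucketRegistry struct {
	sync.RWMutex
	buckets map[string]*bucketOption
}

// read follows ReadBucketOption: unknown buckets fall back to the
// zero values ("" replication, fsync off).
func (r *bucketRegistry) read(name string) (string, bool) {
	r.RLock()
	defer r.RUnlock()
	option, found := r.buckets[name]
	if !found {
		return "", false
	}
	return option.replication, option.fsync
}

func main() {
	registry := &bucketRegistry{buckets: map[string]*bucketOption{
		"photos": {replication: "001", fsync: true},
	}}
	replication, fsync := registry.read("photos")
	fmt.Println(replication, fsync) // 001 true
}
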
diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go
new file mode 100644
index 000000000..f30e10d59
--- /dev/null
+++ b/weed/filer2/filer_delete_entry.go
@@ -0,0 +1,128 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool) (err error) {
+ if p == "/" {
+ return nil
+ }
+
+ entry, findErr := f.FindEntry(ctx, p)
+ if findErr != nil {
+ return findErr
+ }
+
+ isCollection := f.isBucket(entry)
+
+ var chunks []*filer_pb.FileChunk
+ chunks = append(chunks, entry.Chunks...)
+ if entry.IsDirectory() {
+ // delete the folder children, not including the folder itself
+ var dirChunks []*filer_pb.FileChunk
+ dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster)
+ if err != nil {
+ glog.V(0).Infof("delete directory %s: %v", p, err)
+ return fmt.Errorf("delete directory %s: %v", p, err)
+ }
+ chunks = append(chunks, dirChunks...)
+ }
+
+ // delete the file or folder
+ err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster)
+ if err != nil {
+ return fmt.Errorf("delete file %s: %v", p, err)
+ }
+
+ if shouldDeleteChunks && !isCollection {
+ go f.DeleteChunks(chunks)
+ }
+ if isCollection {
+ collectionName := entry.Name()
+ f.doDeleteCollection(collectionName)
+ f.deleteBucket(collectionName)
+ }
+
+ return nil
+}
+
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool) (chunks []*filer_pb.FileChunk, err error) {
+
+ lastFileName := ""
+ includeLastFile := false
+ for {
+ entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
+ if err != nil {
+ glog.Errorf("list folder %s: %v", entry.FullPath, err)
+ return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+ }
+ if lastFileName == "" && !isRecursive && len(entries) > 0 {
+ // only true on the first iteration of the loop
+ return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
+ }
+
+ for _, sub := range entries {
+ lastFileName = sub.Name()
+ var dirChunks []*filer_pb.FileChunk
+ if sub.IsDirectory() {
+ dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false)
+ f.cacheDelDirectory(string(sub.FullPath))
+ f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster)
+ chunks = append(chunks, dirChunks...)
+ } else {
+ chunks = append(chunks, sub.Chunks...)
+ }
+ if err != nil && !ignoreRecursiveError {
+ return nil, err
+ }
+ }
+
+ if len(entries) < PaginationSize {
+ break
+ }
+ }
+
+ glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+
+ if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
+ return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ }
+
+ return chunks, nil
+}
+
+func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool) (err error) {
+
+ glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+
+ if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
+ return fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ }
+ if entry.IsDirectory() {
+ f.cacheDelDirectory(string(entry.FullPath))
+ }
+ f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster)
+
+ return nil
+}
+
+func (f *Filer) doDeleteCollection(collectionName string) (err error) {
+
+ return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: collectionName,
+ })
+ if err != nil {
+ glog.Infof("delete collection %s: %v", collectionName, err)
+ }
+ return err
+ })
+
+}
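
doBatchDeleteFolderMetaAndData pages through a folder's children PaginationSize at a time, resuming from the last name seen, and only bulk-deletes the folder's rows once every child's chunks have been collected. The pagination skeleton, detached from the store, with listPage as a hypothetical stand-in for ListDirectoryEntries:

package main

import "fmt"

const paginationSize = 4 // stand-in for filer2.PaginationSize (1024 * 256)

// listPage is a hypothetical pager over a sorted child list; it plays
// the role of ListDirectoryEntries(dir, startFrom, inclusive, limit).
func listPage(children []string, startFrom string, limit int) []string {
	var page []string
	for _, name := range children {
		if name <= startFrom {
			continue
		}
		page = append(page, name)
		if len(page) == limit {
			break
		}
	}
	return page
}

func main() {
	children := []string{"a", "b", "c", "d", "e", "f", "g"}
	lastFileName := ""
	for {
		page := listPage(children, lastFileName, paginationSize)
		for _, name := range page {
			lastFileName = name
			fmt.Println("collect chunks of", name)
		}
		// A short page means the directory is exhausted.
		if len(page) < paginationSize {
			break
		}
	}
}
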
diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go
index 8fe8ae04f..a6b229771 100644
--- a/weed/filer2/filer_deletion.go
+++ b/weed/filer2/filer_deletion.go
@@ -6,16 +6,14 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
)
-func (f *Filer) loopProcessingDeletion() {
-
- ticker := time.NewTicker(5 * time.Second)
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
+func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) {
+ return func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
for _, vid := range vids {
- locs := f.MasterClient.GetVidLocations(vid)
+ locs, _ := masterClient.GetVidLocations(vid)
var locations []operation.Location
for _, loc := range locs {
locations = append(locations, operation.Location{
@@ -30,35 +28,56 @@ func (f *Filer) loopProcessingDeletion() {
}
return m, nil
}
+}
+
+func (f *Filer) loopProcessingDeletion() {
+
+ lookupFunc := LookupByMasterClientFn(f.MasterClient)
- var fileIds []string
+ DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+ var deletionCount int
for {
- select {
- case fid := <-f.fileIdDeletionChan:
- fileIds = append(fileIds, fid)
- if len(fileIds) >= 4096 {
- glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
- fileIds = fileIds[:0]
- }
- case <-ticker.C:
- if len(fileIds) > 0 {
- glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
- fileIds = fileIds[:0]
+ deletionCount = 0
+ f.fileIdDeletionQueue.Consume(func(fileIds []string) {
+ for len(fileIds) > 0 {
+ var toDeleteFileIds []string
+ if len(fileIds) > DeletionBatchSize {
+ toDeleteFileIds = fileIds[:DeletionBatchSize]
+ fileIds = fileIds[DeletionBatchSize:]
+ } else {
+ toDeleteFileIds = fileIds
+ fileIds = fileIds[:0]
+ }
+ deletionCount = len(toDeleteFileIds)
+ deleteResults, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+ if err != nil {
+ glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ } else {
+ glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+ }
+ if len(deleteResults) != deletionCount {
+ glog.V(0).Infof("delete %d fileIds actual %d", deletionCount, len(deleteResults))
+ }
}
+ })
+
+ if deletionCount == 0 {
+ time.Sleep(1123 * time.Millisecond)
}
}
}
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
for _, chunk := range chunks {
- f.fileIdDeletionChan <- chunk.FileId
+ f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
}
}
+// DeleteFileByFileId deletes a file directly by its file id.
+// Only used when the fileId is not being managed by snapshots.
func (f *Filer) DeleteFileByFileId(fileId string) {
- f.fileIdDeletionChan <- fileId
+ f.fileIdDeletionQueue.EnQueue(fileId)
}
func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
@@ -71,16 +90,13 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
}
var toDelete []*filer_pb.FileChunk
+ newChunkIds := make(map[string]bool)
+ for _, newChunk := range newEntry.Chunks {
+ newChunkIds[newChunk.GetFileIdString()] = true
+ }
for _, oldChunk := range oldEntry.Chunks {
- found := false
- for _, newChunk := range newEntry.Chunks {
- if oldChunk.FileId == newChunk.FileId {
- found = true
- break
- }
- }
- if !found {
+ if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
toDelete = append(toDelete, oldChunk)
}
}
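
The O(n·m) nested scan in deleteChunksIfNotNew becomes a set lookup of new file ids, the same shape as MinusChunks above. The pattern in isolation:

package main

import "fmt"

// minus returns the ids present in olds but absent from news — the
// chunks that became garbage after an update and are safe to enqueue
// for deletion.
func minus(olds, news []string) (delta []string) {
	newIds := make(map[string]bool)
	for _, id := range news {
		newIds[id] = true
	}
	for _, id := range olds {
		if !newIds[id] {
			delta = append(delta, id)
		}
	}
	return delta
}

func main() {
	olds := []string{"3,01637037d6", "3,016a7e9db7"}
	news := []string{"3,016a7e9db7", "4,0171234abc"}
	fmt.Println(minus(olds, news)) // [3,01637037d6]
}
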
diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go
index b3c215249..340f19fb5 100644
--- a/weed/filer2/filer_notify.go
+++ b/weed/filer2/filer_notify.go
@@ -1,33 +1,160 @@
package filer2
import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
-func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) {
- var key string
+func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool) {
+ var fullpath string
if oldEntry != nil {
- key = string(oldEntry.FullPath)
+ fullpath = string(oldEntry.FullPath)
} else if newEntry != nil {
- key = string(newEntry.FullPath)
+ fullpath = string(newEntry.FullPath)
} else {
return
}
+ // println("fullpath:", fullpath)
+
+ if strings.HasPrefix(fullpath, SystemLogDir) {
+ return
+ }
+
+ newParentPath := ""
+ if newEntry != nil {
+ newParentPath, _ = newEntry.FullPath.DirAndName()
+ }
+ eventNotification := &filer_pb.EventNotification{
+ OldEntry: oldEntry.ToProtoEntry(),
+ NewEntry: newEntry.ToProtoEntry(),
+ DeleteChunks: deleteChunks,
+ NewParentPath: newParentPath,
+ IsFromOtherCluster: isFromOtherCluster,
+ }
+
if notification.Queue != nil {
+ glog.V(3).Infof("notifying entry update %v", fullpath)
+ notification.Queue.SendMessage(fullpath, eventNotification)
+ }
+
+ f.logMetaEvent(ctx, fullpath, eventNotification)
+
+}
- glog.V(3).Infof("notifying entry update %v", key)
+func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
- notification.Queue.SendMessage(
- key,
- &filer_pb.EventNotification{
- OldEntry: oldEntry.ToProtoEntry(),
- NewEntry: newEntry.ToProtoEntry(),
- DeleteChunks: deleteChunks,
- },
- )
+ dir, _ := util.FullPath(fullpath).DirAndName()
+
+ event := &filer_pb.SubscribeMetadataResponse{
+ Directory: dir,
+ EventNotification: eventNotification,
+ TsNs: time.Now().UnixNano(),
+ }
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return
+ }
+
+ f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data)
+
+}
+
+func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
+
+ targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ // startTime.Second(), startTime.Nanosecond(),
+ )
+
+ if err := f.appendToFile(targetFile, buf); err != nil {
+ glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ }
+}
+
+func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366)
+ if listDayErr != nil {
+ return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ }
+ for _, dayEntry := range dayEntries {
+ // println("checking day", dayEntry.FullPath)
+ hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60)
+ if listHourMinuteErr != nil {
+ return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+ }
+ for _, hourMinuteEntry := range hourMinuteEntries {
+ // println("checking hh-mm", hourMinuteEntry.FullPath)
+ if dayEntry.Name() == startDate {
+ if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
+ continue
+ }
+ }
+ // println("processing", hourMinuteEntry.FullPath)
+ chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+ if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ break
+ }
+ return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
+ }
+ chunkedFileReader.Close()
+ }
+ }
+
+ return lastTsNs, nil
+}
+func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+ for {
+ n, err := r.Read(sizeBuf)
+ if err != nil {
+ return lastTsNs, err
+ }
+ if n != 4 {
+ return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
+ }
+ size := util.BytesToUint32(sizeBuf)
+ // println("entry size", size)
+ entryData := make([]byte, size)
+ n, err = r.Read(entryData)
+ if err != nil {
+ return lastTsNs, err
+ }
+ if n != int(size) {
+ return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
+ }
+ logEntry := &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ return lastTsNs, err
+ }
+ if logEntry.TsNs <= ns {
+ return lastTsNs, nil
+ }
+ // println("each log: ", logEntry.TsNs)
+ if err := eachLogEntryFn(logEntry); err != nil {
+ return lastTsNs, err
+ } else {
+ lastTsNs = logEntry.TsNs
+ }
}
}
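
ReadEachLogEntry expects each record framed as a 4-byte length prefix followed by a marshaled filer_pb.LogEntry. A writer-side sketch of the same framing, assuming (as the reader's util.BytesToUint32 call implies) a big-endian length:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFramed emits one record in the segment-file format that
// ReadEachLogEntry consumes: a 4-byte big-endian size, then the
// payload (in the filer, a marshaled filer_pb.LogEntry).
func writeFramed(w io.Writer, payload []byte) error {
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(payload)))
	if _, err := w.Write(sizeBuf); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

func main() {
	var buf bytes.Buffer
	writeFramed(&buf, []byte("entry-1"))
	writeFramed(&buf, []byte("entry-2"))
	fmt.Printf("%d bytes, first size prefix %v\n", buf.Len(), buf.Bytes()[:4])
}
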
diff --git a/weed/filer2/filer_notify_append.go b/weed/filer2/filer_notify_append.go
new file mode 100644
index 000000000..61bbc9c45
--- /dev/null
+++ b/weed/filer2/filer_notify_append.go
@@ -0,0 +1,73 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) appendToFile(targetFile string, data []byte) error {
+
+ assignResult, uploadResult, err2 := f.assignAndUpload(data)
+ if err2 != nil {
+ return err2
+ }
+
+ // find out existing entry
+ fullpath := util.FullPath(targetFile)
+ entry, err := f.FindEntry(context.Background(), fullpath)
+ var offset int64 = 0
+ if err == filer_pb.ErrNotFound {
+ entry = &Entry{
+ FullPath: fullpath,
+ Attr: Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+ } else {
+ offset = int64(TotalSize(entry.Chunks))
+ }
+
+ // append to existing chunks
+ entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+
+ // update the entry
+ err = f.CreateEntry(context.Background(), entry, false, false)
+
+ return err
+}
+
+func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+ // assign a volume location
+ assignRequest := &operation.VolumeAssignRequest{
+ Count: 1,
+ Collection: f.metaLogCollection,
+ Replication: f.metaLogReplication,
+ WritableVolumeCount: 1,
+ }
+ assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
+ if err != nil {
+ return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+ }
+ if assignResult.Error != "" {
+ return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
+ }
+
+ // upload data
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer2/filer_notify_test.go
index b74e2ad35..29170bfdf 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer2/filer_notify_test.go
@@ -5,13 +5,15 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
"github.com/golang/protobuf/proto"
)
func TestProtoMarshalText(t *testing.T) {
oldEntry := &Entry{
- FullPath: FullPath("/this/path/to"),
+ FullPath: util.FullPath("/this/path/to"),
Attr: Attr{
Mtime: time.Now(),
Mode: 0644,
diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go
index 9ef1d9d48..f36c74f14 100644
--- a/weed/filer2/filerstore.go
+++ b/weed/filer2/filerstore.go
@@ -1,7 +1,11 @@
package filer2
import (
- "errors"
+ "context"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -9,13 +13,129 @@ type FilerStore interface {
// GetName gets the name to locate the configuration in filer.toml file
GetName() string
// Initialize initializes the file store
- Initialize(configuration util.Configuration) error
- InsertEntry(*Entry) error
- UpdateEntry(*Entry) (err error)
+ Initialize(configuration util.Configuration, prefix string) error
+ InsertEntry(context.Context, *Entry) error
+ UpdateEntry(context.Context, *Entry) (err error)
// err == filer2.ErrNotFound if not found
- FindEntry(FullPath) (entry *Entry, err error)
- DeleteEntry(FullPath) (err error)
- ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+ FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
+ DeleteEntry(context.Context, util.FullPath) (err error)
+ DeleteFolderChildren(context.Context, util.FullPath) (err error)
+ ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+
+ BeginTransaction(ctx context.Context) (context.Context, error)
+ CommitTransaction(ctx context.Context) error
+ RollbackTransaction(ctx context.Context) error
+
+ Shutdown()
+}
+
+type FilerStoreWrapper struct {
+ actualStore FilerStore
+}
+
+func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
+ if innerStore, ok := store.(*FilerStoreWrapper); ok {
+ return innerStore
+ }
+ return &FilerStoreWrapper{
+ actualStore: store,
+ }
+}
+
+func (fsw *FilerStoreWrapper) GetName() string {
+ return fsw.actualStore.GetName()
+}
+
+func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error {
+ return fsw.actualStore.Initialize(configuration, prefix)
+}
+
+func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
+ }()
+
+ filer_pb.BeforeEntrySerialization(entry.Chunks)
+ return fsw.actualStore.InsertEntry(ctx, entry)
+}
+
+func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
+ }()
+
+ filer_pb.BeforeEntrySerialization(entry.Chunks)
+ return fsw.actualStore.UpdateEntry(ctx, entry)
+}
+
+func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
+ }()
+
+ entry, err = fsw.actualStore.FindEntry(ctx, fp)
+ if err != nil {
+ return nil, err
+ }
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ return
+}
+
+func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds())
+ }()
+
+ return fsw.actualStore.DeleteEntry(ctx, fp)
+}
+
+func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds())
+ }()
+
+ return fsw.actualStore.DeleteFolderChildren(ctx, fp)
+}
+
+func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
+ stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds())
+ }()
+
+ entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+ if err != nil {
+ return nil, err
+ }
+ for _, entry := range entries {
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ }
+ return entries, err
}
-var ErrNotFound = errors.New("filer: no entry is found in filer store")
+func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return fsw.actualStore.BeginTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error {
+ return fsw.actualStore.CommitTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
+ return fsw.actualStore.RollbackTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) Shutdown() {
+ fsw.actualStore.Shutdown()
+}
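
NewFilerStoreWrapper is idempotent: wrapping an already-wrapped store returns the existing wrapper, so SetStore can be called defensively without stacking metrics layers. A minimal sketch of that guard, with the Prometheus instrumentation elided:

package main

import "fmt"

type store interface{ name() string }

type diskStore struct{}

func (diskStore) name() string { return "disk" }

// wrapper mirrors FilerStoreWrapper: it decorates a store (here with
// nothing; in the filer, with counters and latency histograms).
type wrapper struct{ actual store }

func (w *wrapper) name() string { return w.actual.name() }

// newWrapper repeats the guard in NewFilerStoreWrapper: wrapping a
// wrapper hands back the same instance instead of nesting.
func newWrapper(s store) *wrapper {
	if inner, ok := s.(*wrapper); ok {
		return inner
	}
	return &wrapper{actual: s}
}

func main() {
	w1 := newWrapper(diskStore{})
	w2 := newWrapper(w1)
	fmt.Println(w1 == w2) // true: no double wrapping
}
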
diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go
deleted file mode 100644
index be6e34431..000000000
--- a/weed/filer2/fullpath.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package filer2
-
-import (
- "path/filepath"
- "strings"
-)
-
-type FullPath string
-
-func NewFullPath(dir, name string) FullPath {
- if strings.HasSuffix(dir, "/") {
- return FullPath(dir + name)
- }
- return FullPath(dir + "/" + name)
-}
-
-func (fp FullPath) DirAndName() (string, string) {
- dir, name := filepath.Split(string(fp))
- if dir == "/" {
- return dir, name
- }
- if len(dir) < 1 {
- return "/", ""
- }
- return dir[:len(dir)-1], name
-}
-
-func (fp FullPath) Name() string {
- _, name := filepath.Split(string(fp))
- return name
-}
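
FullPath moves wholesale into the util package; every former filer2.FullPath call site in this diff now goes through util.FullPath. Assuming the relocated type keeps the semantics of the removed code above:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	fp := util.NewFullPath("/some/dir", "file.txt") // joins with exactly one slash
	dir, name := fp.DirAndName()
	fmt.Println(dir, name) // /some/dir file.txt
	fmt.Println(fp.Name()) // file.txt
}
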
diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go
index 179107e2c..31919ca49 100644
--- a/weed/filer2/leveldb/leveldb_store.go
+++ b/weed/filer2/leveldb/leveldb_store.go
@@ -2,13 +2,18 @@ package leveldb
import (
"bytes"
+ "context"
"fmt"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/syndtr/goleveldb/leveldb"
- leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
)
const (
@@ -27,8 +32,8 @@ func (store *LevelDBStore) GetName() string {
return "leveldb"
}
-func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) {
- dir := configuration.GetString("dir")
+func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
return store.initialize(dir)
}
@@ -38,14 +43,35 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
- if store.db, err = leveldb.OpenFile(dir, nil); err != nil {
- glog.Infof("filer store open dir %s: %v", dir, err)
- return
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10,
+ }
+
+ if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
+ if errors.IsCorrupted(err) {
+ store.db, err = leveldb.RecoverFile(dir, opts)
+ }
+ if err != nil {
+ glog.Infof("filer store open dir %s: %v", dir, err)
+ return
+ }
}
return
}
-func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *LevelDBStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
key := genKey(entry.DirAndName())
value, err := entry.EncodeAttributesAndChunks()
@@ -64,18 +90,18 @@ func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return store.InsertEntry(entry)
+ return store.InsertEntry(ctx, entry)
}
-func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())
data, err := store.db.Get(key, nil)
if err == leveldb.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
@@ -94,7 +120,7 @@ func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.En
return entry, nil
}
-func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
+func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
err = store.db.Delete(key, nil)
@@ -105,7 +131,35 @@ func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
return nil
}
-func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+
+ batch := new(leveldb.Batch)
+
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+ iter := store.db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ batch.Delete(genKey(string(fullpath), fileName))
+ }
+ iter.Release()
+
+ err = store.db.Write(batch, nil)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
@@ -128,7 +182,7 @@ func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startF
break
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
err = decodeErr
@@ -149,7 +203,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@@ -168,3 +222,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
+
+func (store *LevelDBStore) Shutdown() {
+ store.db.Close()
+}
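
Note on `genKey`/`genDirectoryKeyPrefix`: each entry is stored under `<directory><DIR_FILE_SEPARATOR><name>`, so a directory listing is just a prefix scan. A small sketch, assuming a zero byte for the separator (the actual `DIR_FILE_SEPARATOR` constant is declared in the `const` block not shown in this hunk):

```go
package main

import (
	"bytes"
	"fmt"
)

const dirFileSeparator = byte(0x00) // assumed value of DIR_FILE_SEPARATOR

// genKey mirrors the store's key layout: <directory><separator><fileName>.
func genKey(dirPath, fileName string) []byte {
	key := []byte(dirPath)
	key = append(key, dirFileSeparator)
	key = append(key, []byte(fileName)...)
	return key
}

func main() {
	prefix := genKey("/home/chris", "") // scan prefix for one directory
	k1 := genKey("/home/chris", "a.jpg")
	k2 := genKey("/home/chrisX", "b.jpg")
	fmt.Println(bytes.HasPrefix(k1, prefix)) // true: same directory
	fmt.Println(bytes.HasPrefix(k2, prefix)) // false: the separator keeps look-alike siblings apart
}
```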
diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go
index 5b214558f..77df07a9b 100644
--- a/weed/filer2/leveldb/leveldb_store_test.go
+++ b/weed/filer2/leveldb/leveldb_store_test.go
@@ -1,14 +1,17 @@
package leveldb
import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "context"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -16,7 +19,9 @@ func TestCreateAndFind(t *testing.T) {
filer.SetStore(store)
filer.DisableDirectoryCache()
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
entry1 := &filer2.Entry{
FullPath: fullpath,
@@ -27,12 +32,12 @@ func TestCreateAndFind(t *testing.T) {
},
}
- if err := filer.CreateEntry(entry1); err != nil {
+ if err := filer.CreateEntry(ctx, entry1, false, false); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
- entry, err := filer.FindEntry(fullpath)
+ entry, err := filer.FindEntry(ctx, fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
@@ -45,14 +50,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
+ entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
+ entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -61,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
- filer := filer2.NewFiler(nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -69,8 +74,10 @@ func TestEmptyRoot(t *testing.T) {
filer.SetStore(store)
filer.DisableDirectoryCache()
+ ctx := context.Background()
+
// checking one upper directory
- entries, err := filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
+ entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go
new file mode 100644
index 000000000..c907e8746
--- /dev/null
+++ b/weed/filer2/leveldb2/leveldb2_store.go
@@ -0,0 +1,248 @@
+package leveldb
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &LevelDB2Store{})
+}
+
+type LevelDB2Store struct {
+ dbs []*leveldb.DB
+ dbCount int
+}
+
+func (store *LevelDB2Store) GetName() string {
+ return "leveldb2"
+}
+
+func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
+ return store.initialize(dir, 8)
+}
+
+func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
+ glog.Infof("filer store leveldb2 dir: %s", dir)
+ if err := weed_util.TestFolderWritable(dir); err != nil {
+ return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
+ }
+
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+
+ for d := 0; d < dbCount; d++ {
+ dbFolder := fmt.Sprintf("%s/%02d", dir, d)
+ if err := os.MkdirAll(dbFolder, 0755); err != nil {
+ return fmt.Errorf("mkdir %s: %v", dbFolder, err)
+ }
+ db, dbErr := leveldb.OpenFile(dbFolder, opts)
+ if errors.IsCorrupted(dbErr) {
+ db, dbErr = leveldb.RecoverFile(dbFolder, opts)
+ }
+ if dbErr != nil {
+ glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+ return dbErr
+ }
+ store.dbs = append(store.dbs, db)
+ }
+ store.dbCount = dbCount
+
+ return
+}
+
+func (store *LevelDB2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *LevelDB2Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ dir, name := entry.DirAndName()
+ key, partitionId := genKey(dir, name, store.dbCount)
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ err = store.dbs[partitionId].Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+
+ return nil
+}
+
+func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
+ dir, name := fullpath.DirAndName()
+ key, partitionId := genKey(dir, name, store.dbCount)
+
+ data, err := store.dbs[partitionId].Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(data)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+
+ return entry, nil
+}
+
+func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ dir, name := fullpath.DirAndName()
+ key, partitionId := genKey(dir, name, store.dbCount)
+
+ err = store.dbs[partitionId].Delete(key, nil)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
+
+ batch := new(leveldb.Batch)
+
+ iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ batch.Delete(append(directoryPrefix, []byte(fileName)...))
+ }
+ iter.Release()
+
+ err = store.dbs[partitionId].Write(batch, nil)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
+ lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
+
+ iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ if fileName == startFileName && !inclusive {
+ continue
+ }
+ limit--
+ if limit < 0 {
+ break
+ }
+ entry := &filer2.Entry{
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+ }
+
+ // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+
+ if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ entries = append(entries, entry)
+ }
+ iter.Release()
+
+ return entries, err
+}
+
+func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) {
+ key, partitionId = hashToBytes(dirPath, dbCount)
+ key = append(key, []byte(fileName)...)
+ return key, partitionId
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
+ keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount)
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix, partitionId
+}
+
+func getNameFromKey(key []byte) string {
+
+ return string(key[md5.Size:])
+
+}
+
+// hash directory, and use last byte for partitioning
+func hashToBytes(dir string, dbCount int) ([]byte, int) {
+ h := md5.New()
+ io.WriteString(h, dir)
+
+ b := h.Sum(nil)
+
+ x := b[len(b)-1]
+
+ return b, int(x) % dbCount
+}
+
+func (store *LevelDB2Store) Shutdown() {
+ for d := 0; d < store.dbCount; d++ {
+ store.dbs[d].Close()
+ }
+}
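
The sharding scheme is worth spelling out: `hashToBytes` makes the md5 of the directory the key prefix and uses its last byte, mod `dbCount`, to pick one of the eight databases, so a whole directory always lives in a single shard and listings never have to merge across databases. A runnable illustration of just that function:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// hashToBytes mirrors the partitioning above: the md5 of the directory
// becomes the key prefix, and its last byte picks one of dbCount shards.
func hashToBytes(dir string, dbCount int) ([]byte, int) {
	h := md5.New()
	io.WriteString(h, dir)
	b := h.Sum(nil)
	return b, int(b[len(b)-1]) % dbCount
}

func main() {
	for _, dir := range []string{"/", "/home", "/home/chris"} {
		_, partition := hashToBytes(dir, 8)
		fmt.Printf("%-12s -> shard %d\n", dir, partition)
	}
	// All entries of one directory land in one shard, so a directory
	// listing is still a single-database prefix scan.
}
```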
diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go
new file mode 100644
index 000000000..b211d86e4
--- /dev/null
+++ b/weed/filer2/leveldb2/leveldb2_store_test.go
@@ -0,0 +1,90 @@
+package leveldb
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestCreateAndFind(t *testing.T) {
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+ defer os.RemoveAll(dir)
+ store := &LevelDB2Store{}
+ store.initialize(dir, 2)
+ filer.SetStore(store)
+ filer.DisableDirectoryCache()
+
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
+
+ entry1 := &filer2.Entry{
+ FullPath: fullpath,
+ Attr: filer2.Attr{
+ Mode: 0440,
+ Uid: 1234,
+ Gid: 5678,
+ },
+ }
+
+ if err := filer.CreateEntry(ctx, entry1, false, false); err != nil {
+ t.Errorf("create entry %v: %v", entry1.FullPath, err)
+ return
+ }
+
+ entry, err := filer.FindEntry(ctx, fullpath)
+
+ if err != nil {
+ t.Errorf("find entry: %v", err)
+ return
+ }
+
+ if entry.FullPath != entry1.FullPath {
+ t.Errorf("find wrong entry: %v", entry.FullPath)
+ return
+ }
+
+ // checking one upper directory
+ entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+ // checking the root directory
+ entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func TestEmptyRoot(t *testing.T) {
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+ defer os.RemoveAll(dir)
+ store := &LevelDB2Store{}
+ store.initialize(dir, 2)
+ filer.SetStore(store)
+ filer.DisableDirectoryCache()
+
+ ctx := context.Background()
+
+ // checking the root directory
+ entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
+ if err != nil {
+ t.Errorf("list entries: %v", err)
+ return
+ }
+ if len(entries) != 0 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go
deleted file mode 100644
index 062f1cd1c..000000000
--- a/weed/filer2/memdb/memdb_store.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package memdb
-
-import (
- "fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/google/btree"
- "strings"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &MemDbStore{})
-}
-
-type MemDbStore struct {
- tree *btree.BTree
-}
-
-type entryItem struct {
- *filer2.Entry
-}
-
-func (a entryItem) Less(b btree.Item) bool {
- return strings.Compare(string(a.FullPath), string(b.(entryItem).FullPath)) < 0
-}
-
-func (store *MemDbStore) GetName() string {
- return "memory"
-}
-
-func (store *MemDbStore) Initialize(configuration util.Configuration) (err error) {
- store.tree = btree.New(8)
- return nil
-}
-
-func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {
- // println("inserting", entry.FullPath)
- store.tree.ReplaceOrInsert(entryItem{entry})
- return nil
-}
-
-func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) {
- if _, err = store.FindEntry(entry.FullPath); err != nil {
- return fmt.Errorf("no such file %s : %v", entry.FullPath, err)
- }
- store.tree.ReplaceOrInsert(entryItem{entry})
- return nil
-}
-
-func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
- item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}})
- if item == nil {
- return nil, filer2.ErrNotFound
- }
- entry = item.(entryItem).Entry
- return entry, nil
-}
-
-func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
- store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}})
- return nil
-}
-
-func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
-
- startFrom := string(fullpath)
- if startFileName != "" {
- startFrom = startFrom + "/" + startFileName
- }
-
- store.tree.AscendGreaterOrEqual(entryItem{&filer2.Entry{FullPath: filer2.FullPath(startFrom)}},
- func(item btree.Item) bool {
- if limit <= 0 {
- return false
- }
- entry := item.(entryItem).Entry
- // println("checking", entry.FullPath)
-
- if entry.FullPath == fullpath {
- // skipping the current directory
- // println("skipping the folder", entry.FullPath)
- return true
- }
-
- dir, name := entry.FullPath.DirAndName()
- if name == startFileName {
- if inclusive {
- limit--
- entries = append(entries, entry)
- }
- return true
- }
-
- // only iterate the same prefix
- if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) {
- // println("breaking from", entry.FullPath)
- return false
- }
-
- if dir != string(fullpath) {
- // this could be items in deeper directories
- // println("skipping deeper folder", entry.FullPath)
- return true
- }
- // now process the directory items
- // println("adding entry", entry.FullPath)
- limit--
- entries = append(entries, entry)
- return true
- },
- )
- return entries, nil
-}
diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go
deleted file mode 100644
index cf813e04b..000000000
--- a/weed/filer2/memdb/memdb_store_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package memdb
-
-import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "testing"
-)
-
-func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil)
- store := &MemDbStore{}
- store.Initialize(nil)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
-
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
-
- entry1 := &filer2.Entry{
- FullPath: fullpath,
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
-
- if err := filer.CreateEntry(entry1); err != nil {
- t.Errorf("create entry %v: %v", entry1.FullPath, err)
- return
- }
-
- entry, err := filer.FindEntry(fullpath)
-
- if err != nil {
- t.Errorf("find entry: %v", err)
- return
- }
-
- if entry.FullPath != entry1.FullPath {
- t.Errorf("find wrong entry: %v", entry.FullPath)
- return
- }
-
-}
-
-func TestCreateFileAndList(t *testing.T) {
- filer := filer2.NewFiler(nil)
- store := &MemDbStore{}
- store.Initialize(nil)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
-
- entry1 := &filer2.Entry{
- FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"),
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
-
- entry2 := &filer2.Entry{
- FullPath: filer2.FullPath("/home/chris/this/is/one/file2.jpg"),
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
-
- filer.CreateEntry(entry1)
- filer.CreateEntry(entry2)
-
- // checking the 2 files
- entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100)
-
- if err != nil {
- t.Errorf("list entries: %v", err)
- return
- }
-
- if len(entries) != 2 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- if entries[0].FullPath != entry1.FullPath {
- t.Errorf("find wrong entry 1: %v", entries[0].FullPath)
- return
- }
-
- if entries[1].FullPath != entry2.FullPath {
- t.Errorf("find wrong entry 2: %v", entries[1].FullPath)
- return
- }
-
- // checking the offset
- entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // checking one upper directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // checking root directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // add file3
- file3Path := filer2.FullPath("/home/chris/this/is/file3.jpg")
- entry3 := &filer2.Entry{
- FullPath: file3Path,
- Attr: filer2.Attr{
- Mode: 0440,
- Uid: 1234,
- Gid: 5678,
- },
- }
- filer.CreateEntry(entry3)
-
- // checking one upper directory
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
- if len(entries) != 2 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
- // delete file and count
- filer.DeleteEntryMetaAndData(file3Path, false, false)
- entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
- if len(entries) != 1 {
- t.Errorf("list entries count: %v", len(entries))
- return
- }
-
-}
diff --git a/weed/filer2/meta_aggregator.go b/weed/filer2/meta_aggregator.go
new file mode 100644
index 000000000..2f707b921
--- /dev/null
+++ b/weed/filer2/meta_aggregator.go
@@ -0,0 +1,91 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+)
+
+type MetaAggregator struct {
+ filers []string
+ grpcDialOption grpc.DialOption
+ MetaLogBuffer *log_buffer.LogBuffer
+ // notifying clients
+ ListenersLock sync.Mutex
+ ListenersCond *sync.Cond
+}
+
+func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator {
+ t := &MetaAggregator{
+ filers: filers,
+ grpcDialOption: grpcDialOption,
+ }
+ t.ListenersCond = sync.NewCond(&t.ListenersLock)
+ t.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, nil, func() {
+ t.ListenersCond.Broadcast()
+ })
+ return t
+}
+
+func (ma *MetaAggregator) StartLoopSubscribe(lastTsNs int64) {
+ for _, filer := range ma.filers {
+ go ma.subscribeToOneFiler(filer, lastTsNs)
+ }
+}
+
+func (ma *MetaAggregator) subscribeToOneFiler(filer string, lastTsNs int64) {
+
+ processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return err
+ }
+ dir := event.Directory
+ // println("received meta change", dir, "size", len(data))
+ ma.MetaLogBuffer.AddToBuffer([]byte(dir), data)
+ return nil
+ }
+
+ for {
+ err := pb.WithFilerClient(filer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ stream, err := client.SubscribeLocalMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ClientName: "filer",
+ PathPrefix: "/",
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return fmt.Errorf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.V(0).Infof("subscribing remote %s meta change: %v", filer, err)
+ time.Sleep(1733 * time.Millisecond)
+ }
+ }
+}
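
The subscription loop above never gives up: each failed stream logs, sleeps ~1.7s, and redials, and because `lastTsNs` advances as events arrive, the next attempt resumes where the previous stream broke instead of replaying from zero. A compact sketch of that resume-on-reconnect shape, with a fake stream in place of the gRPC call:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// subscribeOnce stands in for one SubscribeLocalMetadata stream: it would
// receive events, advance sinceNs to each event's TsNs, and return when
// the stream breaks.
func subscribeOnce(sinceNs int64) (int64, error) {
	return sinceNs + 1, errors.New("stream closed") // simulated disconnect
}

// runSubscriber mirrors subscribeToOneFiler: on every failure it resumes
// from the last applied timestamp.
func runSubscriber(lastTsNs int64, attempts int) {
	for i := 0; i < attempts; i++ {
		var err error
		lastTsNs, err = subscribeOnce(lastTsNs)
		if err != nil {
			fmt.Printf("subscribe error: %v, will resume at %d\n", err, lastTsNs)
			time.Sleep(10 * time.Millisecond) // the real loop waits ~1.7s
		}
	}
}

func main() {
	runSubscriber(0, 3)
}
```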
diff --git a/weed/filer2/mongodb/mongodb_store.go b/weed/filer2/mongodb/mongodb_store.go
new file mode 100644
index 000000000..375a457a4
--- /dev/null
+++ b/weed/filer2/mongodb/mongodb_store.go
@@ -0,0 +1,210 @@
+package mongodb
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "go.mongodb.org/mongo-driver/x/bsonx"
+ "time"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &MongodbStore{})
+}
+
+type MongodbStore struct {
+ connect *mongo.Client
+ database string
+ collectionName string
+}
+
+type Model struct {
+ Directory string `bson:"directory"`
+ Name string `bson:"name"`
+ Meta []byte `bson:"meta"`
+}
+
+func (store *MongodbStore) GetName() string {
+ return "mongodb"
+}
+
+func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ store.database = configuration.GetString(prefix + "database")
+ store.collectionName = "filemeta"
+ poolSize := configuration.GetInt(prefix + "option_pool_size")
+ return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize))
+}
+
+func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ opts := options.Client().ApplyURI(uri)
+
+ if poolSize > 0 {
+ opts.SetMaxPoolSize(poolSize)
+ }
+
+ client, err := mongo.Connect(ctx, opts)
+ if err != nil {
+ return err
+ }
+
+ c := client.Database(store.database).Collection(store.collectionName)
+ err = store.indexUnique(c)
+ store.connect = client
+ return err
+}
+
+func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error {
+ _, err := c.Indexes().CreateOne(context.Background(), index, opts)
+ return err
+}
+
+func (store *MongodbStore) indexUnique(c *mongo.Collection) error {
+ opts := options.CreateIndexes().SetMaxTime(10 * time.Second)
+
+ unique := true
+
+ index := mongo.IndexModel{
+ Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}},
+ Options: &options.IndexOptions{
+ Unique: &unique,
+ },
+ }
+
+ return store.createIndex(c, index, opts)
+}
+
+func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+
+func (store *MongodbStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ dir, name := entry.FullPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ c := store.connect.Database(store.database).Collection(store.collectionName)
+
+ _, err = c.InsertOne(ctx, Model{
+ Directory: dir,
+ Name: name,
+ Meta: meta,
+ })
+ if err != nil {
+ return fmt.Errorf("insert %s : %v", entry.FullPath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+
+ dir, name := fullpath.DirAndName()
+ var data Model
+
+ var where = bson.M{"directory": dir, "name": name}
+ err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
+ if err != mongo.ErrNoDocuments && err != nil {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ if len(data.Meta) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+
+ err = entry.DecodeAttributesAndChunks(data.Meta)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
+
+ dir, name := fullpath.DirAndName()
+
+ where := bson.M{"directory": dir, "name": name}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ where := bson.M{"directory": fullpath}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+
+ var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
+ if inclusive {
+ where["name"] = bson.M{
+ "$gte": startFileName,
+ }
+ }
+ optLimit := int64(limit)
+ opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}}
+ cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts)
+ if err != nil {
+ return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ }
+ for cur.Next(ctx) {
+ var data Model
+ err := cur.Decode(&data)
+ if err != nil && err != mongo.ErrNoDocuments {
+ return nil, err
+ }
+
+ entry := &filer2.Entry{
+ FullPath: util.NewFullPath(string(fullpath), data.Name),
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(data.Meta); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+
+ entries = append(entries, entry)
+ }
+
+ if err := cur.Close(ctx); err != nil {
+ glog.V(0).Infof("list iterator close: %v", err)
+ }
+
+ return entries, err
+}
+
+func (store *MongodbStore) Shutdown() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ store.connect.Disconnect(ctx)
+}
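
One caveat worth flagging: `UpdateEntry` delegates to `InsertEntry`, and an `InsertOne` against the unique `(directory, name)` index above should fail with a duplicate-key error for an already-existing entry. A hedged alternative sketch using replace-with-upsert — a hypothetical helper, not what this patch does:

```go
package mongodb

import (
	"context"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// upsertEntry is a hypothetical variant of InsertEntry/UpdateEntry: it
// replaces the document matching the unique (directory, name) index, or
// inserts it if absent, so updating an existing entry cannot hit E11000.
func upsertEntry(ctx context.Context, c *mongo.Collection, dir, name string, meta []byte) error {
	filter := bson.M{"directory": dir, "name": name}
	replacement := bson.M{"directory": dir, "name": name, "meta": meta}
	res, err := c.ReplaceOne(ctx, filter, replacement, options.Replace().SetUpsert(true))
	if err != nil {
		return fmt.Errorf("upsert %s/%s: %v", dir, name, err)
	}
	_ = res.UpsertedCount // 1 for a fresh insert, 0 for a replacement
	return nil
}
```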
diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go
index e18299bd2..63d99cd9d 100644
--- a/weed/filer2/mysql/mysql_store.go
+++ b/weed/filer2/mysql/mysql_store.go
@@ -26,28 +26,35 @@ func (store *MysqlStore) GetName() string {
return "mysql"
}
-func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) {
+func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("username"),
- configuration.GetString("password"),
- configuration.GetString("hostname"),
- configuration.GetInt("port"),
- configuration.GetString("database"),
- configuration.GetInt("connection_max_idle"),
- configuration.GetInt("connection_max_open"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetBool(prefix+"interpolateParams"),
)
}
-func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) {
+func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
+ interpolateParams bool) (err error) {
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
+ store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?"
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+ if interpolateParams {
+ sqlUrl += "&interpolateParams=true"
+ }
+
var dbErr error
store.DB, dbErr = sql.Open("mysql", sqlUrl)
if dbErr != nil {
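
The new `interpolateParams` knob just appends a go-sql-driver DSN flag: with it on, `?` placeholders are expanded client-side, saving the server-side prepare round trip per statement. A sketch of the resulting DSN; the `connectionURLPattern` constant here assumes the usual go-sql-driver shape, since the real `CONNECTION_URL_PATTERN` sits above this hunk:

```go
package main

import "fmt"

// Assumed go-sql-driver DSN shape; the real CONNECTION_URL_PATTERN constant
// is defined near the top of mysql_store.go.
const connectionURLPattern = "%s:%s@tcp(%s:%d)/%s?charset=utf8"

func buildDSN(user, password, host string, port int, database string, interpolateParams bool) string {
	dsn := fmt.Sprintf(connectionURLPattern, user, password, host, port, database)
	if interpolateParams {
		// go-sql-driver/mysql then substitutes ?-placeholders client-side,
		// skipping the prepare/execute round trip.
		dsn += "&interpolateParams=true"
	}
	return dsn
}

func main() {
	fmt.Println(buildDSN("seaweed", "secret", "localhost", 3306, "seaweedfs", true))
}
```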
diff --git a/weed/filer2/postgres/README.txt b/weed/filer2/postgres/README.txt
index ef2ef683b..cb0c99c63 100644
--- a/weed/filer2/postgres/README.txt
+++ b/weed/filer2/postgres/README.txt
@@ -9,8 +9,8 @@ $PGHOME/bin/psql --username=postgres --password seaweedfs
CREATE TABLE IF NOT EXISTS filemeta (
dirhash BIGINT,
- name VARCHAR(1000),
- directory VARCHAR(4096),
+ name VARCHAR(65535),
+ directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go
index ffd3d1e01..51c069aae 100644
--- a/weed/filer2/postgres/postgres_store.go
+++ b/weed/filer2/postgres/postgres_store.go
@@ -11,7 +11,7 @@ import (
)
const (
- CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30"
+ CONNECTION_URL_PATTERN = "host=%s port=%d user=%s sslmode=%s connect_timeout=30"
)
func init() {
@@ -26,16 +26,16 @@ func (store *PostgresStore) GetName() string {
return "postgres"
}
-func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) {
+func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("username"),
- configuration.GetString("password"),
- configuration.GetString("hostname"),
- configuration.GetInt("port"),
- configuration.GetString("database"),
- configuration.GetString("sslmode"),
- configuration.GetInt("connection_max_idle"),
- configuration.GetInt("connection_max_open"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetString(prefix+"sslmode"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
)
}
@@ -45,10 +45,17 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
+ store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
- sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode)
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode)
+ if password != "" {
+ sqlUrl += " password=" + password
+ }
+ if database != "" {
+ sqlUrl += " dbname=" + database
+ }
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
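
The point of the pattern change: with `password`/`dbname` appended only when non-empty, libpq-style defaults (e.g. `~/.pgpass`, the database named after the user) still apply instead of sending empty key/value pairs. Mirrored as a standalone function:

```go
package main

import "fmt"

const connectionURLPattern = "host=%s port=%d user=%s sslmode=%s connect_timeout=30"

// buildDSN mirrors the new initialize(): password and dbname are appended
// only when set, so empty values fall back to libpq defaults instead of
// producing empty "password=" / "dbname=" pairs.
func buildDSN(host string, port int, user, password, database, sslmode string) string {
	dsn := fmt.Sprintf(connectionURLPattern, host, port, user, sslmode)
	if password != "" {
		dsn += " password=" + password
	}
	if database != "" {
		dsn += " dbname=" + database
	}
	return dsn
}

func main() {
	fmt.Println(buildDSN("localhost", 5432, "postgres", "", "", "disable"))
}
```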
diff --git a/weed/filer2/reader_at.go b/weed/filer2/reader_at.go
new file mode 100644
index 000000000..11a80443f
--- /dev/null
+++ b/weed/filer2/reader_at.go
@@ -0,0 +1,162 @@
+package filer2
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+type ChunkReadAt struct {
+ masterClient *wdclient.MasterClient
+ chunkViews []*ChunkView
+ buffer []byte
+ bufferOffset int64
+ lookupFileId func(fileId string) (targetUrl string, err error)
+ readerLock sync.Mutex
+
+ chunkCache *chunk_cache.ChunkCache
+}
+
+// var _ = io.ReaderAt(&ChunkReadAt{})
+
+type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error)
+
+func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
+ return func(fileId string) (targetUrl string, err error) {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ vid := VolumeId(fileId)
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: []string{vid},
+ })
+ if err != nil {
+ return err
+ }
+
+ locations := resp.LocationsMap[vid]
+ if locations == nil || len(locations.Locations) == 0 {
+ glog.V(0).Infof("failed to locate %s", fileId)
+ return fmt.Errorf("failed to locate %s", fileId)
+ }
+
+ volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
+
+ targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
+
+ return nil
+ })
+ return
+ }
+}
+
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
+
+ return &ChunkReadAt{
+ chunkViews: chunkViews,
+ lookupFileId: LookupFn(filerClient),
+ bufferOffset: -1,
+ chunkCache: chunkCache,
+ }
+}
+
+func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
+
+ c.readerLock.Lock()
+ defer c.readerLock.Unlock()
+
+ for n < len(p) && err == nil {
+ readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
+ n += readCount
+ err = readErr
+ if readCount == 0 {
+ return n, io.EOF
+ }
+ }
+ return
+}
+
+func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
+
+ var found bool
+ for _, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ found = true
+ if c.bufferOffset != chunk.LogicOffset {
+ c.buffer, err = c.fetchChunkData(chunk)
+ if err != nil {
+ glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+ }
+ c.bufferOffset = chunk.LogicOffset
+ }
+ break
+ }
+ }
+ if !found {
+ return 0, io.EOF
+ }
+
+ if err == nil {
+ n = copy(p, c.buffer[offset-c.bufferOffset:])
+ }
+
+ // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
+
+ return
+
+}
+
+func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
+
+ glog.V(4).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ hasDataInCache := false
+ chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
+ if chunkData != nil {
+ glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+ hasDataInCache = true
+ } else {
+ chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
+ glog.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
+ return nil, fmt.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
+ }
+
+ data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
+
+ if !hasDataInCache {
+ c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+ }
+
+ return data, nil
+}
+
+func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
+
+ urlString, err := c.lookupFileId(fileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+ var buffer bytes.Buffer
+ err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(0).Infof("read %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+
+ return buffer.Bytes(), nil
+}
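
`ChunkReadAt` provides random access (`io.ReaderAt`) with a one-chunk buffer and a read-through chunk cache. Callers that want a plain sequential reader over a byte range can wrap any `io.ReaderAt` in an `io.SectionReader`; a sketch with a string stand-in, since building the real reader needs a live filer client, chunk views, and a chunk cache:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for a *ChunkReadAt, which also satisfies io.ReaderAt.
	var readerAt io.ReaderAt = strings.NewReader("hello seaweedfs chunks")

	// io.SectionReader turns random-access ReadAt into a bounded,
	// seekable sequential reader over one byte range of the "file".
	section := io.NewSectionReader(readerAt, 6, 9)
	data, err := io.ReadAll(section)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("%s\n", data) // seaweedfs
}
```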
diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go
index 4f74a8a22..eaaecb740 100644
--- a/weed/filer2/redis/redis_cluster_store.go
+++ b/weed/filer2/redis/redis_cluster_store.go
@@ -18,15 +18,25 @@ func (store *RedisClusterStore) GetName() string {
return "redis_cluster"
}
-func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) {
+func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+
+ configuration.SetDefault(prefix+"useReadOnly", true)
+ configuration.SetDefault(prefix+"routeByLatency", true)
+
return store.initialize(
- configuration.GetStringSlice("addresses"),
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
)
}
-func (store *RedisClusterStore) initialize(addresses []string) (err error) {
+func (store *RedisClusterStore) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
store.Client = redis.NewClusterClient(&redis.ClusterOptions{
- Addrs: addresses,
+ Addrs: addresses,
+ Password: password,
+ ReadOnly: readOnly,
+ RouteByLatency: routeByLatency,
})
return
}
diff --git a/weed/filer2/redis/redis_store.go b/weed/filer2/redis/redis_store.go
index c56fa014c..9debdb070 100644
--- a/weed/filer2/redis/redis_store.go
+++ b/weed/filer2/redis/redis_store.go
@@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string {
return "redis"
}
-func (store *RedisStore) Initialize(configuration util.Configuration) (err error) {
+func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("address"),
- configuration.GetString("password"),
- configuration.GetInt("database"),
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
)
}
diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go
index 7fd7e1180..e5b9e8840 100644
--- a/weed/filer2/redis/universal_redis_store.go
+++ b/weed/filer2/redis/universal_redis_store.go
@@ -1,13 +1,18 @@
package redis
import (
+ "context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/go-redis/redis"
"sort"
"strings"
"time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const (
@@ -18,7 +23,17 @@ type UniversalRedisStore struct {
Client redis.UniversalClient
}
-func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
@@ -42,16 +57,16 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
-func (store *UniversalRedisStore) UpdateEntry(entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return store.InsertEntry(entry)
+ return store.InsertEntry(ctx, entry)
}
-func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
data, err := store.Client.Get(string(fullpath)).Result()
if err == redis.Nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
@@ -69,7 +84,7 @@ func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *fi
return entry, nil
}
-func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
+func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
_, err = store.Client.Del(string(fullpath)).Result()
@@ -88,10 +103,29 @@ func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err err
return nil
}
-func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
+func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
+ if err != nil {
+ return fmt.Errorf("delete folder %s : %v", fullpath, err)
+ }
+
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(string(path)).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ dirListKey := genDirectoryListKey(string(fullpath))
+ members, err := store.Client.SMembers(dirListKey).Result()
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
@@ -125,11 +159,18 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath,
// fetch entry meta
for _, fileName := range members {
- path := filer2.NewFullPath(string(fullpath), fileName)
- entry, err := store.FindEntry(path)
+ path := util.NewFullPath(string(fullpath), fileName)
+ entry, err := store.FindEntry(ctx, path)
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
} else {
+ if entry.TtlSec > 0 {
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(string(path)).Result()
+ store.Client.SRem(dirListKey, fileName).Result()
+ continue
+ }
+ }
entries = append(entries, entry)
}
}
@@ -140,3 +181,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath,
func genDirectoryListKey(dir string) (dirList string) {
return dir + DIR_LIST_MARKER
}
+
+func (store *UniversalRedisStore) Shutdown() {
+ store.Client.Close()
+}
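
The TTL branch added to `ListDirectoryEntries` is lazy expiry: entries past `Crtime + TtlSec` are deleted (both the value key and the directory-set member) at list time, rather than by a background sweeper. The predicate, isolated:

```go
package main

import (
	"fmt"
	"time"
)

type entry struct {
	name   string
	crtime time.Time
	ttlSec int32
}

// expired mirrors the check added to ListDirectoryEntries: an entry with a
// TTL is considered gone once Crtime+TtlSec is in the past.
func expired(e entry, now time.Time) bool {
	return e.ttlSec > 0 && e.crtime.Add(time.Duration(e.ttlSec)*time.Second).Before(now)
}

func main() {
	now := time.Now()
	old := entry{"old.jpg", now.Add(-2 * time.Hour), 3600}
	fresh := entry{"new.jpg", now, 3600}
	fmt.Println(expired(old, now), expired(fresh, now)) // true false
}
```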
diff --git a/weed/filer2/redis2/redis_cluster_store.go b/weed/filer2/redis2/redis_cluster_store.go
new file mode 100644
index 000000000..b252eabab
--- /dev/null
+++ b/weed/filer2/redis2/redis_cluster_store.go
@@ -0,0 +1,42 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
+}
+
+type RedisCluster2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *RedisCluster2Store) GetName() string {
+ return "redis_cluster2"
+}
+
+func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+
+ configuration.SetDefault(prefix+"useReadOnly", true)
+ configuration.SetDefault(prefix+"routeByLatency", true)
+
+ return store.initialize(
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
+ )
+}
+
+func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
+ store.Client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: addresses,
+ Password: password,
+ ReadOnly: readOnly,
+ RouteByLatency: routeByLatency,
+ })
+ return
+}
diff --git a/weed/filer2/redis2/redis_store.go b/weed/filer2/redis2/redis_store.go
new file mode 100644
index 000000000..1e2a20043
--- /dev/null
+++ b/weed/filer2/redis2/redis_store.go
@@ -0,0 +1,36 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &Redis2Store{})
+}
+
+type Redis2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *Redis2Store) GetName() string {
+ return "redis2"
+}
+
+func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
+ )
+}
+
+func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) {
+ store.Client = redis.NewClient(&redis.Options{
+ Addr: hostPort,
+ Password: password,
+ DB: database,
+ })
+ return
+}
diff --git a/weed/filer2/redis2/universal_redis_store.go b/weed/filer2/redis2/universal_redis_store.go
new file mode 100644
index 000000000..420336b46
--- /dev/null
+++ b/weed/filer2/redis2/universal_redis_store.go
@@ -0,0 +1,162 @@
+package redis2
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DIR_LIST_MARKER = "\x00"
+)
+
+type UniversalRedis2Store struct {
+ Client redis.UniversalClient
+}
+
+func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := entry.FullPath.DirAndName()
+ if name != "" {
+ if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
+ return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+
+ data, err := store.Client.Get(string(fullpath)).Result()
+ if err == redis.Nil {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks([]byte(data))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ _, err = store.Client.Del(string(fullpath)).Result()
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ dir, name := fullpath.DirAndName()
+ if name != "" {
+ _, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
+ if err != nil {
+ return fmt.Errorf("delete folder %s : %v", fullpath, err)
+ }
+
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(string(path)).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ dirListKey := genDirectoryListKey(string(fullpath))
+ start := int64(0)
+ if startFileName != "" {
+ start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
+ if !inclusive {
+ start++
+ }
+ }
+ members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
+ if err != nil {
+ return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ }
+
+ // fetch entry meta
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ entry, err := store.FindEntry(ctx, path)
+ if err != nil {
+ glog.V(0).Infof("list %s : %v", path, err)
+ } else {
+ if entry.TtlSec > 0 {
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(string(path)).Result()
+ store.Client.ZRem(dirListKey, fileName).Result()
+ continue
+ }
+ }
+ entries = append(entries, entry)
+ }
+ }
+
+ return entries, err
+}
+
+func genDirectoryListKey(dir string) (dirList string) {
+ return dir + DIR_LIST_MARKER
+}
+
+func (store *UniversalRedis2Store) Shutdown() {
+ store.Client.Close()
+}
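
Unlike the original redis store's unordered `SMembers` set, which must be sorted client-side on every listing, redis2 keeps directory members in a sorted set with score 0, so members are ordered lexically server-side and `ZRank`/`ZRange` give seek-and-page semantics directly. The paging logic, mimicked locally without a redis server:

```go
package main

import (
	"fmt"
	"sort"
)

// With all scores equal, a redis sorted set orders members lexically.
// listPage mimics the ZRank + ZRange paging in ListDirectoryEntries.
func listPage(members []string, startFileName string, inclusive bool, limit int) []string {
	sort.Strings(members) // the sorted set maintains this order server-side
	start := sort.SearchStrings(members, startFileName) // ~ ZRank
	if !inclusive && start < len(members) && members[start] == startFileName {
		start++
	}
	end := start + limit // ~ ZRange start, start+limit-1
	if end > len(members) {
		end = len(members)
	}
	return members[start:end]
}

func main() {
	dir := []string{"a.jpg", "b.jpg", "c.jpg", "d.jpg"}
	fmt.Println(listPage(dir, "b.jpg", false, 2)) // [c.jpg d.jpg]
}
```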
diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go
new file mode 100644
index 000000000..033a8dd13
--- /dev/null
+++ b/weed/filer2/stream.go
@@ -0,0 +1,199 @@
+package filer2
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
+
+ chunkViews := ViewFromChunks(chunks, offset, size)
+
+ fileId2Url := make(map[string]string)
+
+ for _, chunkView := range chunkViews {
+
+ urlString, err := masterClient.LookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ fileId2Url[chunkView.FileId] = urlString
+ }
+
+ for _, chunkView := range chunkViews {
+
+ urlString := fileId2Url[chunkView.FileId]
+ err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ w.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ }
+
+ return nil
+
+}
+
+// ---------------- ReadAllReader ----------------------------------
+
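+// ReadAll fetches every chunk into memory and returns the concatenated
+// bytes. This buffers the whole file, so it only suits small files; larger
+// reads should go through StreamContent or ChunkStreamReader.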
+func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
+
+ buffer := bytes.Buffer{}
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ lookupFileId := func(fileId string) (targetUrl string, err error) {
+ return masterClient.LookupFileId(fileId)
+ }
+
+ for _, chunkView := range chunkViews {
+ urlString, err := lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+ err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+ }
+ return buffer.Bytes(), nil
+}
+
+// ---------------- ChunkStreamReader ----------------------------------
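+// ChunkStreamReader presents a file's chunk list as one sequential stream,
+// buffering one whole chunk at a time. A minimal usage sketch (entry here
+// stands in for any *filer_pb.Entry you already hold):
+//
+//	r := NewChunkStreamReaderFromFiler(masterClient, entry.Chunks)
+//	buf := make([]byte, 4096)
+//	n, _ := r.Read(buf)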
+type ChunkStreamReader struct {
+ chunkViews []*ChunkView
+ logicOffset int64
+ buffer []byte
+ bufferOffset int64
+ bufferPos int
+ chunkIndex int
+ lookupFileId LookupFileIdFunctionType
+}
+
+var _ = io.ReadSeeker(&ChunkStreamReader{})
+
+func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: func(fileId string) (targetUrl string, err error) {
+ return masterClient.LookupFileId(fileId)
+ },
+ }
+}
+
+func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: LookupFn(filerClient),
+ }
+}
+
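+// Read copies from the buffered chunk into p, fetching the next chunk view
+// whenever the buffer runs dry, and returns io.EOF once every chunk view has
+// been consumed.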
+func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
+ for n < len(p) {
+ if c.isBufferEmpty() {
+ if c.chunkIndex >= len(c.chunkViews) {
+ return n, io.EOF
+ }
+ chunkView := c.chunkViews[c.chunkIndex]
+ if err = c.fetchChunkToBuffer(chunkView); err != nil {
+ return
+ }
+ c.chunkIndex++
+ }
+ t := copy(p[n:], c.buffer[c.bufferPos:])
+ c.bufferPos += t
+ n += t
+ }
+ return
+}
+
+func (c *ChunkStreamReader) isBufferEmpty() bool {
+ return len(c.buffer) <= c.bufferPos
+}
+
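+// Seek resolves the whence-relative offset to an absolute position and, if a
+// chunk contains that position, prefetches it so the next Read copies
+// straight from the buffer. Seeking beyond the end returns
+// io.ErrUnexpectedEOF.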
+func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
+
+ var totalSize int64
+ for _, chunk := range c.chunkViews {
+ totalSize += int64(chunk.Size)
+ }
+
+ var err error
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += c.bufferOffset + int64(c.bufferPos)
+ case io.SeekEnd:
+ offset = totalSize + offset
+ }
+ if offset > totalSize {
+ err = io.ErrUnexpectedEOF
+ }
+
+ for i, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
+ c.fetchChunkToBuffer(chunk)
+ c.chunkIndex = i + 1
+ break
+ }
+ }
+ }
+ c.bufferPos = int(offset - c.bufferOffset)
+
+ return offset, err
+
+}
+
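+// fetchChunkToBuffer downloads one chunk view into an in-memory buffer and
+// resets the read position to the start of that chunk.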
+func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
+ urlString, err := c.lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ var buffer bytes.Buffer
+ err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ c.buffer = buffer.Bytes()
+ c.bufferPos = 0
+ c.bufferOffset = chunkView.LogicOffset
+
+ // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ return nil
+}
+
+func (c *ChunkStreamReader) Close() {
+ // TODO try to release and reuse buffer
+}
+
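+// VolumeId strips the needle portion from a file id of the form
+// "volumeId,needleIdCookie"; if there is no comma the input is returned
+// unchanged.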
+func VolumeId(fileId string) string {
+ lastCommaIndex := strings.LastIndex(fileId, ",")
+ if lastCommaIndex > 0 {
+ return fileId[:lastCommaIndex]
+ }
+ return fileId
+}
diff --git a/weed/filer2/topics.go b/weed/filer2/topics.go
new file mode 100644
index 000000000..9c6e5c88d
--- /dev/null
+++ b/weed/filer2/topics.go
@@ -0,0 +1,6 @@
+package filer2
+
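+// Reserved filer directories: pub/sub topics live under TopicsDir, with the
+// system log stream kept in a hidden subdirectory beneath it.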
+const (
+ TopicsDir = "/topics"
+ SystemLogDir = TopicsDir + "/.system/log"
+)
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index fae289217..9ef74a95c 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -1,23 +1,27 @@
package filesys
import (
+ "bytes"
"context"
"os"
- "path"
- "path/filepath"
+ "strings"
"time"
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Dir struct {
- Path string
- wfs *WFS
- attributes *filer_pb.FuseAttributes
+ name string
+ wfs *WFS
+ entry *filer_pb.Entry
+ parent *Dir
}
var _ = fs.Node(&Dir{})
@@ -28,99 +32,96 @@ var _ = fs.HandleReadDirAller(&Dir{})
var _ = fs.NodeRemover(&Dir{})
var _ = fs.NodeRenamer(&Dir{})
var _ = fs.NodeSetattrer(&Dir{})
+var _ = fs.NodeGetxattrer(&Dir{})
+var _ = fs.NodeSetxattrer(&Dir{})
+var _ = fs.NodeRemovexattrer(&Dir{})
+var _ = fs.NodeListxattrer(&Dir{})
+var _ = fs.NodeForgetter(&Dir{})
-func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
+func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
// https://github.com/bazil/fuse/issues/196
attr.Valid = time.Second
- if dir.Path == dir.wfs.option.FilerMountRootPath {
- attr.Uid = dir.wfs.option.MountUid
- attr.Gid = dir.wfs.option.MountGid
- attr.Mode = dir.wfs.option.MountMode
+ if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
+ dir.setRootDirAttributes(attr)
+ glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
- item := dir.wfs.listDirectoryEntriesCache.Get(dir.Path)
- if item != nil && !item.Expired() {
- entry := item.Value().(*filer_pb.Entry)
-
- attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
- attr.Ctime = time.Unix(entry.Attributes.Crtime, 0)
- attr.Mode = os.FileMode(entry.Attributes.FileMode)
- attr.Gid = entry.Attributes.Gid
- attr.Uid = entry.Attributes.Uid
-
- return nil
+ if err := dir.maybeLoadEntry(); err != nil {
+ glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
+ return err
}
- parent, name := filepath.Split(dir.Path)
+ attr.Inode = util.FullPath(dir.FullPath()).AsInode()
+ attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir
+ attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0)
+ attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0)
+ attr.Gid = dir.entry.Attributes.Gid
+ attr.Uid = dir.entry.Attributes.Uid
- err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: parent,
- Name: name,
- }
+ glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
- glog.V(1).Infof("read dir %s attr: %v", dir.Path, request)
- resp, err := client.LookupDirectoryEntry(context, request)
- if err != nil {
- if err == filer2.ErrNotFound {
- return nil
- }
- glog.V(0).Infof("read dir %s attr %v: %v", dir.Path, request, err)
- return err
- }
-
- if resp.Entry != nil {
- dir.attributes = resp.Entry.Attributes
- }
+ return nil
+}
- // dir.wfs.listDirectoryEntriesCache.Set(dir.Path, resp.Entry, dir.wfs.option.EntryCacheTtl)
+func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- return nil
- })
+ glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
- if err != nil {
+ if err := dir.maybeLoadEntry(); err != nil {
return err
}
- // glog.V(1).Infof("dir %s: %v", dir.Path, attributes)
- // glog.V(1).Infof("dir %s permission: %v", dir.Path, os.FileMode(attributes.FileMode))
-
- attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir
+ return getxattr(dir.entry, req, resp)
+}
- attr.Mtime = time.Unix(dir.attributes.Mtime, 0)
- attr.Ctime = time.Unix(dir.attributes.Crtime, 0)
- attr.Gid = dir.attributes.Gid
- attr.Uid = dir.attributes.Uid
+func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
+ attr.Inode = 1 // the mount root is always reported as inode 1
+ attr.Valid = time.Hour
+ attr.Uid = dir.wfs.option.MountUid
+ attr.Gid = dir.wfs.option.MountGid
+ attr.Mode = dir.wfs.option.MountMode
+ attr.Crtime = dir.wfs.option.MountCtime
+ attr.Ctime = dir.wfs.option.MountCtime
+ attr.Mtime = dir.wfs.option.MountMtime
+ attr.Atime = dir.wfs.option.MountMtime
+ attr.BlockSize = 1024 * 1024
+}
- return nil
+func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
+ return dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
+ return &File{
+ Name: name,
+ dir: dir,
+ wfs: dir.wfs,
+ entry: entry,
+ entryViewCache: nil,
+ }
+ })
}
-func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File {
- return &File{
- Name: name,
- dir: dir,
- wfs: dir.wfs,
- entry: entry,
- entryViewCache: nil,
- }
+func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node {
+
+ return dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
+ return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
+ })
+
}
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dir.FullPath(),
Entry: &filer_pb.Entry{
Name: req.Name,
IsDirectory: req.Mode&os.ModeDir > 0,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
- FileMode: uint32(req.Mode),
+ FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
Uid: req.Uid,
Gid: req.Gid,
Collection: dir.wfs.option.Collection,
@@ -128,109 +129,119 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
TtlSec: dir.wfs.option.TtlSec,
},
},
+ OExcl: req.Flags&fuse.OpenExclusive != 0,
}
- glog.V(1).Infof("create: %v", request)
+ glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
- if request.Entry.IsDirectory {
- if err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err)
- return fuse.EIO
+ if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ if strings.Contains(err.Error(), "EEXIST") {
+ return fuse.EEXIST
}
- return nil
- }); err != nil {
- return nil, nil, err
+ return fuse.EIO
}
- }
- file := dir.newFile(req.Name, request.Entry)
- if !request.Entry.IsDirectory {
- file.isOpen = true
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+
+ return nil
+ }); err != nil {
+ return nil, nil, err
}
+ var node fs.Node
+ if request.Entry.IsDirectory {
+ node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
+ return node, nil, nil
+ }
+
+ node = dir.newFile(req.Name, request.Entry)
+ file := node.(*File)
+ file.isOpen++
fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
- fh.dirtyMetadata = true
return file, fh, nil
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
- err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
+
+ newEntry := &filer_pb.Entry{
+ Name: req.Name,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
+ Uid: req.Uid,
+ Gid: req.Gid,
+ },
+ }
+
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
- Entry: &filer_pb.Entry{
- Name: req.Name,
- IsDirectory: true,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(req.Mode),
- Uid: req.Uid,
- Gid: req.Gid,
- },
- },
+ Directory: dir.FullPath(),
+ Entry: newEntry,
}
glog.V(1).Infof("mkdir: %v", request)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err)
- return fuse.EIO
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+ return err
}
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+
return nil
})
if err == nil {
- node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
+ node := dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), newEntry)
+
return node, nil
}
- return nil, err
+ glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+
+ return nil, fuse.EIO
}
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
- var entry *filer_pb.Entry
+ glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
- item := dir.wfs.listDirectoryEntriesCache.Get(path.Join(dir.Path, req.Name))
- if item != nil && !item.Expired() {
- entry = item.Value().(*filer_pb.Entry)
+ fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
+ dirPath := util.FullPath(dir.FullPath())
+ meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
+ cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
+ if cacheErr == filer_pb.ErrNotFound {
+ return nil, fuse.ENOENT
}
+ entry := cachedEntry.ToProtoEntry()
if entry == nil {
- err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- }
-
- glog.V(4).Infof("lookup directory entry: %v", request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- // glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err)
- return fuse.ENOENT
- }
-
- entry = resp.Entry
-
- // dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, dir.wfs.option.EntryCacheTtl)
-
- return nil
- })
+ // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
+ entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
+ if err != nil {
+ glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
+ return nil, fuse.ENOENT
+ }
+ } else {
+ glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
}
if entry != nil {
if entry.IsDirectory {
- node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, attributes: entry.Attributes}
+ node = dir.newDirectory(fullFilePath, entry)
} else {
node = dir.newFile(req.Name, entry)
}
- resp.EntryValid = time.Duration(0)
+ // resp.EntryValid = time.Second
+ resp.Attr.Inode = fullFilePath.AsInode()
+ resp.Attr.Valid = time.Second
resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
- resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0)
+ resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode)
resp.Attr.Gid = entry.Attributes.Gid
resp.Attr.Uid = entry.Attributes.Uid
@@ -238,203 +249,234 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
return node, nil
}
+ glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
- err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ glog.V(3).Infof("dir ReadDirAll %s", dir.FullPath())
- paginationLimit := 1024
- remaining := dir.wfs.option.DirListingLimit
+ processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
+ fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
+ inode := fullpath.AsInode()
+ if entry.IsDirectory {
+ dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir}
+ ret = append(ret, dirent)
+ } else {
+ dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File}
+ ret = append(ret, dirent)
+ }
+ return nil
+ }
- lastEntryName := ""
+ dirPath := util.FullPath(dir.FullPath())
+ meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
+ listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int(dir.wfs.option.DirListCacheLimit))
+ if listErr != nil {
+ glog.Errorf("list meta cache: %v", listErr)
+ return nil, fuse.EIO
+ }
+ for _, cachedEntry := range listedEntries {
+ processEachEntryFn(cachedEntry.ToProtoEntry(), false)
+ }
+ return
+}
- for remaining >= 0 {
+func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
- request := &filer_pb.ListEntriesRequest{
- Directory: dir.Path,
- StartFromFileName: lastEntryName,
- Limit: uint32(paginationLimit),
- }
+ if !req.Dir {
+ return dir.removeOneFile(req)
+ }
- glog.V(4).Infof("read directory: %v", request)
- resp, err := client.ListEntries(ctx, request)
- if err != nil {
- glog.V(0).Infof("list %s: %v", dir.Path, err)
- return fuse.EIO
- }
+ return dir.removeFolder(req)
- cacheTtl := estimatedCacheTtl(len(resp.Entries))
-
- for _, entry := range resp.Entries {
- if entry.IsDirectory {
- dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir}
- ret = append(ret, dirent)
- } else {
- dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File}
- ret = append(ret, dirent)
- }
- dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl)
- lastEntryName = entry.Name
- }
+}
- remaining -= len(resp.Entries)
+func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
- if len(resp.Entries) < paginationLimit {
- break
- }
+ filePath := util.NewFullPath(dir.FullPath(), req.Name)
+ entry, err := filer_pb.GetEntry(dir.wfs, filePath)
+ if err != nil {
+ return err
+ }
+ if entry == nil {
+ return nil
+ }
- }
+ dir.wfs.deleteFileChunks(entry.Chunks)
- return nil
- })
+ dir.wfs.fsNodeCache.DeleteFsNode(filePath)
+
+ dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
+
+ glog.V(3).Infof("remove file: %v", req)
+ err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, false, false, false, false)
+ if err != nil {
+ glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
+ return fuse.ENOENT
+ }
+
+ return nil
- return ret, err
}
-func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
+func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
- if !req.Dir {
- return dir.removeOneFile(ctx, req)
+ t := util.NewFullPath(dir.FullPath(), req.Name)
+ dir.wfs.fsNodeCache.DeleteFsNode(t)
+
+ dir.wfs.metaCache.DeleteEntry(context.Background(), t)
+
+ glog.V(3).Infof("remove directory entry: %v", req)
+ err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, false, false)
+ if err != nil {
+ glog.V(3).Infof("not found remove %s/%s: %v", dir.FullPath(), req.Name, err)
+ return fuse.ENOENT
}
- return dir.removeFolder(ctx, req)
+ return nil
}
-func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error {
+func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- var entry *filer_pb.Entry
- err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ glog.V(3).Infof("%v dir setattr %+v", dir.FullPath(), req)
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- }
+ if err := dir.maybeLoadEntry(); err != nil {
+ return err
+ }
- glog.V(4).Infof("lookup to-be-removed entry: %v", request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- // glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err)
- return fuse.ENOENT
- }
+ if req.Valid.Mode() {
+ dir.entry.Attributes.FileMode = uint32(req.Mode)
+ }
- entry = resp.Entry
+ if req.Valid.Uid() {
+ dir.entry.Attributes.Uid = req.Uid
+ }
- return nil
- })
+ if req.Valid.Gid() {
+ dir.entry.Attributes.Gid = req.Gid
+ }
- if err != nil {
- return err
+ if req.Valid.Mtime() {
+ dir.entry.Attributes.Mtime = req.Mtime.Unix()
}
- dir.wfs.deleteFileChunks(entry.Chunks)
+ return dir.saveEntry()
- return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+}
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- IsDeleteData: false,
- }
+func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(3).Infof("remove file: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err)
- return fuse.ENOENT
- }
+ glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
- dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name))
+ if err := dir.maybeLoadEntry(); err != nil {
+ return err
+ }
- return nil
- })
+ if err := setxattr(dir.entry, req); err != nil {
+ return err
+ }
-}
+ return dir.saveEntry()
-func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error {
+}
- return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- IsDeleteData: true,
- }
+ glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
- glog.V(3).Infof("remove directory entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err)
- return fuse.ENOENT
- }
+ if err := dir.maybeLoadEntry(); err != nil {
+ return err
+ }
- dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name))
+ if err := removexattr(dir.entry, req); err != nil {
+ return err
+ }
- return nil
- })
+ return dir.saveEntry()
}
-func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
+func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle)
- if req.Valid.Mode() {
- dir.attributes.FileMode = uint32(req.Mode)
- }
+ glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
- if req.Valid.Uid() {
- dir.attributes.Uid = req.Uid
+ if err := dir.maybeLoadEntry(); err != nil {
+ return err
}
- if req.Valid.Gid() {
- dir.attributes.Gid = req.Gid
+ if err := listxattr(dir.entry, req, resp); err != nil {
+ return err
}
- if req.Valid.Mtime() {
- dir.attributes.Mtime = req.Mtime.Unix()
+ return nil
+
+}
+
+func (dir *Dir) Forget() {
+ glog.V(3).Infof("Forget dir %s", dir.FullPath())
+
+ dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
+}
+
+func (dir *Dir) maybeLoadEntry() error {
+ if dir.entry == nil {
+ parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
+ entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name)
+ if err != nil {
+ return err
+ }
+ dir.entry = entry
}
+ return nil
+}
- parentDir, name := filer2.FullPath(dir.Path).DirAndName()
- return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (dir *Dir) saveEntry() error {
+
+ parentDir, name := util.FullPath(dir.FullPath()).DirAndName()
+
+ return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
Directory: parentDir,
- Entry: &filer_pb.Entry{
- Name: name,
- Attributes: dir.attributes,
- },
+ Entry: dir.entry,
}
- glog.V(1).Infof("set attr directory entry: %v", request)
- _, err := client.UpdateEntry(ctx, request)
+ glog.V(1).Infof("save dir entry: %v", request)
+ _, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("UpdateEntry %s: %v", dir.Path, err)
+ glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}
- dir.wfs.listDirectoryEntriesCache.Delete(dir.Path)
+ dir.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
return nil
})
-
}
-func estimatedCacheTtl(numEntries int) time.Duration {
- if numEntries < 100 {
- // 30 ms per entry
- return 3 * time.Second
- }
- if numEntries < 1000 {
- // 10 ms per entry
- return 10 * time.Second
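+// FullPath rebuilds the absolute path by walking the parent chain to the
+// mount root and joining the collected names in reverse.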
+func (dir *Dir) FullPath() string {
+ var parts []string
+ for p := dir; p != nil; p = p.parent {
+ if strings.HasPrefix(p.name, "/") {
+ if len(p.name) > 1 {
+ parts = append(parts, p.name[1:])
+ }
+ } else {
+ parts = append(parts, p.name)
+ }
}
- if numEntries < 10000 {
- // 10 ms per entry
- return 100 * time.Second
+
+ if len(parts) == 0 {
+ return "/"
}
- // 2 ms per entry
- return time.Duration(numEntries*2) * time.Millisecond
+ var buf bytes.Buffer
+ for i := len(parts) - 1; i >= 0; i-- {
+ buf.WriteString("/")
+ buf.WriteString(parts[i])
+ }
+ return buf.String()
}
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index 3b3735369..4990e743c 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -6,6 +6,7 @@ import (
"syscall"
"time"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
@@ -17,17 +18,17 @@ var _ = fs.NodeReadlinker(&File{})
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
- glog.V(3).Infof("Symlink: %v/%v to %v", dir.Path, req.NewName, req.Target)
+ glog.V(3).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dir.FullPath(),
Entry: &filer_pb.Entry{
Name: req.NewName,
IsDirectory: false,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
- FileMode: uint32(os.FileMode(0755) | os.ModeSymlink),
+ FileMode: uint32((os.FileMode(0777) | os.ModeSymlink) &^ dir.wfs.option.Umask),
Uid: req.Uid,
Gid: req.Gid,
SymlinkTarget: req.Target,
@@ -35,11 +36,14 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
},
}
- err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err)
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
+
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+
return nil
})
@@ -51,7 +55,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
- if err := file.maybeLoadAttributes(ctx); err != nil {
+ if err := file.maybeLoadEntry(ctx); err != nil {
return "", err
}
@@ -59,7 +63,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.Path, file.Name, file.entry.Attributes.SymlinkTarget)
+ glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, file.entry.Attributes.SymlinkTarget)
return file.entry.Attributes.SymlinkTarget, nil
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index d29281f35..0f7f131b1 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -2,118 +2,49 @@ package filesys
import (
"context"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
- "math"
- "path/filepath"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
)
func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
newDir := newDirectory.(*Dir)
- return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
+ oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
- // find existing entry
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir.Path,
- Name: req.OldName,
+ glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
+
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AtomicRenameEntryRequest{
+ OldDirectory: dir.FullPath(),
+ OldName: req.OldName,
+ NewDirectory: newDir.FullPath(),
+ NewName: req.NewName,
}
- glog.V(4).Infof("find existing directory entry: %v", request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
+ _, err := client.AtomicRenameEntry(context.Background(), request)
if err != nil {
- glog.V(3).Infof("renaming find %s/%s: %v", dir.Path, req.OldName, err)
- return fuse.ENOENT
+ glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
+ return fuse.EIO
}
- entry := resp.Entry
+ return nil
- glog.V(4).Infof("found existing directory entry resp: %+v", resp)
-
- return moveEntry(ctx, client, dir.Path, entry, newDir.Path, req.NewName)
})
-}
-
-func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParent string, entry *filer_pb.Entry, newParent, newName string) error {
- if entry.IsDirectory {
- currentDirPath := filepath.Join(oldParent, entry.Name)
+ if err == nil {
- lastFileName := ""
- includeLastFile := false
- limit := math.MaxInt32
- for limit > 0 {
- request := &filer_pb.ListEntriesRequest{
- Directory: currentDirPath,
- StartFromFileName: lastFileName,
- InclusiveStartFrom: includeLastFile,
- Limit: 1024,
- }
- glog.V(4).Infof("read directory: %v", request)
- resp, err := client.ListEntries(ctx, request)
- if err != nil {
- glog.V(0).Infof("list %s: %v", oldParent, err)
- return fuse.EIO
- }
- if len(resp.Entries) == 0 {
- break
- }
-
- for _, item := range resp.Entries {
- lastFileName = item.Name
- err := moveEntry(ctx, client, currentDirPath, item, filepath.Join(newParent, newName), item.Name)
- if err != nil {
- return err
- }
- limit--
- }
- if len(resp.Entries) < 1024 {
- break
- }
- }
+ // fmt.Printf("rename path: %v => %v\n", oldPath, newPath)
+ dir.wfs.fsNodeCache.Move(oldPath, newPath)
+ delete(dir.wfs.handles, oldPath.AsInode())
}
- // add to new directory
- {
- request := &filer_pb.CreateEntryRequest{
- Directory: newParent,
- Entry: &filer_pb.Entry{
- Name: newName,
- IsDirectory: entry.IsDirectory,
- Attributes: entry.Attributes,
- Chunks: entry.Chunks,
- },
- }
-
- glog.V(1).Infof("create new entry: %v", request)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("renaming create %s/%s: %v", newParent, newName, err)
- return fuse.EIO
- }
- }
-
- // delete old entry
- {
- request := &filer_pb.DeleteEntryRequest{
- Directory: oldParent,
- Name: entry.Name,
- IsDeleteData: false,
- }
-
- glog.V(1).Infof("remove old entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(0).Infof("renaming delete %s/%s: %v", oldParent, entry.Name, err)
- return fuse.EIO
- }
-
- }
-
- return nil
-
+ return err
}
diff --git a/weed/filesys/dir_test.go b/weed/filesys/dir_test.go
new file mode 100644
index 000000000..49c76eb5e
--- /dev/null
+++ b/weed/filesys/dir_test.go
@@ -0,0 +1,34 @@
+package filesys
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDirPath(t *testing.T) {
+
+ p := &Dir{name: "/some"}
+ p = &Dir{name: "path", parent: p}
+ p = &Dir{name: "to", parent: p}
+ p = &Dir{name: "a", parent: p}
+ p = &Dir{name: "file", parent: p}
+
+ assert.Equal(t, "/some/path/to/a/file", p.FullPath())
+
+ p = &Dir{name: "/some"}
+ assert.Equal(t, "/some", p.FullPath())
+
+ p = &Dir{name: "/"}
+ assert.Equal(t, "/", p.FullPath())
+
+ p = &Dir{name: "/"}
+ p = &Dir{name: "path", parent: p}
+ assert.Equal(t, "/path", p.FullPath())
+
+ p = &Dir{name: "/"}
+ p = &Dir{name: "path", parent: p}
+ p = &Dir{name: "to", parent: p}
+ assert.Equal(t, "/path/to", p.FullPath())
+
+}
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 696296e62..45224b3e7 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -4,168 +4,149 @@ import (
"bytes"
"context"
"fmt"
- "sync/atomic"
+ "io"
+ "sync"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "sync"
+ "github.com/chrislusf/seaweedfs/weed/security"
)
type ContinuousDirtyPages struct {
- hasData bool
- Offset int64
- Size int64
- Data []byte
- f *File
- lock sync.Mutex
+ intervals *ContinuousIntervals
+ f *File
+ lock sync.Mutex
+ collection string
+ replication string
}
func newDirtyPages(file *File) *ContinuousDirtyPages {
return &ContinuousDirtyPages{
- Data: nil,
- f: file,
+ intervals: &ContinuousIntervals{},
+ f: file,
}
}
func (pages *ContinuousDirtyPages) releaseResource() {
- if pages.Data != nil {
- pages.f.wfs.bufPool.Put(pages.Data)
- pages.Data = nil
- atomic.AddInt32(&counter, -1)
- glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter)
- }
}
var counter = int32(0)
-func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
pages.lock.Lock()
defer pages.lock.Unlock()
- var chunk *filer_pb.FileChunk
+ glog.V(3).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data)))
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
- return pages.flushAndSave(ctx, offset, data)
+ return pages.flushAndSave(offset, data)
}
- if pages.Data == nil {
- pages.Data = pages.f.wfs.bufPool.Get().([]byte)
- atomic.AddInt32(&counter, 1)
- glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter)
- }
-
- if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) ||
- pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) {
- // if the data is out of range,
- // or buffer is full if adding new data,
- // flush current buffer and add new data
-
- // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size)
+ pages.intervals.AddInterval(data, offset)
- if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
- if chunk != nil {
- glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
- chunks = append(chunks, chunk)
- }
- } else {
- glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
- return
- }
- pages.Offset = offset
- copy(pages.Data, data)
- pages.Size = int64(len(data))
- return
- }
+ var chunk *filer_pb.FileChunk
+ var hasSavedData bool
- if offset != pages.Offset+pages.Size {
- // when this happens, debug shows the data overlapping with existing data is empty
- // the data is not just append
- if offset == pages.Offset && int(pages.Size) < len(data) {
- // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size)
- copy(pages.Data[pages.Size:], data[pages.Size:])
- } else {
- if pages.Size != 0 {
- glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data)))
- }
- return pages.flushAndSave(ctx, offset, data)
+ if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit {
+ chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage()
+ if hasSavedData {
+ chunks = append(chunks, chunk)
}
- } else {
- copy(pages.Data[offset-pages.Offset:], data)
}
- pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset)
-
return
}
-func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
var chunk *filer_pb.FileChunk
+ var newChunks []*filer_pb.FileChunk
// flush existing
- if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
- if chunk != nil {
- glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
- chunks = append(chunks, chunk)
+ if newChunks, err = pages.saveExistingPagesToStorage(); err == nil {
+ if newChunks != nil {
+ chunks = append(chunks, newChunks...)
}
} else {
- glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
return
}
- pages.Size = 0
- pages.Offset = 0
// flush the new page
- if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {
+ if chunk, err = pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))); err == nil {
if chunk != nil {
- glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
chunks = append(chunks, chunk)
}
} else {
- glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
+ glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
return
}
return
}
-func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) FlushToStorage() (chunks []*filer_pb.FileChunk, err error) {
pages.lock.Lock()
defer pages.lock.Unlock()
- if pages.Size == 0 {
- return nil, nil
- }
+ return pages.saveExistingPagesToStorage()
+}
- if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
- pages.Size = 0
- pages.Offset = 0
- if chunk != nil {
- glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
+func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) {
+
+ var hasSavedData bool
+ var chunk *filer_pb.FileChunk
+
+ for {
+
+ chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage()
+ if !hasSavedData {
+ return chunks, err
+ }
+
+ if err == nil {
+ chunks = append(chunks, chunk)
+ } else {
+ return
}
}
- return
+
}
-func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {
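+// saveExistingLargestPageToStorage uploads the single largest dirty interval,
+// retrying every 5 seconds until the upload succeeds.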
+func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *filer_pb.FileChunk, hasSavedData bool, err error) {
- if pages.Size == 0 {
- return nil, nil
+ maxList := pages.intervals.RemoveLargestIntervalLinkedList()
+ if maxList == nil {
+ return nil, false, nil
+ }
+
+ for {
+ chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
+ if err == nil {
+ hasSavedData = true
+ glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
+ return
+ } else {
+ glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
+ time.Sleep(5 * time.Second)
+ }
}
- return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)
}
-func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {
+func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) {
var fileId, host string
+ var auth security.EncodedJwt
+
+ dir, _ := pages.f.fullpath().DirAndName()
- if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := pages.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -173,15 +154,21 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
Collection: pages.f.wfs.option.Collection,
TtlSec: pages.f.wfs.option.TtlSec,
DataCenter: pages.f.wfs.option.DataCenter,
+ ParentPath: dir,
}
- resp, err := client.AssignVolume(ctx, request)
+ resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
- fileId, host = resp.FileId, resp.Url
+ fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
+ host = pages.f.wfs.AdjustedUrl(host)
+ pages.collection, pages.replication = resp.Collection, resp.Replication
return nil
}); err != nil {
@@ -189,8 +176,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
- bufReader := bytes.NewReader(buf)
- uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "")
+ uploadResult, err, data := operation.Upload(fileUrl, pages.f.Name, pages.f.wfs.option.Cipher, reader, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload data: %v", err)
@@ -199,14 +185,9 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
}
+ pages.f.wfs.chunkCache.SetChunk(fileId, data)
- return &filer_pb.FileChunk{
- FileId: fileId,
- Offset: offset,
- Size: uint64(len(buf)),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- }, nil
+ return uploadResult.ToPbFileChunk(fileId, offset), nil
}
@@ -216,3 +197,18 @@ func max(x, y int64) int64 {
}
return y
}
+func min(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
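+// ReadDirtyData overlays any buffered, not-yet-flushed bytes onto data so
+// reads observe writes that have not reached the volume servers yet.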
+func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) {
+
+ pages.lock.Lock()
+ defer pages.lock.Unlock()
+
+ return pages.intervals.ReadData(data, startOffset)
+
+}
diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go
new file mode 100644
index 000000000..ec94c6df1
--- /dev/null
+++ b/weed/filesys/dirty_page_interval.go
@@ -0,0 +1,220 @@
+package filesys
+
+import (
+ "bytes"
+ "io"
+ "math"
+)
+
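+// Dirty page writes are tracked as a set of non-overlapping
+// IntervalLinkedLists; each list is a chain of IntervalNodes holding
+// contiguous written ranges. New writes split, trim, or replace overlapping
+// intervals so a read always sees the most recently written bytes.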
+type IntervalNode struct {
+ Data []byte
+ Offset int64
+ Size int64
+ Next *IntervalNode
+}
+
+type IntervalLinkedList struct {
+ Head *IntervalNode
+ Tail *IntervalNode
+}
+
+type ContinuousIntervals struct {
+ lists []*IntervalLinkedList
+}
+
+func (list *IntervalLinkedList) Offset() int64 {
+ return list.Head.Offset
+}
+func (list *IntervalLinkedList) Size() int64 {
+ return list.Tail.Offset + list.Tail.Size - list.Head.Offset
+}
+func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) {
+ // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
+ list.Tail.Next = node
+ list.Tail = node
+}
+func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) {
+ // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
+ node.Next = list.Head
+ list.Head = node
+}
+
+func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) {
+ t := list.Head
+ for {
+
+ nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
+ if nodeStart < nodeStop {
+ // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
+ copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset])
+ }
+
+ if t.Next == nil {
+ break
+ }
+ t = t.Next
+ }
+}
+
+func (c *ContinuousIntervals) TotalSize() (total int64) {
+ for _, list := range c.lists {
+ total += list.Size()
+ }
+ return
+}
+
+func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList {
+ var nodes []*IntervalNode
+ for t := list.Head; t != nil; t = t.Next {
+ nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
+ if nodeStart >= nodeStop {
+ // skip non overlapping IntervalNode
+ continue
+ }
+ nodes = append(nodes, &IntervalNode{
+ Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset],
+ Offset: nodeStart,
+ Size: nodeStop - nodeStart,
+ Next: nil,
+ })
+ }
+ for i := 1; i < len(nodes); i++ {
+ nodes[i-1].Next = nodes[i]
+ }
+ return &IntervalLinkedList{
+ Head: nodes[0],
+ Tail: nodes[len(nodes)-1],
+ }
+}
+
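+// AddInterval records newly written bytes at the given offset: overlapping
+// parts of existing intervals are trimmed away or dropped, then the new
+// interval is merged with any list it now touches on either side.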
+func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) {
+
+ interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))}
+
+ var newLists []*IntervalLinkedList
+ for _, list := range c.lists {
+ // if the list lies entirely to the left of the new interval, keep it as is
+ if list.Tail.Offset+list.Tail.Size <= interval.Offset {
+ newLists = append(newLists, list)
+ }
+ // if the list lies entirely to the right of the new interval, keep it as is
+ if interval.Offset+interval.Size <= list.Head.Offset {
+ newLists = append(newLists, list)
+ }
+ // if the new interval overwrites the right part of the list,
+ // keep a sublist covering the remaining left part
+ if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size {
+ newLists = append(newLists, subList(list, list.Offset(), interval.Offset))
+ }
+ // if the new interval overwrites the left part of the list,
+ // keep a sublist covering the remaining right part
+ if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size {
+ newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size))
+ }
+ // anything fully covered by the new interval is dropped
+ }
+
+ c.lists = newLists
+ // add the new interval to the lists, connecting neighbor lists
+ var prevList, nextList *IntervalLinkedList
+
+ for _, list := range c.lists {
+ if list.Head.Offset == interval.Offset+interval.Size {
+ nextList = list
+ break
+ }
+ }
+
+ for _, list := range c.lists {
+ if list.Head.Offset+list.Size() == offset {
+ list.addNodeToTail(interval)
+ prevList = list
+ break
+ }
+ }
+
+ if prevList != nil && nextList != nil {
+ // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
+ prevList.Tail.Next = nextList.Head
+ prevList.Tail = nextList.Tail
+ c.removeList(nextList)
+ } else if nextList != nil {
+ // the interval was not appended to any tail above, so prepend it here
+ nextList.addNodeToHead(interval)
+ }
+ if prevList == nil && nextList == nil {
+ c.lists = append(c.lists, &IntervalLinkedList{
+ Head: interval,
+ Tail: interval,
+ })
+ }
+
+ return
+}
+
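+// RemoveLargestIntervalLinkedList pops the largest contiguous dirty range,
+// the best candidate to flush to storage first.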
+func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList {
+ var maxSize int64
+ maxIndex := -1
+ for k, list := range c.lists {
+ if maxSize <= list.Size() {
+ maxSize = list.Size()
+ maxIndex = k
+ }
+ }
+ if maxSize <= 0 {
+ return nil
+ }
+
+ t := c.lists[maxIndex]
+ c.lists = append(c.lists[0:maxIndex], c.lists[maxIndex+1:]...)
+ return t
+
+}
+
+func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) {
+ index := -1
+ for k, list := range c.lists {
+ if list.Offset() == target.Offset() {
+ index = k
+ }
+ }
+ if index < 0 {
+ return
+ }
+
+ c.lists = append(c.lists[0:index], c.lists[index+1:]...)
+
+}
+
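+// ReadData copies dirty bytes overlapping [startOffset, startOffset+len(data))
+// into data and returns the span they cover; gaps between dirty intervals
+// within that span leave data untouched.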
+func (c *ContinuousIntervals) ReadData(data []byte, startOffset int64) (offset int64, size int) {
+ var minOffset int64 = math.MaxInt64
+ var maxStop int64
+ for _, list := range c.lists {
+ start := max(startOffset, list.Offset())
+ stop := min(startOffset+int64(len(data)), list.Offset()+list.Size())
+ if start <= stop {
+ list.ReadData(data[start-startOffset:], start, stop)
+ minOffset = min(minOffset, start)
+ maxStop = max(maxStop, stop)
+ }
+ }
+
+ if minOffset == math.MaxInt64 {
+ return 0, 0
+ }
+
+ offset = minOffset
+ size = int(maxStop - offset)
+ return
+}
+
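+// ToReader chains the node buffers into a single io.Reader without copying,
+// so one contiguous interval can be uploaded as a single stream.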
+func (l *IntervalLinkedList) ToReader() io.Reader {
+ var readers []io.Reader
+ t := l.Head
+ readers = append(readers, bytes.NewReader(t.Data))
+ for t.Next != nil {
+ t = t.Next
+ readers = append(readers, bytes.NewReader(t.Data))
+ }
+ return io.MultiReader(readers...)
+}
diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go
new file mode 100644
index 000000000..ab3b37b7c
--- /dev/null
+++ b/weed/filesys/dirty_page_interval_test.go
@@ -0,0 +1,89 @@
+package filesys
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestContinuousIntervals_AddIntervalAppend(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 25, 25, 25
+ c.AddInterval(getBytes(25, 3), 0)
+ // _, _, 23, 23, 23, 23
+ c.AddInterval(getBytes(23, 4), 2)
+
+ expectedData(t, c, 0, 25, 25, 23, 23, 23, 23)
+
+}
+
+func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 25, 25, 25, 25, 25
+ c.AddInterval(getBytes(25, 5), 0)
+ // _, _, 23, 23
+ c.AddInterval(getBytes(23, 2), 2)
+
+ expectedData(t, c, 0, 25, 25, 23, 23, 25)
+
+}
+
+func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 1,
+ c.AddInterval(getBytes(1, 1), 0)
+ // _, 2,
+ c.AddInterval(getBytes(2, 1), 1)
+ // _, _, 3, 3, 3
+ c.AddInterval(getBytes(3, 3), 2)
+ // _, _, _, 4, 4, 4
+ c.AddInterval(getBytes(4, 3), 3)
+
+ expectedData(t, c, 0, 1, 2, 3, 4, 4, 4)
+
+}
+
+func TestContinuousIntervals_RealCase1(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 25,
+ c.AddInterval(getBytes(25, 1), 0)
+ // _, _, _, _, 23, 23
+ c.AddInterval(getBytes(23, 2), 4)
+ // _, _, _, 24, 24, 24, 24
+ c.AddInterval(getBytes(24, 4), 3)
+
+ // _, 22, 22
+ c.AddInterval(getBytes(22, 2), 1)
+
+ expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24)
+
+}
+
+func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) {
+ start, stop := int64(offset), int64(offset+len(data))
+ for _, list := range c.lists {
+ nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size())
+ if nodeStart < nodeStop {
+ buf := make([]byte, nodeStop-nodeStart)
+ list.ReadData(buf, nodeStart, nodeStop)
+ if !bytes.Equal(buf, data[nodeStart-start:nodeStop-start]) {
+ t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf)
+ }
+ }
+ }
+}
+
+func getBytes(content byte, length int) []byte {
+ data := make([]byte, length)
+ for i := 0; i < length; i++ {
+ data[i] = content
+ }
+ return data
+}
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 4bb169a33..4a6bc9a8a 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -2,14 +2,15 @@ package filesys
import (
"context"
+ "io"
"os"
- "path/filepath"
"sort"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
@@ -20,6 +21,11 @@ var _ = fs.Node(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.NodeSetattrer(&File{})
+var _ = fs.NodeGetxattrer(&File{})
+var _ = fs.NodeSetxattrer(&File{})
+var _ = fs.NodeRemovexattrer(&File{})
+var _ = fs.NodeListxattrer(&File{})
+var _ = fs.NodeForgetter(&File{})
type File struct {
Name string
@@ -27,21 +33,33 @@ type File struct {
wfs *WFS
entry *filer_pb.Entry
entryViewCache []filer2.VisibleInterval
- isOpen bool
+ isOpen int
+ reader io.ReaderAt
}
-func (file *File) fullpath() string {
- return filepath.Join(file.dir.Path, file.Name)
+func (file *File) fullpath() util.FullPath {
+ return util.NewFullPath(file.dir.FullPath(), file.Name)
}
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
- if err := file.maybeLoadAttributes(ctx); err != nil {
- return err
+ glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
+
+ if file.isOpen <= 0 {
+ if err := file.maybeLoadEntry(ctx); err != nil {
+ return err
+ }
}
+ attr.Inode = file.fullpath().AsInode()
+ attr.Valid = time.Second
attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
attr.Size = filer2.TotalSize(file.entry.Chunks)
+ if file.isOpen > 0 {
+ attr.Size = file.entry.Attributes.FileSize
+ glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
+ }
+ attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0)
attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0)
attr.Gid = file.entry.Attributes.Gid
attr.Uid = file.entry.Attributes.Uid
@@ -52,11 +70,22 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
}
+func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+
+ glog.V(4).Infof("file Getxattr %s", file.fullpath())
+
+ if err := file.maybeLoadEntry(ctx); err != nil {
+ return err
+ }
+
+ return getxattr(file.entry, req, resp)
+}
+
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
- glog.V(3).Infof("%v file open %+v", file.fullpath(), req)
+ glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
- file.isOpen = true
+ file.isOpen++
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
@@ -70,22 +99,30 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- if err := file.maybeLoadAttributes(ctx); err != nil {
- return err
- }
+ glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes)
- if file.isOpen {
- return nil
+ if err := file.maybeLoadEntry(ctx); err != nil {
+ return err
}
- glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes)
if req.Valid.Size() {
glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size)
- if req.Size == 0 {
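+ // truncating: drop chunks that now lie entirely beyond the new size; a
+ // chunk straddling the boundary is kept, and the FileSize set below bounds
+ // reads into it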
+ if req.Size < filer2.TotalSize(file.entry.Chunks) {
// fmt.Printf("truncate %v \n", fullPath)
- file.entry.Chunks = nil
+ var chunks []*filer_pb.FileChunk
+ for _, chunk := range file.entry.Chunks {
+ int64Size := int64(chunk.Size)
+ if chunk.Offset+int64Size > int64(req.Size) {
+ int64Size = int64(req.Size) - chunk.Offset
+ }
+ if int64Size > 0 {
+ chunks = append(chunks, chunk)
+ }
+ }
+ file.entry.Chunks = chunks
file.entryViewCache = nil
+ file.reader = nil
}
file.entry.Attributes.FileSize = req.Size
}
@@ -109,75 +146,88 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.entry.Attributes.Mtime = req.Mtime.Unix()
}
- return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if file.isOpen > 0 {
+ return nil
+ }
- request := &filer_pb.UpdateEntryRequest{
- Directory: file.dir.Path,
- Entry: file.entry,
- }
+ return file.saveEntry()
- glog.V(1).Infof("set attr file entry: %v", request)
- _, err := client.UpdateEntry(ctx, request)
- if err != nil {
- glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err)
- return fuse.EIO
- }
+}
- return nil
- })
+func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
-}
+ glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
-func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
- // fsync works at OS level
- // write the file chunks to the filerGrpcAddress
- glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req)
+ if err := file.maybeLoadEntry(ctx); err != nil {
+ return err
+ }
+
+ if err := setxattr(file.entry, req); err != nil {
+ return err
+ }
+
+ return file.saveEntry()
- return nil
}
-func (file *File) maybeLoadAttributes(ctx context.Context) error {
- if file.entry == nil || !file.isOpen {
- item := file.wfs.listDirectoryEntriesCache.Get(file.fullpath())
- if item != nil && !item.Expired() {
- entry := item.Value().(*filer_pb.Entry)
- file.setEntry(entry)
- // glog.V(1).Infof("file attr read cached %v attributes", file.Name)
- } else {
- err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- request := &filer_pb.LookupDirectoryEntryRequest{
- Name: file.Name,
- Directory: file.dir.Path,
- }
+ glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("file attr read file %v: %v", request, err)
- return fuse.ENOENT
- }
+ if err := file.maybeLoadEntry(ctx); err != nil {
+ return err
+ }
- file.setEntry(resp.Entry)
+ if err := removexattr(file.entry, req); err != nil {
+ return err
+ }
- glog.V(3).Infof("file attr %v %+v: %d", file.fullpath(), file.entry.Attributes, filer2.TotalSize(file.entry.Chunks))
+ return file.saveEntry()
- // file.wfs.listDirectoryEntriesCache.Set(file.fullpath(), file.entry, file.wfs.option.EntryCacheTtl)
+}
- return nil
- })
+func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- if err != nil {
- return err
- }
- }
+ glog.V(4).Infof("file Listxattr %s", file.fullpath())
+
+ if err := file.maybeLoadEntry(ctx); err != nil {
+ return err
+ }
+
+ if err := listxattr(file.entry, req, resp); err != nil {
+ return err
}
+
return nil
+
+}
+
+func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
+ // fsync works at OS level
+ // write the file chunks to the filerGrpcAddress
+ glog.V(3).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
+
+ return nil
+}
+
+func (file *File) Forget() {
+ t := util.NewFullPath(file.dir.FullPath(), file.Name)
+ glog.V(3).Infof("Forget file %s", t)
+ file.wfs.fsNodeCache.DeleteFsNode(t)
}
-func (file *File) addChunk(chunk *filer_pb.FileChunk) {
- if chunk != nil {
- file.addChunks([]*filer_pb.FileChunk{chunk})
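+// maybeLoadEntry refreshes the entry from the meta cache, unless the file
+// is open, in which case the in-memory entry is authoritative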
+func (file *File) maybeLoadEntry(ctx context.Context) error {
+ if file.entry == nil || file.isOpen <= 0 {
+ entry, err := file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
+ if err != nil {
+ glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ return err
+ }
+ if entry != nil {
+ file.setEntry(entry)
+ }
}
+ return nil
}
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
@@ -194,10 +244,36 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
newVisibles = t
}
+ file.reader = nil
+
+ glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
+
file.entry.Chunks = append(file.entry.Chunks, chunks...)
}
func (file *File) setEntry(entry *filer_pb.Entry) {
file.entry = entry
file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks)
+ file.reader = nil
+}
+
+func (file *File) saveEntry() error {
+ return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.UpdateEntryRequest{
+ Directory: file.dir.FullPath(),
+ Entry: file.entry,
+ }
+
+ glog.V(1).Infof("save file entry: %v", request)
+ _, err := client.UpdateEntry(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ return fuse.EIO
+ }
+
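+		// keep the local meta cache in sync with the remote update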
+ file.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+
+ return nil
+ })
}
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 0f6ca1164..9b9df916c 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -3,17 +3,17 @@ package filesys
import (
"context"
"fmt"
+ "io"
+ "math"
+ "net/http"
+ "os"
+ "time"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
- "net/http"
- "strings"
- "sync"
- "time"
)
type FileHandle struct {
@@ -28,15 +28,20 @@ type FileHandle struct {
NodeId fuse.NodeID // file or directory the request is about
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
+
}
func newFileHandle(file *File, uid, gid uint32) *FileHandle {
- return &FileHandle{
+ fh := &FileHandle{
f: file,
dirtyPages: newDirtyPages(file),
Uid: uid,
Gid: gid,
}
+ if fh.f.entry != nil {
+ fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks)
+ }
+ return fh
}
var _ = fs.Handle(&FileHandle{})
@@ -51,115 +56,91 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
- // this value should come from the filer instead of the old f
- if len(fh.f.entry.Chunks) == 0 {
- glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name)
- return nil
- }
-
buff := make([]byte, req.Size)
- if fh.f.entryViewCache == nil {
- fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks)
- }
-
- chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size)
-
- var vids []string
- for _, chunkView := range chunkViews {
- vids = append(vids, volumeId(chunkView.FileId))
- }
-
- vid2Locations := make(map[string]*filer_pb.Locations)
-
- err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
- VolumeIds: vids,
- })
- if err != nil {
- return err
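+	// read committed chunks first, then overlay any unflushed dirty pages
+	// so the reader immediately sees its own writes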
+ totalRead, err := fh.readFromChunks(buff, req.Offset)
+ if err == nil {
+ dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset)
+ if totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
+ totalRead = dirtyOffset + int64(dirtySize) - req.Offset
}
+ }
- vid2Locations = resp.LocationsMap
-
- return nil
- })
+ resp.Data = buff[:totalRead]
if err != nil {
- glog.V(4).Infof("%v/%v read fh lookup volume ids: %v", fh.f.dir.Path, fh.f.Name, err)
- return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
+ glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+ return fuse.EIO
}
- var totalRead int64
- var wg sync.WaitGroup
- for _, chunkView := range chunkViews {
- wg.Add(1)
- go func(chunkView *filer2.ChunkView) {
- defer wg.Done()
+ return err
+}
- glog.V(4).Infof("read fh reading chunk: %+v", chunkView)
+func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) {
+ return fh.dirtyPages.ReadDirtyData(buff, startOffset)
+}
- locations := vid2Locations[volumeId(chunkView.FileId)]
- if locations == nil || len(locations.Locations) == 0 {
- glog.V(0).Infof("failed to locate %s", chunkView.FileId)
- err = fmt.Errorf("failed to locate %s", chunkView.FileId)
- return
- }
+func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
- var n int64
- n, err = util.ReadUrl(
- fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
- chunkView.Offset,
- int(chunkView.Size),
- buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)],
- !chunkView.IsFullChunk)
+ // this value should come from the filer instead of the old f
+ if len(fh.f.entry.Chunks) == 0 {
+ glog.V(1).Infof("empty fh %v", fh.f.fullpath())
+ return 0, nil
+ }
- if err != nil {
+ if fh.f.entryViewCache == nil {
+ fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks)
+ fh.f.reader = nil
+ }
- glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.f.dir.Path, fh.f.Name, locations.Locations[0].Url, chunkView.FileId, n, err)
+ if fh.f.reader == nil {
+ chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
+ fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
+ }
- err = fmt.Errorf("failed to read http://%s/%s: %v",
- locations.Locations[0].Url, chunkView.FileId, err)
- return
- }
+ totalRead, err := fh.f.reader.ReadAt(buff, offset)
- glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView)
- totalRead += n
+ if err == io.EOF {
+ err = nil
+ }
- }(chunkView)
+ if err != nil {
+ glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
- wg.Wait()
- resp.Data = buff[:totalRead]
+ // glog.V(0).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
- return err
+ return int64(totalRead), err
}
// Write to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// write the request to volume servers
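+	// copy req.Data first: the fuse library may reuse the request buffer
+	// as soon as this handler returns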
+ data := make([]byte, len(req.Data))
+ copy(data, req.Data)
- glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)))
+ fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
+ // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
- chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
+ chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
if err != nil {
- glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
- return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err)
+ glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(data)), err)
+ return fuse.EIO
}
- resp.Size = len(req.Data)
+ resp.Size = len(data)
if req.Offset == 0 {
- fh.contentType = http.DetectContentType(req.Data)
+ // detect mime type
+ fh.contentType = http.DetectContentType(data)
fh.dirtyMetadata = true
}
- fh.f.addChunks(chunks)
-
if len(chunks) > 0 {
+
+ fh.f.addChunks(chunks)
+
fh.dirtyMetadata = true
}
@@ -170,11 +151,14 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle)
- fh.dirtyPages.releaseResource()
-
- fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
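+	// a file may be opened by several handles; only release resources when
+	// the last one goes away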
+ fh.f.isOpen--
- fh.f.isOpen = false
+ if fh.f.isOpen <= 0 {
+ fh.dirtyPages.releaseResource()
+ fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
+ }
+ fh.f.entryViewCache = nil
+ fh.f.reader = nil
return nil
}
@@ -184,19 +168,22 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
// send the data to the OS
glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
- chunk, err := fh.dirtyPages.FlushToStorage(ctx)
+ chunks, err := fh.dirtyPages.FlushToStorage()
if err != nil {
- glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
- return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
+ glog.Errorf("flush %s: %v", fh.f.fullpath(), err)
+ return fuse.EIO
}
- fh.f.addChunk(chunk)
+ if len(chunks) > 0 {
+ fh.f.addChunks(chunks)
+ fh.dirtyMetadata = true
+ }
if !fh.dirtyMetadata {
return nil
}
- return fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
if fh.f.entry.Attributes != nil {
fh.f.entry.Attributes.Mime = fh.contentType
@@ -204,78 +191,48 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.f.entry.Attributes.Gid = req.Gid
fh.f.entry.Attributes.Mtime = time.Now().Unix()
fh.f.entry.Attributes.Crtime = time.Now().Unix()
- fh.f.entry.Attributes.FileMode = uint32(0770)
+ fh.f.entry.Attributes.FileMode = uint32(os.FileMode(fh.f.entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask)
+ fh.f.entry.Attributes.Collection = fh.dirtyPages.collection
+ fh.f.entry.Attributes.Replication = fh.dirtyPages.replication
}
request := &filer_pb.CreateEntryRequest{
- Directory: fh.f.dir.Path,
+ Directory: fh.f.dir.FullPath(),
Entry: fh.f.entry,
}
- //glog.V(1).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks))
- //for i, chunk := range fh.f.entry.Chunks {
- // glog.V(4).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
- //}
+ glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
+ for i, chunk := range fh.f.entry.Chunks {
+ glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ }
chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks)
fh.f.entry.Chunks = chunks
// fh.f.entryViewCache = nil
- fh.f.wfs.deleteFileChunks(garbages)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- return fmt.Errorf("update fh: %v", err)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+ return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
- return nil
- })
-}
-
-func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, fileIds []string) error {
-
- var vids []string
- for _, fileId := range fileIds {
- vids = append(vids, volumeId(fileId))
- }
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
+ fh.f.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
- m := make(map[string]operation.LookupResult)
-
- glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
- VolumeIds: vids,
- })
- if err != nil {
- return m, err
+ fh.f.wfs.deleteFileChunks(garbages)
+ for i, chunk := range garbages {
+ glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
- for _, vid := range vids {
- lr := operation.LookupResult{
- VolumeId: vid,
- Locations: nil,
- }
- locations := resp.LocationsMap[vid]
- for _, loc := range locations.Locations {
- lr.Locations = append(lr.Locations, operation.Location{
- Url: loc.Url,
- PublicUrl: loc.PublicUrl,
- })
- }
- m[vid] = lr
- }
+ return nil
+ })
- return m, err
+ if err == nil {
+ fh.dirtyMetadata = false
}
- _, err := operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
-
- return err
-}
-
-func volumeId(fileId string) string {
- lastCommaIndex := strings.LastIndex(fileId, ",")
- if lastCommaIndex > 0 {
- return fileId[:lastCommaIndex]
+ if err != nil {
+ glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
+ return fuse.EIO
}
- return fileId
+
+ return nil
}
diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go
new file mode 100644
index 000000000..b146f0615
--- /dev/null
+++ b/weed/filesys/fscache.go
@@ -0,0 +1,207 @@
+package filesys
+
+import (
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/seaweedfs/fuse/fs"
+)
+
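+// FsCache indexes fs.Node objects by path in a tree, so the same node
+// instance is handed back for a given path; writers hold the top-level
+// lock, and each node guards its own children map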
+type FsCache struct {
+ root *FsNode
+ sync.RWMutex
+}
+type FsNode struct {
+ parent *FsNode
+ node fs.Node
+ name string
+ childrenLock sync.RWMutex
+ children map[string]*FsNode
+}
+
+func newFsCache(root fs.Node) *FsCache {
+ return &FsCache{
+ root: &FsNode{
+ node: root,
+ },
+ }
+}
+
+func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
+
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.doGetFsNode(path)
+}
+
+func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return nil
+ }
+ }
+ return t.node
+}
+
+func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.doSetFsNode(path, node)
+}
+
+func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.ensureChild(p)
+ }
+ t.node = node
+}
+
+func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.doGetFsNode(path)
+ if t != nil {
+ return t
+ }
+ t = genNodeFn()
+ c.doSetFsNode(path, t)
+ return t
+}
+
+func (c *FsCache) DeleteFsNode(path util.FullPath) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return
+ }
+ }
+ if t.parent != nil {
+ t.parent.disconnectChild(t)
+ }
+ t.deleteSelf()
+}
+
+// oldPath and newPath are full path including the new name
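+// e.g. Move("/a/b", "/z/x") re-parents the subtree rooted at /a/b under
+// /z, renaming the node (and its Dir/File shortcut) to "x"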
+func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
+
+ c.Lock()
+ defer c.Unlock()
+
+ // find old node
+ src := c.root
+ for _, p := range oldPath.Split() {
+ src = src.findChild(p)
+ if src == nil {
+ return src
+ }
+ }
+ if src.parent != nil {
+ src.parent.disconnectChild(src)
+ }
+
+ // find new node
+ target := c.root
+ for _, p := range newPath.Split() {
+ target = target.ensureChild(p)
+ }
+ parent := target.parent
+ src.name = target.name
+ if dir, ok := src.node.(*Dir); ok {
+ dir.name = target.name // target is not Dir, but a shortcut
+ }
+ if f, ok := src.node.(*File); ok {
+ f.Name = target.name
+ if f.entry != nil {
+ f.entry.Name = f.Name
+ }
+ }
+ parent.disconnectChild(target)
+
+ target.deleteSelf()
+
+ src.connectToParent(parent)
+
+ return src
+}
+
+func (n *FsNode) connectToParent(parent *FsNode) {
+ n.parent = parent
+ oldNode := parent.findChild(n.name)
+ if oldNode != nil {
+ oldNode.deleteSelf()
+ }
+ if dir, ok := n.node.(*Dir); ok {
+ dir.parent = parent.node.(*Dir)
+ }
+ if f, ok := n.node.(*File); ok {
+ f.dir = parent.node.(*Dir)
+ }
+	// lock the parent's children map, since that is what is being mutated
+	parent.childrenLock.Lock()
+	parent.children[n.name] = n
+	parent.childrenLock.Unlock()
+}
+
+func (n *FsNode) findChild(name string) *FsNode {
+ n.childrenLock.RLock()
+ defer n.childrenLock.RUnlock()
+
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ return nil
+}
+
+func (n *FsNode) ensureChild(name string) *FsNode {
+ n.childrenLock.Lock()
+ defer n.childrenLock.Unlock()
+
+ if n.children == nil {
+ n.children = make(map[string]*FsNode)
+ }
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ t := &FsNode{
+ parent: n,
+ node: nil,
+ name: name,
+ children: nil,
+ }
+ n.children[name] = t
+ return t
+}
+
+func (n *FsNode) disconnectChild(child *FsNode) {
+ n.childrenLock.Lock()
+ delete(n.children, child.name)
+ n.childrenLock.Unlock()
+ child.parent = nil
+}
+
+func (n *FsNode) deleteSelf() {
+ n.childrenLock.Lock()
+ for _, child := range n.children {
+ child.deleteSelf()
+ }
+ n.children = nil
+ n.childrenLock.Unlock()
+
+ n.node = nil
+ n.parent = nil
+
+}
diff --git a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go
new file mode 100644
index 000000000..67f9aacc8
--- /dev/null
+++ b/weed/filesys/fscache_test.go
@@ -0,0 +1,96 @@
+package filesys
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestPathSplit(t *testing.T) {
+ parts := util.FullPath("/").Split()
+ if len(parts) != 0 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+ parts = util.FullPath("/readme.md").Split()
+ if len(parts) != 1 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+}
+
+func TestFsCache(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ x := cache.GetFsNode(util.FullPath("/y/x"))
+ if x != nil {
+ t.Errorf("wrong node!")
+ }
+
+ p := util.FullPath("/a/b/c")
+ cache.SetFsNode(p, &File{Name: "cc"})
+ tNode := cache.GetFsNode(p)
+ tFile := tNode.(*File)
+ if tFile.Name != "cc" {
+ t.Errorf("expecting a FsNode")
+ }
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ b := cache.GetFsNode(util.FullPath("/a/b"))
+ if b != nil {
+ t.Errorf("unexpected node!")
+ }
+
+ a := cache.GetFsNode(util.FullPath("/a"))
+ if a == nil {
+ t.Errorf("missing node!")
+ }
+
+ cache.DeleteFsNode(util.FullPath("/a"))
+	b = cache.GetFsNode(util.FullPath("/a/b"))
+	if b != nil {
+		t.Errorf("unexpected node!")
+	}
+
+ a = cache.GetFsNode(util.FullPath("/a"))
+ if a != nil {
+ t.Errorf("wrong DeleteFsNode!")
+ }
+
+ z := cache.GetFsNode(util.FullPath("/z"))
+ if z == nil {
+ t.Errorf("missing node!")
+ }
+
+ y := cache.GetFsNode(util.FullPath("/x/y"))
+ if y != nil {
+ t.Errorf("wrong node!")
+ }
+
+}
+
+func TestFsCacheMove(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x"))
+
+ d := cache.GetFsNode(util.FullPath("/z/x/d"))
+ if d == nil {
+ t.Errorf("unexpected nil node!")
+ }
+ if d.(*File).Name != "dd" {
+ t.Errorf("unexpected non dd node!")
+ }
+
+}
diff --git a/weed/filesys/meta_cache/cache_config.go b/weed/filesys/meta_cache/cache_config.go
new file mode 100644
index 000000000..e6593ebde
--- /dev/null
+++ b/weed/filesys/meta_cache/cache_config.go
@@ -0,0 +1,32 @@
+package meta_cache
+
+import "github.com/chrislusf/seaweedfs/weed/util"
+
+var (
+ _ = util.Configuration(&cacheConfig{})
+)
+
+// implementing util.Configuration
+type cacheConfig struct {
+ dir string
+}
+
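+// the leveldb store only asks for its "dir" option, so every string key
+// resolves to the cache directory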
+func (c cacheConfig) GetString(key string) string {
+ return c.dir
+}
+
+func (c cacheConfig) GetBool(key string) bool {
+ panic("implement me")
+}
+
+func (c cacheConfig) GetInt(key string) int {
+ panic("implement me")
+}
+
+func (c cacheConfig) GetStringSlice(key string) []string {
+ panic("implement me")
+}
+
+func (c cacheConfig) SetDefault(key string, value interface{}) {
+ panic("implement me")
+}
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
new file mode 100644
index 000000000..3b04040a5
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -0,0 +1,106 @@
+package meta_cache
+
+import (
+ "context"
+ "os"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
+)
+
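+// MetaCache is a local cache of filer metadata backed by an on-disk
+// LevelDB store; visitedBoundary records which directories have been
+// fully listed, so those lookups can be answered locally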
+type MetaCache struct {
+ actualStore filer2.FilerStore
+ sync.RWMutex
+ visitedBoundary *bounded_tree.BoundedTree
+}
+
+func NewMetaCache(dbFolder string) *MetaCache {
+ return &MetaCache{
+ actualStore: openMetaStore(dbFolder),
+ visitedBoundary: bounded_tree.NewBoundedTree(),
+ }
+}
+
+func openMetaStore(dbFolder string) filer2.FilerStore {
+
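+	// the cache is rebuilt from scratch on every mount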
+ os.RemoveAll(dbFolder)
+ os.MkdirAll(dbFolder, 0755)
+
+ store := &leveldb.LevelDBStore{}
+ config := &cacheConfig{
+ dir: dbFolder,
+ }
+
+ if err := store.Initialize(config, ""); err != nil {
+ glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
+ }
+
+ return store
+
+}
+
+func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer2.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.actualStore.InsertEntry(ctx, entry)
+}
+
+func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer2.Entry) error {
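+// AtomicUpdateEntry applies a rename or update event as one delete plus
+// one insert, touching only directories that have already been visited,
+// so the cache never holds partial listings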
+ mc.Lock()
+ defer mc.Unlock()
+
+ oldDir, _ := oldPath.DirAndName()
+ if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) {
+ if oldPath != "" {
+ if err := mc.actualStore.DeleteEntry(ctx, oldPath); err != nil {
+ return err
+ }
+ }
+	} else {
+ // println("unknown old directory:", oldDir)
+ }
+
+ if newEntry != nil {
+ newDir, _ := newEntry.DirAndName()
+ if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
+ if err := mc.actualStore.InsertEntry(ctx, newEntry); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer2.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.actualStore.UpdateEntry(ctx, entry)
+}
+
+func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer2.Entry, err error) {
+ mc.RLock()
+ defer mc.RUnlock()
+ return mc.actualStore.FindEntry(ctx, fp)
+}
+
+func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.actualStore.DeleteEntry(ctx, fp)
+}
+
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer2.Entry, error) {
+ mc.RLock()
+ defer mc.RUnlock()
+ return mc.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+}
+
+func (mc *MetaCache) Shutdown() {
+ mc.Lock()
+ defer mc.Unlock()
+ mc.actualStore.Shutdown()
+}
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
new file mode 100644
index 000000000..1fbc3e532
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -0,0 +1,47 @@
+package meta_cache
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func InitMetaCache(mc *MetaCache, client filer_pb.FilerClient, path string) error {
+	// eager synchronization is disabled: entries are loaded lazily via
+	// EnsureVisited on first access; the full traversal is kept for reference
+	if false {
+		glog.V(0).Infof("synchronizing metadata ...")
+		filer_pb.TraverseBfs(client, util.FullPath(path), func(parentPath util.FullPath, pbEntry *filer_pb.Entry) {
+			entry := filer2.FromPbEntry(string(parentPath), pbEntry)
+			if err := mc.InsertEntry(context.Background(), entry); err != nil {
+				glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+			}
+		})
+	}
+	return nil
+}
+
+func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) {
+
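+	// lazily pull one directory listing from the filer on first access,
+	// and remember in visitedBoundary that it has been loaded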
+ mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
+
+ glog.V(2).Infof("ReadDirAllEntries %s ...", path)
+
+		err = filer_pb.ReadDirAllEntries(client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
+			entry := filer2.FromPbEntry(string(path), pbEntry)
+ if err := mc.InsertEntry(context.Background(), entry); err != nil {
+ glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+ return err
+ }
+ if entry.IsDirectory() {
+ childDirectories = append(childDirectories, entry.Name())
+ }
+ return nil
+ })
+ if err != nil {
+ err = fmt.Errorf("list %s: %v", dirPath, err)
+ }
+ return
+ })
+}
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
new file mode 100644
index 000000000..2e411a48a
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -0,0 +1,69 @@
+package meta_cache
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func SubscribeMetaEvents(mc *MetaCache, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
+
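+	// translate each filer event into a delete of the old path plus an
+	// insert of the new entry, applied atomically to the local cache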
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+ var oldPath util.FullPath
+ var newEntry *filer2.Entry
+ if message.OldEntry != nil {
+ oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
+ glog.V(4).Infof("deleting %v", oldPath)
+ }
+
+ if message.NewEntry != nil {
+ dir := resp.Directory
+ if message.NewParentPath != "" {
+ dir = message.NewParentPath
+ }
+ key := util.NewFullPath(dir, message.NewEntry.Name)
+ glog.V(4).Infof("creating %v", key)
+ newEntry = filer2.FromPbEntry(dir, message.NewEntry)
+ }
+ return mc.AtomicUpdateEntry(context.Background(), oldPath, newEntry)
+ }
+
+ for {
+ err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ClientName: "mount",
+ PathPrefix: dir,
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return fmt.Errorf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.V(0).Infof("subscribing filer meta change: %v", err)
+ time.Sleep(time.Second)
+ }
+ }
+}
diff --git a/weed/filesys/unimplemented.go b/weed/filesys/unimplemented.go
new file mode 100644
index 000000000..1f4fe554d
--- /dev/null
+++ b/weed/filesys/unimplemented.go
@@ -0,0 +1,20 @@
+package filesys
+
+import (
+ "context"
+
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+)
+
+// https://github.com/bazil/fuse/issues/130
+
+var _ = fs.NodeAccesser(&Dir{})
+func (dir *Dir) Access(ctx context.Context, req *fuse.AccessRequest) error {
+ return fuse.ENOSYS
+}
+
+var _ = fs.NodeAccesser(&File{})
+func (file *File) Access(ctx context.Context, req *fuse.AccessRequest) error {
+ return fuse.ENOSYS
+}
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 969514a06..8dffa6555 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -5,32 +5,48 @@ import (
"fmt"
"math"
"os"
+ "path"
"sync"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/karlseguin/ccache"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
- "google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)
type Option struct {
FilerGrpcAddress string
+ GrpcDialOption grpc.DialOption
FilerMountRootPath string
Collection string
Replication string
TtlSec int32
ChunkSizeLimit int64
+ CacheDir string
+ CacheSizeMB int64
DataCenter string
- DirListingLimit int
+ DirListCacheLimit int64
EntryCacheTtl time.Duration
+ Umask os.FileMode
+
+ MountUid uint32
+ MountGid uint32
+ MountMode os.FileMode
+ MountCtime time.Time
+ MountMtime time.Time
+
+ OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers
+ Cipher bool // whether encrypt data on volume server
- MountUid uint32
- MountGid uint32
- MountMode os.FileMode
}
var _ = fs.FS(&WFS{})
@@ -38,17 +54,20 @@ var _ = fs.FSStatfser(&WFS{})
type WFS struct {
option *Option
- listDirectoryEntriesCache *ccache.Cache
- // contains all open handles
- handles []*FileHandle
- pathToHandleIndex map[string]int
- pathToHandleLock sync.Mutex
- bufPool sync.Pool
+ // contains all open handles, protected by handlesLock
+ handlesLock sync.Mutex
+ handles map[uint64]*FileHandle
- fileIdsDeletionChan chan []string
+ bufPool sync.Pool
stats statsCache
+
+ root fs.Node
+ fsNodeCache *FsCache
+
+ chunkCache *chunk_cache.ChunkCache
+ metaCache *meta_cache.MetaCache
}
type statsCache struct {
filer_pb.StatisticsResponse
@@ -58,74 +77,73 @@ type statsCache struct {
func NewSeaweedFileSystem(option *Option) *WFS {
wfs := &WFS{
option: option,
- listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(1024 * 8).ItemsToPrune(100)),
- pathToHandleIndex: make(map[string]int),
+ handles: make(map[uint64]*FileHandle),
bufPool: sync.Pool{
New: func() interface{} {
return make([]byte, option.ChunkSizeLimit)
},
},
- fileIdsDeletionChan: make(chan []string, 32),
+ }
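+	// isolate the on-disk cache per filer so mounts to different filers
+	// do not clobber each other's cache directories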
+ cacheUniqueId := util.Md5([]byte(option.FilerGrpcAddress))[0:4]
+ cacheDir := path.Join(option.CacheDir, cacheUniqueId)
+ if option.CacheSizeMB > 0 {
+ os.MkdirAll(cacheDir, 0755)
+ wfs.chunkCache = chunk_cache.NewChunkCache(256, cacheDir, option.CacheSizeMB)
+ grace.OnInterrupt(func() {
+ wfs.chunkCache.Shutdown()
+ })
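+	// try each configured filer until one succeeds; io.EOF means the
+	// stream ended normally and is not retried on another filer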
+ }
+
+ wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"))
+ startTime := time.Now()
+ if err := meta_cache.InitMetaCache(wfs.metaCache, wfs, wfs.option.FilerMountRootPath); err != nil {
+ glog.V(0).Infof("failed to init meta cache: %v", err)
+ } else {
+ go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
+ grace.OnInterrupt(func() {
+ wfs.metaCache.Shutdown()
+ })
}
- go wfs.loopProcessingDeletion()
+ wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
+ wfs.fsNodeCache = newFsCache(wfs.root)
return wfs
}
func (wfs *WFS) Root() (fs.Node, error) {
- return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil
-}
-
-func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-
- return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(client)
- }, wfs.option.FilerGrpcAddress)
-
+ return wfs.root, nil
}
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
- wfs.pathToHandleLock.Lock()
- defer wfs.pathToHandleLock.Unlock()
fullpath := file.fullpath()
+ glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid)
- index, found := wfs.pathToHandleIndex[fullpath]
- if found && wfs.handles[index] != nil {
- glog.V(2).Infoln(fullpath, "found fileHandle id", index)
- return wfs.handles[index]
- }
+ wfs.handlesLock.Lock()
+ defer wfs.handlesLock.Unlock()
- fileHandle = newFileHandle(file, uid, gid)
- for i, h := range wfs.handles {
- if h == nil {
- wfs.handles[i] = fileHandle
- fileHandle.handle = uint64(i)
- wfs.pathToHandleIndex[fullpath] = i
- glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle)
- return
- }
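+	// handles are keyed by inode, so concurrent opens of the same file
+	// share one FileHandle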
+ inodeId := file.fullpath().AsInode()
+ existingHandle, found := wfs.handles[inodeId]
+ if found && existingHandle != nil {
+ return existingHandle
}
- wfs.handles = append(wfs.handles, fileHandle)
- fileHandle.handle = uint64(len(wfs.handles) - 1)
- glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle)
- wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
+ fileHandle = newFileHandle(file, uid, gid)
+ wfs.handles[inodeId] = fileHandle
+ fileHandle.handle = inodeId
+ glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)
return
}
-func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) {
- wfs.pathToHandleLock.Lock()
- defer wfs.pathToHandleLock.Unlock()
+func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
+ wfs.handlesLock.Lock()
+ defer wfs.handlesLock.Unlock()
- glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
- delete(wfs.pathToHandleIndex, fullpath)
- if int(handleId) < len(wfs.handles) {
- wfs.handles[int(handleId)] = nil
- }
+ glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+
+ delete(wfs.handles, fullpath.AsInode())
return
}
@@ -137,7 +155,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
if wfs.stats.lastChecked < time.Now().Unix()-20 {
- err := wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.StatisticsRequest{
Collection: wfs.option.Collection,
@@ -146,7 +164,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
}
glog.V(4).Infof("reading filer stats: %+v", request)
- resp, err := client.Statistics(ctx, request)
+ resp, err := client.Statistics(context.Background(), request)
if err != nil {
glog.V(0).Infof("reading filer stats %v: %v", request, err)
return err
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index f58ef24f4..bf21b1808 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -2,39 +2,15 @@ package filesys
import (
"context"
- "time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
-func (wfs *WFS) loopProcessingDeletion() {
-
- ticker := time.NewTicker(2 * time.Second)
-
- wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- var fileIds []string
- for {
- select {
- case fids := <-wfs.fileIdsDeletionChan:
- fileIds = append(fileIds, fids...)
- if len(fileIds) >= 1024 {
- glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
- deleteFileIds(context.Background(), client, fileIds)
- fileIds = fileIds[:0]
- }
- case <-ticker.C:
- if len(fileIds) > 0 {
- glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
- deleteFileIds(context.Background(), client, fileIds)
- fileIds = fileIds[:0]
- }
- }
- }
- })
-
-}
-
func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
if len(chunks) == 0 {
return
@@ -42,17 +18,56 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
var fileIds []string
for _, chunk := range chunks {
- fileIds = append(fileIds, chunk.FileId)
+ fileIds = append(fileIds, chunk.GetFileIdString())
}
- var async = false
- if async {
- wfs.fileIdsDeletionChan <- fileIds
- return
- }
-
- wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- deleteFileIds(context.Background(), client, fileIds)
+ wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ wfs.deleteFileIds(wfs.option.GrpcDialOption, client, fileIds)
return nil
})
}
+
+func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
+
+ var vids []string
+ for _, fileId := range fileIds {
+ vids = append(vids, filer2.VolumeId(fileId))
+ }
+
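+	// resolve volume ids to server locations via the filer, rewriting urls
+	// for mounts running outside the cluster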
+ lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
+
+ m := make(map[string]operation.LookupResult)
+
+ glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: vids,
+ })
+ if err != nil {
+ return m, err
+ }
+
+ for _, vid := range vids {
+ lr := operation.LookupResult{
+ VolumeId: vid,
+ Locations: nil,
+ }
+ locations, found := resp.LocationsMap[vid]
+ if !found {
+ continue
+ }
+ for _, loc := range locations.Locations {
+ lr.Locations = append(lr.Locations, operation.Location{
+ Url: wfs.AdjustedUrl(loc.Url),
+ PublicUrl: loc.PublicUrl,
+ })
+ }
+ m[vid] = lr
+ }
+
+ return m, err
+ }
+
+ _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
+
+ return err
+}
diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go
new file mode 100644
index 000000000..736df3588
--- /dev/null
+++ b/weed/filesys/wfs_filer_client.go
@@ -0,0 +1,40 @@
+package filesys
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+var _ = filer_pb.FilerClient(&WFS{})
+
+func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
+
+ if err == nil {
+ return nil
+ }
+ return err
+
+}
+
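+// AdjustedUrl rewrites a volume server address for mounts running outside
+// the SeaweedFS containers: the container-internal host is replaced with
+// the filer's host, keeping the original port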
+func (wfs *WFS) AdjustedUrl(hostAndPort string) string {
+ if !wfs.option.OutsideContainerClusterMode {
+ return hostAndPort
+ }
+	colonIndex := strings.Index(hostAndPort, ":")
+	if colonIndex < 0 {
+		return hostAndPort
+	}
+	filerColonIndex := strings.Index(wfs.option.FilerGrpcAddress, ":")
+	return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerColonIndex], hostAndPort[colonIndex+1:])
+
+}
diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go
new file mode 100644
index 000000000..091a70fa3
--- /dev/null
+++ b/weed/filesys/xattr.go
@@ -0,0 +1,123 @@
+package filesys
+
+import (
+ "context"
+
+ "github.com/seaweedfs/fuse"
+
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+
+ if entry == nil {
+ return fuse.ErrNoXattr
+ }
+ if entry.Extended == nil {
+ return fuse.ErrNoXattr
+ }
+ data, found := entry.Extended[req.Name]
+ if !found {
+ return fuse.ErrNoXattr
+ }
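+	// serve the requested window [Position, Position+Size) of the value;
+	// Size == 0 means everything from Position onwards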
+ if req.Position < uint32(len(data)) {
+ size := req.Size
+ if req.Position+size >= uint32(len(data)) {
+ size = uint32(len(data)) - req.Position
+ }
+ if size == 0 {
+ resp.Xattr = data[req.Position:]
+ } else {
+ resp.Xattr = data[req.Position : req.Position+size]
+ }
+ }
+
+ return nil
+
+}
+
+func setxattr(entry *filer_pb.Entry, req *fuse.SetxattrRequest) error {
+
+ if entry == nil {
+ return fuse.EIO
+ }
+
+ if entry.Extended == nil {
+ entry.Extended = make(map[string][]byte)
+ }
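+	// xattr writes may arrive at an offset: extend the existing value and
+	// overwrite the window starting at req.Position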
+	data := entry.Extended[req.Name]
+
+ newData := make([]byte, int(req.Position)+len(req.Xattr))
+
+ copy(newData, data)
+
+ copy(newData[int(req.Position):], req.Xattr)
+
+ entry.Extended[req.Name] = newData
+
+ return nil
+
+}
+
+func removexattr(entry *filer_pb.Entry, req *fuse.RemovexattrRequest) error {
+
+ if entry == nil {
+ return fuse.ErrNoXattr
+ }
+
+ if entry.Extended == nil {
+ return fuse.ErrNoXattr
+ }
+
+ _, found := entry.Extended[req.Name]
+
+ if !found {
+ return fuse.ErrNoXattr
+ }
+
+ delete(entry.Extended, req.Name)
+
+ return nil
+
+}
+
+func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
+
+ if entry == nil {
+ return fuse.EIO
+ }
+
+ for k := range entry.Extended {
+ resp.Append(k)
+ }
+
+ size := req.Size
+ if req.Position+size >= uint32(len(resp.Xattr)) {
+ size = uint32(len(resp.Xattr)) - req.Position
+ }
+
+ if size == 0 {
+ resp.Xattr = resp.Xattr[req.Position:]
+ } else {
+ resp.Xattr = resp.Xattr[req.Position : req.Position+size]
+ }
+
+ return nil
+
+}
+
+func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
+
+ fullpath := util.NewFullPath(dir, name)
+ // glog.V(3).Infof("read entry cache miss %s", fullpath)
+
+ // read from async meta cache
+ meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))
+	cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath)
+	if cacheErr == filer_pb.ErrNotFound {
+		return nil, fuse.ENOENT
+	}
+	if cacheErr != nil {
+		return nil, fuse.EIO
+	}
+	return cachedEntry.ToProtoEntry(), nil
+}
diff --git a/weed/glide.lock b/weed/glide.lock
deleted file mode 100644
index fee78be42..000000000
--- a/weed/glide.lock
+++ /dev/null
@@ -1,190 +0,0 @@
-hash: 2e3a065472829938d25e879451b6d1aa43e55270e1166a9c044803ef8a3b9eb1
-updated: 2018-06-28T22:01:35.910567-07:00
-imports:
-- name: github.com/seaweedfs/fuse
- version: 65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e
- subpackages:
- - fs
- - fuseutil
-- name: github.com/boltdb/bolt
- version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8
-- name: github.com/chrislusf/raft
- version: 5f7ddd8f479583daf05879d3d3b174aa202c8fb7
- subpackages:
- - protobuf
-- name: github.com/dgrijalva/jwt-go
- version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e
-- name: github.com/disintegration/imaging
- version: bbcee2f5c9d5e94ca42c8b50ec847fec64a6c134
-- name: github.com/fsnotify/fsnotify
- version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
-- name: github.com/go-redis/redis
- version: 83fb42932f6145ce52df09860384a4653d2d332a
- subpackages:
- - internal
- - internal/consistenthash
- - internal/hashtag
- - internal/pool
- - internal/proto
- - internal/singleflight
- - internal/util
-- name: github.com/go-sql-driver/mysql
- version: d523deb1b23d913de5bdada721a6071e71283618
-- name: github.com/gocql/gocql
- version: e06f8c1bcd787e6bf0608288b314522f08cc7848
- subpackages:
- - internal/lru
- - internal/murmur
- - internal/streams
-- name: github.com/gogo/protobuf
- version: 30cf7ac33676b5786e78c746683f0d4cd64fa75b
- subpackages:
- - proto
-- name: github.com/golang/protobuf
- version: b4deda0973fb4c70b50d226b1af49f3da59f5265
- subpackages:
- - proto
- - protoc-gen-go/descriptor
- - ptypes
- - ptypes/any
- - ptypes/duration
- - ptypes/timestamp
-- name: github.com/golang/snappy
- version: 2e65f85255dbc3072edf28d6b5b8efc472979f5a
-- name: github.com/google/btree
- version: e89373fe6b4a7413d7acd6da1725b83ef713e6e4
-- name: github.com/gorilla/context
- version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
-- name: github.com/gorilla/mux
- version: e3702bed27f0d39777b0b37b664b6280e8ef8fbf
-- name: github.com/hailocab/go-hostpool
- version: e80d13ce29ede4452c43dea11e79b9bc8a15b478
-- name: github.com/hashicorp/hcl
- version: ef8a98b0bbce4a65b5aa4c368430a80ddc533168
- subpackages:
- - hcl/ast
- - hcl/parser
- - hcl/printer
- - hcl/scanner
- - hcl/strconv
- - hcl/token
- - json/parser
- - json/scanner
- - json/token
-- name: github.com/karlseguin/ccache
- version: b425c9ca005a2050ebe723f6a0cddcb907354ab7
-- name: github.com/klauspost/crc32
- version: cb6bfca970f6908083f26f39a79009d608efd5cd
-- name: github.com/lib/pq
- version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8
- subpackages:
- - oid
-- name: github.com/magiconair/properties
- version: c2353362d570a7bfa228149c62842019201cfb71
-- name: github.com/mitchellh/mapstructure
- version: bb74f1db0675b241733089d5a1faa5dd8b0ef57b
-- name: github.com/pelletier/go-toml
- version: c01d1270ff3e442a8a57cddc1c92dc1138598194
-- name: github.com/rwcarlsen/goexif
- version: 8d986c03457a2057c7b0fb0a48113f7dd48f9619
- subpackages:
- - exif
- - tiff
-- name: github.com/soheilhy/cmux
- version: e09e9389d85d8492d313d73d1469c029e710623f
-- name: github.com/spf13/afero
- version: 787d034dfe70e44075ccc060d346146ef53270ad
- subpackages:
- - mem
-- name: github.com/spf13/cast
- version: 8965335b8c7107321228e3e3702cab9832751bac
-- name: github.com/spf13/jwalterweatherman
- version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394
-- name: github.com/spf13/pflag
- version: 3ebe029320b2676d667ae88da602a5f854788a8a
-- name: github.com/spf13/viper
- version: 15738813a09db5c8e5b60a19d67d3f9bd38da3a4
-- name: github.com/syndtr/goleveldb
- version: 0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697
- subpackages:
- - leveldb
- - leveldb/cache
- - leveldb/comparer
- - leveldb/errors
- - leveldb/filter
- - leveldb/iterator
- - leveldb/journal
- - leveldb/memdb
- - leveldb/opt
- - leveldb/storage
- - leveldb/table
- - leveldb/util
-- name: golang.org/x/image
- version: cc896f830cedae125428bc9fe1b0362aa91b3fb1
- subpackages:
- - bmp
- - tiff
- - tiff/lzw
-- name: golang.org/x/net
- version: 4cb1c02c05b0e749b0365f61ae859a8e0cfceed9
- subpackages:
- - context
- - http/httpguts
- - http2
- - http2/hpack
- - idna
- - internal/timeseries
- - trace
-- name: golang.org/x/sys
- version: 7138fd3d9dc8335c567ca206f4333fb75eb05d56
- subpackages:
- - unix
-- name: golang.org/x/text
- version: 5cec4b58c438bd98288aeb248bab2c1840713d21
- subpackages:
- - secure/bidirule
- - transform
- - unicode/bidi
- - unicode/norm
-- name: google.golang.org/appengine
- version: b1f26356af11148e710935ed1ac8a7f5702c7612
- subpackages:
- - cloudsql
-- name: google.golang.org/genproto
- version: ff3583edef7de132f219f0efc00e097cabcc0ec0
- subpackages:
- - googleapis/rpc/status
-- name: google.golang.org/grpc
- version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8
- subpackages:
- - balancer
- - balancer/base
- - balancer/roundrobin
- - codes
- - connectivity
- - credentials
- - encoding
- - encoding/proto
- - grpclog
- - internal
- - internal/backoff
- - internal/channelz
- - internal/grpcrand
- - keepalive
- - metadata
- - naming
- - peer
- - reflection
- - reflection/grpc_reflection_v1alpha
- - resolver
- - resolver/dns
- - resolver/passthrough
- - stats
- - status
- - tap
- - transport
-- name: gopkg.in/inf.v0
- version: d2d2541c53f18d2a059457998ce2876cc8e67cbf
-- name: gopkg.in/yaml.v2
- version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
-testImports: []
diff --git a/weed/glide.yaml b/weed/glide.yaml
deleted file mode 100644
index 740d2ad3d..000000000
--- a/weed/glide.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-package: github.com/chrislusf/seaweedfs/weed
-import:
-- package: github.com/seaweedfs/fuse
- subpackages:
- - fs
-- package: github.com/boltdb/bolt
- version: ^1.3.1
-- package: github.com/chrislusf/raft
-- package: github.com/dgrijalva/jwt-go
- version: ^3.2.0
-- package: github.com/disintegration/imaging
- version: ^1.4.1
-- package: github.com/go-redis/redis
- version: ^6.10.2
-- package: github.com/go-sql-driver/mysql
- version: ^1.3.0
-- package: github.com/gocql/gocql
-- package: github.com/golang/protobuf
- version: ^1.0.0
- subpackages:
- - proto
-- package: github.com/google/btree
-- package: github.com/gorilla/mux
- version: ^1.6.1
-- package: github.com/klauspost/crc32
- version: ^1.1.0
-- package: github.com/lib/pq
-- package: github.com/rwcarlsen/goexif
- subpackages:
- - exif
-- package: github.com/soheilhy/cmux
- version: ^0.1.4
-- package: github.com/syndtr/goleveldb
- subpackages:
- - leveldb
- - leveldb/util
-- package: golang.org/x/net
- subpackages:
- - context
-- package: google.golang.org/grpc
- version: ^1.11.3
- subpackages:
- - peer
- - reflection
diff --git a/weed/images/orientation.go b/weed/images/orientation.go
deleted file mode 100644
index 4bff89311..000000000
--- a/weed/images/orientation.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package images
-
-import (
- "bytes"
- "image"
- "image/draw"
- "image/jpeg"
- "log"
-
- "github.com/rwcarlsen/goexif/exif"
-)
-
-//many code is copied from http://camlistore.org/pkg/images/images.go
-func FixJpgOrientation(data []byte) (oriented []byte) {
- ex, err := exif.Decode(bytes.NewReader(data))
- if err != nil {
- return data
- }
- tag, err := ex.Get(exif.Orientation)
- if err != nil {
- return data
- }
- angle := 0
- flipMode := FlipDirection(0)
- orient, err := tag.Int(0)
- if err != nil {
- return data
- }
- switch orient {
- case topLeftSide:
- // do nothing
- return data
- case topRightSide:
- flipMode = 2
- case bottomRightSide:
- angle = 180
- case bottomLeftSide:
- angle = 180
- flipMode = 2
- case leftSideTop:
- angle = -90
- flipMode = 2
- case rightSideTop:
- angle = -90
- case rightSideBottom:
- angle = 90
- flipMode = 2
- case leftSideBottom:
- angle = 90
- }
-
- if srcImage, _, err := image.Decode(bytes.NewReader(data)); err == nil {
- dstImage := flip(rotate(srcImage, angle), flipMode)
- var buf bytes.Buffer
- jpeg.Encode(&buf, dstImage, nil)
- return buf.Bytes()
- }
-
- return data
-}
-
-// Exif Orientation Tag values
-// http://sylvana.net/jpegcrop/exif_orientation.html
-const (
- topLeftSide = 1
- topRightSide = 2
- bottomRightSide = 3
- bottomLeftSide = 4
- leftSideTop = 5
- rightSideTop = 6
- rightSideBottom = 7
- leftSideBottom = 8
-)
-
-// The FlipDirection type is used by the Flip option in DecodeOpts
-// to indicate in which direction to flip an image.
-type FlipDirection int
-
-// FlipVertical and FlipHorizontal are two possible FlipDirections
-// values to indicate in which direction an image will be flipped.
-const (
- FlipVertical FlipDirection = 1 << iota
- FlipHorizontal
-)
-
-type DecodeOpts struct {
- // Rotate specifies how to rotate the image.
- // If nil, the image is rotated automatically based on EXIF metadata.
- // If an int, Rotate is the number of degrees to rotate
- // counter clockwise and must be one of 0, 90, -90, 180, or
- // -180.
- Rotate interface{}
-
- // Flip specifies how to flip the image.
- // If nil, the image is flipped automatically based on EXIF metadata.
- // Otherwise, Flip is a FlipDirection bitfield indicating how to flip.
- Flip interface{}
-}
-
-func rotate(im image.Image, angle int) image.Image {
- var rotated *image.NRGBA
- // trigonometric (i.e counter clock-wise)
- switch angle {
- case 90:
- newH, newW := im.Bounds().Dx(), im.Bounds().Dy()
- rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH))
- for y := 0; y < newH; y++ {
- for x := 0; x < newW; x++ {
- rotated.Set(x, y, im.At(newH-1-y, x))
- }
- }
- case -90:
- newH, newW := im.Bounds().Dx(), im.Bounds().Dy()
- rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH))
- for y := 0; y < newH; y++ {
- for x := 0; x < newW; x++ {
- rotated.Set(x, y, im.At(y, newW-1-x))
- }
- }
- case 180, -180:
- newW, newH := im.Bounds().Dx(), im.Bounds().Dy()
- rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH))
- for y := 0; y < newH; y++ {
- for x := 0; x < newW; x++ {
- rotated.Set(x, y, im.At(newW-1-x, newH-1-y))
- }
- }
- default:
- return im
- }
- return rotated
-}
-
-// flip returns a flipped version of the image im, according to
-// the direction(s) in dir.
-// It may flip the imput im in place and return it, or it may allocate a
-// new NRGBA (if im is an *image.YCbCr).
-func flip(im image.Image, dir FlipDirection) image.Image {
- if dir == 0 {
- return im
- }
- ycbcr := false
- var nrgba image.Image
- dx, dy := im.Bounds().Dx(), im.Bounds().Dy()
- di, ok := im.(draw.Image)
- if !ok {
- if _, ok := im.(*image.YCbCr); !ok {
- log.Printf("failed to flip image: input does not satisfy draw.Image")
- return im
- }
- // because YCbCr does not implement Set, we replace it with a new NRGBA
- ycbcr = true
- nrgba = image.NewNRGBA(image.Rect(0, 0, dx, dy))
- di, ok = nrgba.(draw.Image)
- if !ok {
- log.Print("failed to flip image: could not cast an NRGBA to a draw.Image")
- return im
- }
- }
- if dir&FlipHorizontal != 0 {
- for y := 0; y < dy; y++ {
- for x := 0; x < dx/2; x++ {
- old := im.At(x, y)
- di.Set(x, y, im.At(dx-1-x, y))
- di.Set(dx-1-x, y, old)
- }
- }
- }
- if dir&FlipVertical != 0 {
- for y := 0; y < dy/2; y++ {
- for x := 0; x < dx; x++ {
- old := im.At(x, y)
- di.Set(x, y, im.At(x, dy-1-y))
- di.Set(x, dy-1-y, old)
- }
- }
- }
- if ycbcr {
- return nrgba
- }
- return im
-}
diff --git a/weed/images/orientation_test.go b/weed/images/orientation_test.go
deleted file mode 100644
index 32fa38f76..000000000
--- a/weed/images/orientation_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package images
-
-import (
- "io/ioutil"
- "os"
- "testing"
-)
-
-func TestXYZ(t *testing.T) {
- fname := "sample1.jpg"
-
- dat, _ := ioutil.ReadFile(fname)
-
- fixed_data := FixJpgOrientation(dat)
-
- ioutil.WriteFile("fixed1.jpg", fixed_data, 0644)
-
- os.Remove("fixed1.jpg")
-
-}
diff --git a/weed/images/preprocess.go b/weed/images/preprocess.go
deleted file mode 100644
index f6f3b554d..000000000
--- a/weed/images/preprocess.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package images
-
-import (
- "bytes"
- "io"
- "path/filepath"
- "strings"
-)
-
-/*
-* Preprocess image files on client side.
-* 1. possibly adjust the orientation
-* 2. resize the image to a width or height limit
-* 3. remove the exif data
-* Call this function on any file uploaded to SeaweedFS
-*
- */
-func MaybePreprocessImage(filename string, data []byte, width, height int) (resized io.ReadSeeker, w int, h int) {
- ext := filepath.Ext(filename)
- ext = strings.ToLower(ext)
- switch ext {
- case ".png", ".gif":
- return Resized(ext, bytes.NewReader(data), width, height, "")
- case ".jpg", ".jpeg":
- data = FixJpgOrientation(data)
- return Resized(ext, bytes.NewReader(data), width, height, "")
- }
- return bytes.NewReader(data), 0, 0
-}
diff --git a/weed/images/resizing.go b/weed/images/resizing.go
index ff0eff5e1..b048daa1c 100644
--- a/weed/images/resizing.go
+++ b/weed/images/resizing.go
@@ -6,10 +6,11 @@ import (
"image/gif"
"image/jpeg"
"image/png"
+ "io"
- "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/disintegration/imaging"
- "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
)
func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
}
}
} else {
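+		// rewind: decoding consumed the reader, so reset it before
+		// handing the original bytes back to the caller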
+ read.Seek(0, 0)
return read, bounds.Dx(), bounds.Dy()
}
var buf bytes.Buffer
diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go
new file mode 100644
index 000000000..80f107e00
--- /dev/null
+++ b/weed/messaging/broker/broker_append.go
@@ -0,0 +1,113 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error {
+
+ assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data)
+ if err2 != nil {
+ return err2
+ }
+
+ dir, name := util.FullPath(targetFile).DirAndName()
+
+ // append the chunk
+ if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AppendToEntryRequest{
+ Directory: dir,
+ EntryName: name,
+ Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)},
+ }
+
+ _, err := client.AppendToEntry(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("append to file %v: %v", request, err)
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("append to file %v: %v", targetFile, err)
+ }
+
+ return nil
+}
+
+func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+
+ var assignResult = &operation.AssignResult{}
+
+ // assign a volume location
+ if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: topicConfig.Replication,
+ Collection: topicConfig.Collection,
+ }
+
+ resp, err := client.AssignVolume(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ return err
+ }
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
+
+ assignResult.Auth = security.EncodedJwt(resp.Auth)
+ assignResult.Fid = resp.FileId
+ assignResult.Url = resp.Url
+ assignResult.PublicUrl = resp.PublicUrl
+ assignResult.Count = uint64(resp.Count)
+
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // upload data
+ targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+ uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
+
+var _ = filer_pb.FilerClient(&MessageBroker{})
+
+func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) {
+
+ for _, filer := range broker.option.Filers {
+ if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil {
+ if err == io.EOF {
+ return
+ }
+ glog.V(0).Infof("fail to connect to %s: %v", filer, err)
+ } else {
+ break
+ }
+ }
+
+ return
+
+}
+
+func (broker *MessageBroker) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
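The write path above is three steps: ask a filer to assign a volume location, upload the payload to that volume server, then append the resulting chunk to the target entry. A hypothetical flush hook shows how a caller hands a serialized log segment to appendToFile; the path layout mirrors the topic manager's flush function later in this change, and the date components are placeholders:

// Hypothetical caller: persist one in-memory segment of a topic partition.
func flushSegment(broker *MessageBroker, cfg *messaging_pb.TopicConfiguration,
	namespace, topic string, partition int32, segment []byte) {

	// One directory per day, one file per minute, suffixed with the partition.
	targetFile := fmt.Sprintf("%s/%s/%s/2020-01-02/03-04.part%02d",
		filer2.TopicsDir, namespace, topic, partition)

	if err := broker.appendToFile(targetFile, cfg, segment); err != nil {
		glog.V(0).Infof("flush segment %s: %v", targetFile, err)
	}
}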
diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go
new file mode 100644
index 000000000..abd5c9f73
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server.go
@@ -0,0 +1,37 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
+ panic("implement me")
+}
+
+func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) {
+ resp := &messaging_pb.DeleteTopicResponse{}
+ dir, entry := genTopicDirEntry(request.Namespace, request.Topic)
+ if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil {
+ return nil, err
+ } else if exists {
+ err = filer_pb.Remove(broker, dir, entry, true, true, true, false)
+ }
+ return resp, nil
+}
+
+func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
+ panic("implement me")
+}
+
+func genTopicDir(namespace, topic string) string {
+ return fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, namespace, topic)
+}
+
+func genTopicDirEntry(namespace, topic string) (dir, entry string) {
+ return fmt.Sprintf("%s/%s", filer2.TopicsDir, namespace), topic
+}
diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go
new file mode 100644
index 000000000..3c14f3220
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_discovery.go
@@ -0,0 +1,116 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+/*
+Topic discovery:
+
+When a publisher or subscriber connects, it asks for the whole broker list and runs consistent hashing to find its broker.
+
+The broker then checks with its peers whether the topic is already hosted by another broker; if that broker is alive and acknowledged as alive, it redirects to it.
+Otherwise, it hosts the topic itself.
+
+So if the publisher and subscriber connect around the same time, they end up on the same broker. Everyone is happy.
+If one of them connects very late, after the topology has changed with new servers added or old servers gone, checking peers resolves it.
+
+*/
+
+func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) {
+
+ t := &messaging_pb.FindBrokerResponse{}
+ var peers []string
+
+ targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition)
+
+ for _, filer := range broker.option.Filers {
+ err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{
+ Resource: targetTopicPartition,
+ })
+ if err != nil {
+ return err
+ }
+ if resp.Found && len(resp.Resources) > 0 {
+ t.Broker = resp.Resources[0].GrpcAddresses
+ return nil
+ }
+ for _, b := range resp.Resources {
+ peers = append(peers, b.GrpcAddresses)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // keep a broker address already located via a filer; otherwise pick
+ // one among the peers by consistent hashing
+ if t.Broker == "" {
+ t.Broker = PickMember(peers, []byte(targetTopicPartition))
+ }
+
+ return t, nil
+
+}
+
+func (broker *MessageBroker) checkFilers() {
+
+ // contact a filer about masters
+ var masters []string
+ found := false
+ for !found {
+ for _, filer := range broker.option.Filers {
+ err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return err
+ }
+ masters = append(masters, resp.Masters...)
+ return nil
+ })
+ if err == nil {
+ found = true
+ break
+ }
+ glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
+ time.Sleep(time.Second)
+ }
+ }
+ glog.V(0).Infof("received master list: %s", masters)
+
+ // contact each masters for filers
+ var filers []string
+ found = false
+ for !found {
+ for _, master := range masters {
+ err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error {
+ resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
+ ClientType: "filer",
+ })
+ if err != nil {
+ return err
+ }
+
+ filers = append(filers, resp.GrpcAddresses...)
+
+ return nil
+ })
+ if err == nil {
+ found = true
+ break
+ }
+ glog.V(0).Infof("failed to list filers: %v", err)
+ time.Sleep(time.Second)
+ }
+ }
+ glog.V(0).Infof("received filer list: %s", filers)
+
+ broker.option.Filers = filers
+
+}
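From the client's point of view, the discovery dance above reduces to a single FindBroker RPC against any known broker: the response either names the broker that already hosts the partition, or the consistent-hashing pick among live peers. A minimal sketch, with connection setup elided:

// Sketch: resolve the broker address for one topic partition.
func resolveBroker(client messaging_pb.SeaweedMessagingClient,
	namespace, topic string, partition int32) (string, error) {

	resp, err := client.FindBroker(context.Background(), &messaging_pb.FindBrokerRequest{
		Namespace: namespace,
		Topic:     topic,
		Parition:  partition, // field name as generated in messaging_pb
	})
	if err != nil {
		return "", err
	}
	return resp.Broker, nil
}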
diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go
new file mode 100644
index 000000000..dc11061af
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_publish.go
@@ -0,0 +1,112 @@
+package broker
+
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {
+
+ // process initial request
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ // TODO look it up
+ topicConfig := &messaging_pb.TopicConfiguration{
+ // IsTransient: true,
+ }
+
+ // send init response
+ initResponse := &messaging_pb.PublishResponse{
+ Config: nil,
+ Redirect: nil,
+ }
+ err = stream.Send(initResponse)
+ if err != nil {
+ return err
+ }
+ if initResponse.Redirect != nil {
+ return nil
+ }
+
+ // get lock
+ tp := TopicPartition{
+ Namespace: in.Init.Namespace,
+ Topic: in.Init.Topic,
+ Partition: in.Init.Partition,
+ }
+
+ tpDir := fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, tp.Namespace, tp.Topic)
+ md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
+ // println("chan data stored under", tpDir, "as", md5File)
+
+ if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists {
+ return fmt.Errorf("channel is already closed")
+ }
+
+ tl := broker.topicManager.RequestLock(tp, topicConfig, true)
+ defer broker.topicManager.ReleaseLock(tp, true)
+
+ md5hash := md5.New()
+ // process each message
+ for {
+ // println("recv")
+ in, err := stream.Recv()
+ // glog.V(0).Infof("recieved %v err: %v", in, err)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if in.Data == nil {
+ continue
+ }
+
+ // fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value))
+
+ data, err := proto.Marshal(in.Data)
+ if err != nil {
+ glog.Errorf("marshall error: %v\n", err)
+ continue
+ }
+
+ tl.logBuffer.AddToBuffer(in.Data.Key, data)
+
+ if in.Data.IsClose {
+ // println("server received closing")
+ break
+ }
+
+ md5hash.Write(in.Data.Value)
+
+ }
+
+ if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
+ glog.V(0).Infof("err writing %s: %v", md5File, err)
+ }
+
+ // fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
+
+ // send the close ack
+ // println("server send ack closing")
+ if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
+ glog.V(0).Infof("err sending close response: %v", err)
+ }
+ return nil
+
+}
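The stream protocol implemented above is small and symmetric: one init frame naming the topic partition, one init response (possibly a redirect), then data frames until one carries IsClose, which the broker acknowledges with IsClosed. A condensed client-side sketch, with dialing and most error handling elided:

// Sketch of a publisher session against the handler above.
func publishOnce(conn *grpc.ClientConn, payload []byte) error {
	stream, err := messaging_pb.NewSeaweedMessagingClient(conn).Publish(context.Background())
	if err != nil {
		return err
	}
	// 1. init frame names the topic partition
	stream.Send(&messaging_pb.PublishRequest{
		Init: &messaging_pb.PublishRequest_InitMessage{
			Namespace: "chan", Topic: "demo", Partition: 0,
		},
	})
	// 2. init response may carry a redirect or config
	if _, err := stream.Recv(); err != nil {
		return err
	}
	// 3. data frames, then a close marker
	stream.Send(&messaging_pb.PublishRequest{Data: &messaging_pb.Message{Value: payload}})
	stream.Send(&messaging_pb.PublishRequest{Data: &messaging_pb.Message{IsClose: true}})
	// 4. broker acknowledges with IsClosed
	_, err = stream.Recv()
	return err
}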
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
new file mode 100644
index 000000000..9a7d653b5
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -0,0 +1,162 @@
+package broker
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {
+
+ // process initial request
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ var processedTsNs int64
+ var messageCount int64
+ subscriberId := in.Init.SubscriberId
+
+ // TODO look it up
+ topicConfig := &messaging_pb.TopicConfiguration{
+ // IsTransient: true,
+ }
+
+ // get lock
+ tp := TopicPartition{
+ Namespace: in.Init.Namespace,
+ Topic: in.Init.Topic,
+ Partition: in.Init.Partition,
+ }
+ fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String())
+ defer func() {
+ fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs))
+ }()
+
+ lock := broker.topicManager.RequestLock(tp, topicConfig, false)
+ defer broker.topicManager.ReleaseLock(tp, false)
+
+ isConnected := true
+ go func() {
+ for isConnected {
+ if _, err := stream.Recv(); err != nil {
+ // println("disconnecting connection to", subscriberId, tp.String())
+ isConnected = false
+ lock.cond.Signal()
+ }
+ }
+ }()
+
+ lastReadTime := time.Now()
+ switch in.Init.StartPosition {
+ case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
+ lastReadTime = time.Unix(0, in.Init.TimestampNs)
+ case messaging_pb.SubscriberMessage_InitMessage_LATEST:
+ case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
+ lastReadTime = time.Unix(0, 0)
+ }
+
+ // how to process each message
+ // an error returned will end the subscription
+ eachMessageFn := func(m *messaging_pb.Message) error {
+ err := stream.Send(&messaging_pb.BrokerMessage{
+ Data: m,
+ })
+ if err != nil {
+ glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
+ }
+ return err
+ }
+
+ eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
+ m := &messaging_pb.Message{}
+ if err = proto.Unmarshal(logEntry.Data, m); err != nil {
+ glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ return err
+ }
+ // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
+ if err = eachMessageFn(m); err != nil {
+ glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
+ return err
+ }
+ if m.IsClose {
+ // println("processed EOF")
+ return io.EOF
+ }
+ processedTsNs = logEntry.TsNs
+ messageCount++
+ return nil
+ }
+
+ if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
+ if err != io.EOF {
+ // println("stopping from persisted logs", err.Error())
+ return err
+ }
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
+ // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
+
+ err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
+ lock.Mutex.Lock()
+ lock.cond.Wait()
+ lock.Mutex.Unlock()
+ return isConnected
+ }, eachLogEntryFn)
+
+ return err
+
+}
+
+func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) {
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ topicDir := genTopicDir(tp.Namespace, tp.Topic)
+ partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition)
+
+ return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error {
+ dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name)
+ return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error {
+ if dayEntry.Name == startDate {
+ if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 {
+ return nil
+ }
+ }
+ if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) {
+ return nil
+ }
+ // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
+ chunkedFileReader := filer2.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
+ defer chunkedFileReader.Close()
+ if _, err := filer2.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ return err
+ }
+ return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err)
+ }
+ return nil
+ }, "", false, 24*60)
+ }, startDate, true, 366)
+
+}
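readPersistedLogBuffer walks a fixed directory layout on the filer: one directory per topic, one subdirectory per day, one file per minute and partition. A small helper makes the layout explicit; it mirrors the format string used by the topic manager's flush function later in this change:

// Sketch: the filer path holding one partition's segment for a given minute.
func segmentPath(namespace, topic string, partition int32, t time.Time) string {
	return fmt.Sprintf("%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
		filer2.TopicsDir, namespace, topic,
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(),
		partition)
}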
diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go
new file mode 100644
index 000000000..0c04d2841
--- /dev/null
+++ b/weed/messaging/broker/broker_server.go
@@ -0,0 +1,112 @@
+package broker
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+type MessageBrokerOption struct {
+ Filers []string
+ DefaultReplication string
+ MaxMB int
+ Ip string
+ Port int
+ Cipher bool
+}
+
+type MessageBroker struct {
+ option *MessageBrokerOption
+ grpcDialOption grpc.DialOption
+ topicManager *TopicManager
+}
+
+func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) {
+
+ messageBroker = &MessageBroker{
+ option: option,
+ grpcDialOption: grpcDialOption,
+ }
+
+ messageBroker.topicManager = NewTopicManager(messageBroker)
+
+ messageBroker.checkFilers()
+
+ go messageBroker.keepConnectedToOneFiler()
+
+ return messageBroker, nil
+}
+
+func (broker *MessageBroker) keepConnectedToOneFiler() {
+
+ for {
+ for _, filer := range broker.option.Filers {
+ broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ stream, err := client.KeepConnected(context.Background())
+ if err != nil {
+ glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+
+ initRequest := &filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }
+ for _, tp := range broker.topicManager.ListTopicPartitions() {
+ initRequest.Resources = append(initRequest.Resources, tp.String())
+ }
+ if err := stream.Send(&filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }); err != nil {
+ glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+
+ // TODO send events of adding/removing topics
+
+ glog.V(0).Infof("conntected with filer: %v", filer)
+ for {
+ if err := stream.Send(&filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }); err != nil {
+ glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+ // println("send heartbeat")
+ if _, err := stream.Recv(); err != nil {
+ glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+ // println("received reply")
+ time.Sleep(11 * time.Second)
+ // println("woke up")
+ }
+ return nil
+ })
+ time.Sleep(3 * time.Second)
+ }
+ }
+
+}
+
+func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithFilerClient(filer, broker.grpcDialOption, fn)
+
+}
+
+func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error {
+
+ return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error {
+ return fn(client)
+ })
+
+}
diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go
new file mode 100644
index 000000000..465a2a8f2
--- /dev/null
+++ b/weed/messaging/broker/consistent_distribution.go
@@ -0,0 +1,38 @@
+package broker
+
+import (
+ "github.com/buraksezer/consistent"
+ "github.com/cespare/xxhash"
+)
+
+type Member string
+
+func (m Member) String() string {
+ return string(m)
+}
+
+type hasher struct{}
+
+func (h hasher) Sum64(data []byte) uint64 {
+ return xxhash.Sum64(data)
+}
+
+func PickMember(members []string, key []byte) string {
+ cfg := consistent.Config{
+ PartitionCount: 9791,
+ ReplicationFactor: 2,
+ Load: 1.25,
+ Hasher: hasher{},
+ }
+
+ cmembers := []consistent.Member{}
+ for _, m := range members {
+ cmembers = append(cmembers, Member(m))
+ }
+
+ c := consistent.New(cmembers, cfg)
+
+ m := c.LocateKey(key)
+
+ return m.String()
+}
diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go
new file mode 100644
index 000000000..f58fe4e0e
--- /dev/null
+++ b/weed/messaging/broker/consistent_distribution_test.go
@@ -0,0 +1,32 @@
+package broker
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestPickMember(t *testing.T) {
+
+ servers := []string{
+ "s1:port",
+ "s2:port",
+ "s3:port",
+ "s5:port",
+ "s4:port",
+ }
+
+ total := 1000
+
+ distribution := make(map[string]int)
+ for i := 0; i < total; i++ {
+ tp := fmt.Sprintf("tp:%2d", i)
+ m := PickMember(servers, []byte(tp))
+ // println(tp, "=>", m)
+ distribution[m]++
+ }
+
+ for member, count := range distribution {
+ fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers)))
+ }
+
+}
diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go
new file mode 100644
index 000000000..b563fffa1
--- /dev/null
+++ b/weed/messaging/broker/topic_manager.go
@@ -0,0 +1,123 @@
+package broker
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+)
+
+type TopicPartition struct {
+ Namespace string
+ Topic string
+ Partition int32
+}
+
+const (
+ TopicPartitionFmt = "%s/%s_%02d"
+)
+
+func (tp *TopicPartition) String() string {
+ return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition)
+}
+
+type TopicControl struct {
+ sync.Mutex
+ cond *sync.Cond
+ subscriberCount int
+ publisherCount int
+ logBuffer *log_buffer.LogBuffer
+}
+
+type TopicManager struct {
+ sync.Mutex
+ topicControls map[TopicPartition]*TopicControl
+ broker *MessageBroker
+}
+
+func NewTopicManager(messageBroker *MessageBroker) *TopicManager {
+ return &TopicManager{
+ topicControls: make(map[TopicPartition]*TopicControl),
+ broker: messageBroker,
+ }
+}
+
+func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer {
+
+ flushFn := func(startTime, stopTime time.Time, buf []byte) {
+
+ if topicConfig.IsTransient {
+ // return
+ }
+
+ // fmt.Printf("flushing with topic config %+v\n", topicConfig)
+
+ targetFile := fmt.Sprintf(
+ "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
+ filer2.TopicsDir, tp.Namespace, tp.Topic,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ tp.Partition,
+ )
+
+ if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil {
+ glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ }
+ }
+ logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
+ tl.cond.Broadcast()
+ })
+
+ return logBuffer
+}
+
+func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl {
+ tm.Lock()
+ defer tm.Unlock()
+
+ tc, found := tm.topicControls[partition]
+ if !found {
+ tc = &TopicControl{}
+ tc.cond = sync.NewCond(&tc.Mutex)
+ tm.topicControls[partition] = tc
+ tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig)
+ }
+ if isPublisher {
+ tc.publisherCount++
+ } else {
+ tc.subscriberCount++
+ }
+ return tc
+}
+
+func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) {
+ tm.Lock()
+ defer tm.Unlock()
+
+ lock, found := tm.topicControls[partition]
+ if !found {
+ return
+ }
+ if isPublisher {
+ lock.publisherCount--
+ } else {
+ lock.subscriberCount--
+ }
+ if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
+ delete(tm.topicControls, partition)
+ lock.logBuffer.Shutdown()
+ }
+}
+
+func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) {
+ tm.Lock()
+ defer tm.Unlock()
+
+ for k := range tm.topicControls {
+ tps = append(tps, k)
+ }
+ return
+}
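RequestLock and ReleaseLock are a reference count per topic partition: the first caller builds the control block and its log buffer, and once the last publisher and subscriber are gone the buffer is shut down and the entry dropped. The Publish and Subscribe handlers use them bracket-style; in sketch form:

// Sketch: the lock discipline used by the Publish and Subscribe handlers.
func withPartition(broker *MessageBroker, cfg *messaging_pb.TopicConfiguration, payload []byte) {
	tp := TopicPartition{Namespace: "chan", Topic: "demo", Partition: 0}

	tc := broker.topicManager.RequestLock(tp, cfg, true) // true: as a publisher
	defer broker.topicManager.ReleaseLock(tp, true)      // drops refcount; may tear down

	tc.logBuffer.AddToBuffer(nil, payload) // key is nil in this sketch
}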
diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go
new file mode 100644
index 000000000..a75678815
--- /dev/null
+++ b/weed/messaging/msgclient/chan_config.go
@@ -0,0 +1,5 @@
+package msgclient
+
+func (mc *MessagingClient) DeleteChannel(chanName string) error {
+ return mc.DeleteTopic("chan", chanName)
+}
diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go
new file mode 100644
index 000000000..9bc88f7c0
--- /dev/null
+++ b/weed/messaging/msgclient/chan_pub.go
@@ -0,0 +1,76 @@
+package msgclient
+
+import (
+ "crypto/md5"
+ "hash"
+ "io"
+ "log"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type PubChannel struct {
+ client messaging_pb.SeaweedMessaging_PublishClient
+ grpcConnection *grpc.ClientConn
+ md5hash hash.Hash
+}
+
+func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) {
+ tp := broker.TopicPartition{
+ Namespace: "chan",
+ Topic: chanName,
+ Partition: 0,
+ }
+ grpcConnection, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ pc, err := setupPublisherClient(grpcConnection, tp)
+ if err != nil {
+ return nil, err
+ }
+ return &PubChannel{
+ client: pc,
+ grpcConnection: grpcConnection,
+ md5hash: md5.New(),
+ }, nil
+}
+
+func (pc *PubChannel) Publish(m []byte) error {
+ err := pc.client.Send(&messaging_pb.PublishRequest{
+ Data: &messaging_pb.Message{
+ Value: m,
+ },
+ })
+ if err == nil {
+ pc.md5hash.Write(m)
+ }
+ return err
+}
+func (pc *PubChannel) Close() error {
+
+ // println("send closing")
+ if err := pc.client.Send(&messaging_pb.PublishRequest{
+ Data: &messaging_pb.Message{
+ IsClose: true,
+ },
+ }); err != nil {
+ log.Printf("err send close: %v", err)
+ }
+ // println("receive closing")
+ if _, err := pc.client.Recv(); err != nil && err != io.EOF {
+ log.Printf("err receive close: %v", err)
+ }
+ // println("close connection")
+ if err := pc.grpcConnection.Close(); err != nil {
+ log.Printf("err connection close: %v", err)
+ }
+ return nil
+}
+
+func (pc *PubChannel) Md5() []byte {
+ return pc.md5hash.Sum(nil)
+}
diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go
new file mode 100644
index 000000000..213ff4666
--- /dev/null
+++ b/weed/messaging/msgclient/chan_sub.go
@@ -0,0 +1,85 @@
+package msgclient
+
+import (
+ "context"
+ "crypto/md5"
+ "hash"
+ "io"
+ "log"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type SubChannel struct {
+ ch chan []byte
+ stream messaging_pb.SeaweedMessaging_SubscribeClient
+ md5hash hash.Hash
+ cancel context.CancelFunc
+}
+
+func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) {
+ tp := broker.TopicPartition{
+ Namespace: "chan",
+ Topic: chanName,
+ Partition: 0,
+ }
+ grpcConnection, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ sc, err := setupSubscriberClient(ctx, grpcConnection, tp, subscriberId, time.Unix(0, 0))
+ if err != nil {
+ return nil, err
+ }
+
+ t := &SubChannel{
+ ch: make(chan []byte),
+ stream: sc,
+ md5hash: md5.New(),
+ cancel: cancel,
+ }
+
+ go func() {
+ for {
+ resp, subErr := t.stream.Recv()
+ if subErr == io.EOF {
+ return
+ }
+ if subErr != nil {
+ log.Printf("fail to receive from netchan %s: %v", chanName, subErr)
+ return
+ }
+ if resp.Data == nil {
+ // this could be heartbeat from broker
+ continue
+ }
+ if resp.Data.IsClose {
+ t.stream.Send(&messaging_pb.SubscriberMessage{
+ IsClose: true,
+ })
+ close(t.ch)
+ cancel()
+ return
+ }
+ t.ch <- resp.Data.Value
+ t.md5hash.Write(resp.Data.Value)
+ }
+ }()
+
+ return t, nil
+}
+
+func (sc *SubChannel) Channel() chan []byte {
+ return sc.ch
+}
+
+func (sc *SubChannel) Md5() []byte {
+ return sc.md5hash.Sum(nil)
+}
+
+func (sc *SubChannel) Cancel() {
+ sc.cancel()
+}
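Together, PubChannel and SubChannel give net-chan semantics over a topic in the reserved "chan" namespace: Publish feeds the subscriber's Go channel, and Close propagates as a channel close. A hypothetical round trip, inside a main function with imports elided; the broker address is an assumption:

mc := msgclient.NewMessagingClient("localhost:17777") // assumed broker address

sub, err := mc.NewSubChannel("demo-subscriber", "demo")
if err != nil {
	log.Fatal(err)
}
done := make(chan struct{})
go func() {
	defer close(done)
	for m := range sub.Channel() { // range ends when the publisher closes
		fmt.Printf("received: %s\n", m)
	}
}()

pub, err := mc.NewPubChannel("demo")
if err != nil {
	log.Fatal(err)
}
pub.Publish([]byte("hello"))
pub.Close() // sends IsClose; the broker relays it and sub.Channel() closes
<-done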
diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go
new file mode 100644
index 000000000..4d7ef2b8e
--- /dev/null
+++ b/weed/messaging/msgclient/client.go
@@ -0,0 +1,55 @@
+package msgclient
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type MessagingClient struct {
+ bootstrapBrokers []string
+ grpcConnections map[broker.TopicPartition]*grpc.ClientConn
+ grpcDialOption grpc.DialOption
+}
+
+func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient {
+ return &MessagingClient{
+ bootstrapBrokers: bootstrapBrokers,
+ grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn),
+ grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"),
+ }
+}
+
+func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) {
+
+ for _, broker := range mc.bootstrapBrokers {
+ grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption)
+ if err != nil {
+ log.Printf("dial broker %s: %v", broker, err)
+ continue
+ }
+ defer grpcConnection.Close()
+
+ resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(),
+ &messaging_pb.FindBrokerRequest{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Parition: tp.Partition,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ targetBroker := resp.Broker
+ return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption)
+ }
+ return nil, fmt.Errorf("no broker found for %+v", tp)
+}
diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go
new file mode 100644
index 000000000..2b9eba1a8
--- /dev/null
+++ b/weed/messaging/msgclient/config.go
@@ -0,0 +1,63 @@
+package msgclient
+
+import (
+ "context"
+ "log"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error {
+
+ return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error {
+ _, err := client.ConfigureTopic(context.Background(),
+ &messaging_pb.ConfigureTopicRequest{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Configuration: &messaging_pb.TopicConfiguration{
+ PartitionCount: 0,
+ Collection: "",
+ Replication: "",
+ IsTransient: false,
+ Partitoning: 0,
+ },
+ })
+ return err
+ })
+
+}
+
+func (mc *MessagingClient) DeleteTopic(namespace, topic string) error {
+
+ return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error {
+ _, err := client.DeleteTopic(context.Background(),
+ &messaging_pb.DeleteTopicRequest{
+ Namespace: namespace,
+ Topic: topic,
+ })
+ return err
+ })
+}
+
+func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error {
+
+ var lastErr error
+ for _, broker := range mc.bootstrapBrokers {
+ grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption)
+ if err != nil {
+ log.Printf("dial broker %s: %v", broker, err)
+ continue
+ }
+ defer grpcConnection.Close()
+
+ err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection))
+ if err == nil {
+ return nil
+ }
+ lastErr = err
+ }
+
+ return lastErr
+}
diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go
new file mode 100644
index 000000000..1aa483ff8
--- /dev/null
+++ b/weed/messaging/msgclient/publisher.go
@@ -0,0 +1,118 @@
+package msgclient
+
+import (
+ "context"
+
+ "github.com/OneOfOne/xxhash"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type Publisher struct {
+ publishClients []messaging_pb.SeaweedMessaging_PublishClient
+ topicConfiguration *messaging_pb.TopicConfiguration
+ messageCount uint64
+ publisherId string
+}
+
+func (mc *MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) {
+ // read topic configuration
+ topicConfiguration := &messaging_pb.TopicConfiguration{
+ PartitionCount: 4,
+ }
+ publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount)
+ for i := 0; i < int(topicConfiguration.PartitionCount); i++ {
+ tp := broker.TopicPartition{
+ Namespace: namespace,
+ Topic: topic,
+ Partition: int32(i),
+ }
+ grpcClientConn, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ client, err := setupPublisherClient(grpcClientConn, tp)
+ if err != nil {
+ return nil, err
+ }
+ publishClients[i] = client
+ }
+ return &Publisher{
+ publishClients: publishClients,
+ topicConfiguration: topicConfiguration,
+ }, nil
+}
+
+func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) {
+
+ stream, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background())
+ if err != nil {
+ return nil, err
+ }
+
+ // send init message
+ err = stream.Send(&messaging_pb.PublishRequest{
+ Init: &messaging_pb.PublishRequest_InitMessage{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Partition: tp.Partition,
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // process init response
+ initResponse, err := stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ if initResponse.Redirect != nil {
+ // TODO follow redirection
+ }
+ if initResponse.Config != nil {
+ // TODO apply the returned topic configuration
+ }
+
+ // setup looks for control messages
+ doneChan := make(chan error, 1)
+ go func() {
+ for {
+ in, err := stream.Recv()
+ if err != nil {
+ doneChan <- err
+ return
+ }
+ if in.Redirect != nil {
+ // TODO follow redirection mid-stream
+ }
+ if in.Config != nil {
+ // TODO apply configuration changes mid-stream
+ }
+ }
+ }()
+
+ return stream, nil
+
+}
+
+func (p *Publisher) Publish(m *messaging_pb.Message) error {
+ hashValue := p.messageCount
+ p.messageCount++
+ if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash {
+ if m.Key != nil {
+ hashValue = xxhash.Checksum64(m.Key)
+ }
+ } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash {
+ hashValue = xxhash.Checksum64(m.Key)
+ } else {
+ // round robin
+ }
+
+ idx := int(hashValue) % len(p.publishClients)
+ if idx < 0 {
+ idx += len(p.publishClients)
+ }
+ return p.publishClients[idx].Send(&messaging_pb.PublishRequest{
+ Data: m,
+ })
+}
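Partition selection above hashes non-nil keys with xxhash and otherwise round-robins on a message counter; casting the uint64 hash to int can go negative, hence the idx fix-up. The routing rule in isolation:

// Sketch of the routing rule in Publish.
func pickPartition(key []byte, counter uint64, partitions int) int {
	h := counter // round robin by default
	if key != nil {
		h = xxhash.Checksum64(key) // stable partition for a given key
	}
	idx := int(h) % partitions
	if idx < 0 { // int(uint64) may be negative after overflow
		idx += partitions
	}
	return idx
}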
diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go
new file mode 100644
index 000000000..6c7dc1ab7
--- /dev/null
+++ b/weed/messaging/msgclient/subscriber.go
@@ -0,0 +1,120 @@
+package msgclient
+
+import (
+ "context"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "google.golang.org/grpc"
+)
+
+type Subscriber struct {
+ subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient
+ subscriberCancels []context.CancelFunc
+ subscriberId string
+}
+
+func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) {
+ // read topic configuration
+ topicConfiguration := &messaging_pb.TopicConfiguration{
+ PartitionCount: 4,
+ }
+ subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount)
+ subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount)
+
+ for i := 0; i < int(topicConfiguration.PartitionCount); i++ {
+ if partitionId >= 0 && i != partitionId {
+ continue
+ }
+ tp := broker.TopicPartition{
+ Namespace: namespace,
+ Topic: topic,
+ Partition: int32(i),
+ }
+ grpcClientConn, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime)
+ if err != nil {
+ return nil, err
+ }
+ subscriberClients[i] = client
+ subscriberCancels[i] = cancel
+ }
+
+ return &Subscriber{
+ subscriberClients: subscriberClients,
+ subscriberCancels: subscriberCancels,
+ subscriberId: subscriberId,
+ }, nil
+}
+
+func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) {
+ stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx)
+ if err != nil {
+ return
+ }
+
+ // send init message
+ err = stream.Send(&messaging_pb.SubscriberMessage{
+ Init: &messaging_pb.SubscriberMessage_InitMessage{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Partition: tp.Partition,
+ StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP,
+ TimestampNs: startTime.UnixNano(),
+ SubscriberId: subscriberId,
+ },
+ })
+ if err != nil {
+ return
+ }
+
+ return stream, nil
+}
+
+func doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error {
+ for {
+ resp, listenErr := subscriberClient.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ println(listenErr.Error())
+ return listenErr
+ }
+ if resp.Data == nil {
+ // this could be heartbeat from broker
+ continue
+ }
+ processFn(resp.Data)
+ }
+}
+
+// Subscribe starts goroutines to process the messages
+func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) {
+ var wg sync.WaitGroup
+ for i := 0; i < len(s.subscriberClients); i++ {
+ if s.subscriberClients[i] != nil {
+ wg.Add(1)
+ go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) {
+ defer wg.Done()
+ doSubscribe(subscriberClient, processFn)
+ }(s.subscriberClients[i])
+ }
+ }
+ wg.Wait()
+}
+
+func (s *Subscriber) Shutdown() {
+ for i := 0; i < len(s.subscriberClients); i++ {
+ if s.subscriberCancels[i] != nil {
+ s.subscriberCancels[i]()
+ }
+ }
+}
diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go
index c1af7f27a..d881049dd 100644
--- a/weed/notification/aws_sqs/aws_sqs_pub.go
+++ b/weed/notification/aws_sqs/aws_sqs_pub.go
@@ -27,24 +27,24 @@ func (k *AwsSqsPub) GetName() string {
return "aws_sqs"
}
-func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) {
- glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region"))
- glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name"))
+func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) {
+ glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+ glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
- configuration.GetString("aws_access_key_id"),
- configuration.GetString("aws_secret_access_key"),
- configuration.GetString("region"),
- configuration.GetString("sqs_queue_name"),
+ configuration.GetString(prefix+"aws_access_key_id"),
+ configuration.GetString(prefix+"aws_secret_access_key"),
+ configuration.GetString(prefix+"region"),
+ configuration.GetString(prefix+"sqs_queue_name"),
)
}
-func (k *AwsSqsPub) initialize(awsAccessKeyId, aswSecretAccessKey, region, queueName string) (err error) {
+func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) {
config := &aws.Config{
Region: aws.String(region),
}
- if awsAccessKeyId != "" && aswSecretAccessKey != "" {
- config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "")
+ if awsAccessKeyId != "" && awsSecretAccessKey != "" {
+ config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
}
sess, err := session.NewSession(config)
diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go
index 7f8765cc3..36211692c 100644
--- a/weed/notification/configuration.go
+++ b/weed/notification/configuration.go
@@ -11,7 +11,7 @@ type MessageQueue interface {
// GetName gets the name to locate the configuration in filer.toml file
GetName() string
// Initialize initializes the file store
- Initialize(configuration util.Configuration) error
+ Initialize(configuration util.Configuration, prefix string) error
SendMessage(key string, message proto.Message) error
}
@@ -21,7 +21,7 @@ var (
Queue MessageQueue
)
-func LoadConfiguration(config *viper.Viper) {
+func LoadConfiguration(config *viper.Viper, prefix string) {
if config == nil {
return
@@ -30,9 +30,8 @@ func LoadConfiguration(config *viper.Viper) {
validateOneEnabledQueue(config)
for _, queue := range MessageQueues {
- if config.GetBool(queue.GetName() + ".enabled") {
- viperSub := config.Sub(queue.GetName())
- if err := queue.Initialize(viperSub); err != nil {
+ if config.GetBool(prefix + queue.GetName() + ".enabled") {
+ if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification for %s: %+v",
queue.GetName(), err)
}
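Rather than materializing a sub-tree with config.Sub, each queue now receives the full configuration plus a key prefix such as "notification.kafka.", and plugins concatenate the prefix with their own key names. A small sketch of how a key resolves under the new scheme, with illustrative values:

// Sketch: flat-key lookup under the new prefix scheme.
v := viper.New()
v.Set("notification.kafka.enabled", true)
v.Set("notification.kafka.hosts", []string{"kafka1:9092"})

prefix := "notification." + "kafka" + "." // as built in LoadConfiguration
if v.GetBool(prefix + "enabled") {
	hosts := v.GetStringSlice(prefix + "hosts") // ["kafka1:9092"]
	fmt.Println(hosts)
}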
diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
new file mode 100644
index 000000000..1ae102509
--- /dev/null
+++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
@@ -0,0 +1,71 @@
+// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API,
+// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus,
+// Google Cloud PubSub, and RabbitMQ.
+//
+// In the config, select a provider and topic using a URL. See
+// https://godoc.org/gocloud.dev/pubsub and its sub-packages for details.
+//
+// The Go CDK PubSub API does not support administrative operations like topic
+// creation. Create the topic using a UI, CLI or provider-specific API before running
+// weed.
+//
+// The Go CDK obtains credentials via environment variables and other
+// provider-specific default mechanisms. See the provider's documentation for
+// details.
+package gocdk_pub_sub
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "gocloud.dev/pubsub"
+ _ "gocloud.dev/pubsub/awssnssqs"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/notification"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ // _ "gocloud.dev/pubsub/azuresb"
+ _ "gocloud.dev/pubsub/gcppubsub"
+ _ "gocloud.dev/pubsub/natspubsub"
+ _ "gocloud.dev/pubsub/rabbitpubsub"
+)
+
+func init() {
+ notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{})
+}
+
+type GoCDKPubSub struct {
+ topicURL string
+ topic *pubsub.Topic
+}
+
+func (k *GoCDKPubSub) GetName() string {
+ return "gocdk_pub_sub"
+}
+
+func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
+ k.topicURL = configuration.GetString(prefix + "topic_url")
+ glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
+ topic, err := pubsub.OpenTopic(context.Background(), k.topicURL)
+ if err != nil {
+ glog.Fatalf("Failed to open topic: %v", err)
+ }
+ k.topic = topic
+ return nil
+}
+
+func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error {
+ bytes, err := proto.Marshal(message)
+ if err != nil {
+ return err
+ }
+ err = k.topic.Send(context.Background(), &pubsub.Message{
+ Body: bytes,
+ Metadata: map[string]string{"key": key},
+ })
+ if err != nil {
+ return fmt.Errorf("send message via Go CDK pubsub %s: %v", k.topicURL, err)
+ }
+ return nil
+}
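The provider behind SendMessage is chosen entirely by the scheme of topic_url; nothing else in the plugin changes per provider. An illustrative sketch (the URLs are examples, not values from this change; consult the gocloud.dev/pubsub docs for exact forms):

// Sketch: opening topics on different providers by URL scheme alone.
topic, err := pubsub.OpenTopic(context.Background(), "rabbit://seaweedfs_filer")
// gcppubsub://projects/my-project/topics/my-topic   (Google Cloud Pub/Sub)
// nats://my.subject                                 (NATS)
if err != nil {
	glog.Fatalf("open topic: %v", err)
}
defer topic.Shutdown(context.Background())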
diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go
index 7b26bfe38..363a86eb6 100644
--- a/weed/notification/google_pub_sub/google_pub_sub.go
+++ b/weed/notification/google_pub_sub/google_pub_sub.go
@@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string {
return "google_pub_sub"
}
-func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) {
- glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id"))
- glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic"))
+func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) {
+ glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+ glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetString("google_application_credentials"),
- configuration.GetString("project_id"),
- configuration.GetString("topic"),
+ configuration.GetString(prefix+"google_application_credentials"),
+ configuration.GetString(prefix+"project_id"),
+ configuration.GetString(prefix+"topic"),
)
}
diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go
index 830709a51..8d83b5892 100644
--- a/weed/notification/kafka/kafka_queue.go
+++ b/weed/notification/kafka/kafka_queue.go
@@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string {
return "kafka"
}
-func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) {
- glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts"))
- glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic"))
+func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
+ glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+ glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetStringSlice("hosts"),
- configuration.GetString("topic"),
+ configuration.GetStringSlice(prefix+"hosts"),
+ configuration.GetString(prefix+"topic"),
)
}
@@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() {
for {
err := <-k.producer.Errors()
if err != nil {
- glog.Errorf("producer message error, partition:%d offset:%d key:%v valus:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
+ glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
}
}
}
diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go
index dcc038dfc..1ca4786a1 100644
--- a/weed/notification/log/log_queue.go
+++ b/weed/notification/log/log_queue.go
@@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string {
return "log"
}
-func (k *LogQueue) Initialize(configuration util.Configuration) (err error) {
+func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
return nil
}
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index 00e1caad5..893bf516c 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -3,30 +3,36 @@ package operation
import (
"context"
"fmt"
- "time"
+ "strings"
+
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type VolumeAssignRequest struct {
- Count uint64
- Replication string
- Collection string
- Ttl string
- DataCenter string
- Rack string
- DataNode string
+ Count uint64
+ Replication string
+ Collection string
+ Ttl string
+ DataCenter string
+ Rack string
+ DataNode string
+ WritableVolumeCount uint32
}
type AssignResult struct {
- Fid string `json:"fid,omitempty"`
- Url string `json:"url,omitempty"`
- PublicUrl string `json:"publicUrl,omitempty"`
- Count uint64 `json:"count,omitempty"`
- Error string `json:"error,omitempty"`
+ Fid string `json:"fid,omitempty"`
+ Url string `json:"url,omitempty"`
+ PublicUrl string `json:"publicUrl,omitempty"`
+ Count uint64 `json:"count,omitempty"`
+ Error string `json:"error,omitempty"`
+ Auth security.EncodedJwt `json:"auth,omitempty"`
}
-func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
+func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
var requests []*VolumeAssignRequest
requests = append(requests, primaryRequest)
@@ -40,20 +46,19 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
continue
}
- lastError = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
+ lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.AssignRequest{
- Count: primaryRequest.Count,
- Replication: primaryRequest.Replication,
- Collection: primaryRequest.Collection,
- Ttl: primaryRequest.Ttl,
- DataCenter: primaryRequest.DataCenter,
- Rack: primaryRequest.Rack,
- DataNode: primaryRequest.DataNode,
+ Count: primaryRequest.Count,
+ Replication: primaryRequest.Replication,
+ Collection: primaryRequest.Collection,
+ Ttl: primaryRequest.Ttl,
+ DataCenter: primaryRequest.DataCenter,
+ Rack: primaryRequest.Rack,
+ DataNode: primaryRequest.DataNode,
+ WritableVolumeCount: primaryRequest.WritableVolumeCount,
}
- resp, grpcErr := masterClient.Assign(ctx, req)
+ resp, grpcErr := masterClient.Assign(context.Background(), req)
if grpcErr != nil {
return grpcErr
}
@@ -63,6 +68,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
ret.Url = resp.Url
ret.PublicUrl = resp.PublicUrl
ret.Error = resp.Error
+ ret.Auth = security.EncodedJwt(resp.Auth)
return nil
@@ -81,3 +87,17 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
return ret, lastError
}
+
+func LookupJwt(master string, fileId string) security.EncodedJwt {
+
+ tokenStr := ""
+
+ if h, e := util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil {
+ bearer := h.Get("Authorization")
+ if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" {
+ tokenStr = bearer[7:]
+ }
+ }
+
+ return security.EncodedJwt(tokenStr)
+}
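Callers of Assign now pass a grpc.DialOption and get back a signed JWT alongside the file id, so the subsequent upload can be authorized without a second round trip. A usage sketch; the master address and dial option are assumptions:

// Sketch: assign a file id, then derive the upload target and JWT.
func assignSketch(grpcDialOption grpc.DialOption) error {
	result, err := operation.Assign("localhost:9333", grpcDialOption,
		&operation.VolumeAssignRequest{
			Count:       1,
			Replication: "000",
		})
	if err != nil {
		return err
	}
	targetUrl := fmt.Sprintf("http://%s/%s", result.Url, result.Fid)
	_ = targetUrl   // upload destination on the assigned volume server
	_ = result.Auth // JWT authorizing the upload
	return nil
}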
diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go
index 9d8267dee..653b7bf13 100644
--- a/weed/operation/chunked_file.go
+++ b/weed/operation/chunked_file.go
@@ -5,11 +5,13 @@ import (
"errors"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"sort"
-
"sync"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -38,22 +40,23 @@ type ChunkManifest struct {
// seekable chunked file reader
type ChunkedFileReader struct {
- Manifest *ChunkManifest
- Master string
- pos int64
- pr *io.PipeReader
- pw *io.PipeWriter
- mutex sync.Mutex
+ totalSize int64
+ chunkList []*ChunkInfo
+ master string
+ pos int64
+ pr *io.PipeReader
+ pw *io.PipeWriter
+ mutex sync.Mutex
}
func (s ChunkList) Len() int { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
- if isGzipped {
+func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) {
+ if isCompressed {
var err error
- if buffer, err = UnGzipData(buffer); err != nil {
+ if buffer, err = util.DecompressData(buffer); err != nil {
return nil, err
}
}
@@ -69,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
return json.Marshal(cm)
}
-func (cm *ChunkManifest) DeleteChunks(master string) error {
+func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
var fileIds []string
for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid)
}
- results, err := DeleteFiles(master, fileIds)
+ results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
@@ -102,7 +105,10 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64,
if err != nil {
return written, err
}
- defer resp.Body.Close()
+ defer func() {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }()
switch resp.StatusCode {
case http.StatusRequestedRangeNotSatisfiable:
@@ -120,16 +126,29 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64,
return io.Copy(w, resp.Body)
}
+func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader {
+ var totalSize int64
+ for _, chunk := range chunkList {
+ totalSize += chunk.Size
+ }
+ sort.Sort(ChunkList(chunkList))
+ return &ChunkedFileReader{
+ totalSize: totalSize,
+ chunkList: chunkList,
+ master: master,
+ }
+}
+
func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
- case 0:
- case 1:
+ case io.SeekStart:
+ case io.SeekCurrent:
offset += cf.pos
- case 2:
- offset = cf.Manifest.Size - offset
+ case io.SeekEnd:
+ offset = cf.totalSize + offset
}
- if offset > cf.Manifest.Size {
+ if offset > cf.totalSize {
err = ErrInvalidRange
}
if cf.pos != offset {
@@ -140,10 +159,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
}
func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
- cm := cf.Manifest
chunkIndex := -1
chunkStartOffset := int64(0)
- for i, ci := range cm.Chunks {
+ for i, ci := range cf.chunkList {
if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
chunkIndex = i
chunkStartOffset = cf.pos - ci.Offset
@@ -153,10 +171,10 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
if chunkIndex < 0 {
return n, ErrInvalidRange
}
- for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ {
- ci := cm.Chunks[chunkIndex]
+ for ; chunkIndex < len(cf.chunkList); chunkIndex++ {
+ ci := cf.chunkList[chunkIndex]
 // do we need to read data from the local volume server first?
- fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid)
+ fileUrl, lookupError := LookupFileId(cf.master, ci.Fid)
if lookupError != nil {
return n, lookupError
}
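The reader is now built through a constructor that sums chunk sizes and sorts by offset, and Seek speaks the io.Seek* constants. Note the io.SeekEnd case now adds the (normally negative) offset to the total size, matching the io.Seeker contract, where the old code subtracted. Reading the tail of a chunked file becomes, in sketch form (chunkList would come from a loaded chunk manifest; the master address is an assumption):

// Sketch: stream the last KiB of a chunked file to stdout.
func tailSketch(chunkList []*operation.ChunkInfo) error {
	reader := operation.NewChunkedFileReader(chunkList, "localhost:9333")
	if _, err := reader.Seek(-1024, io.SeekEnd); err != nil {
		return err
	}
	_, err := reader.WriteTo(os.Stdout)
	return err
}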
diff --git a/weed/operation/compress.go b/weed/operation/compress.go
deleted file mode 100644
index 65979d529..000000000
--- a/weed/operation/compress.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package operation
-
-import (
- "bytes"
- "compress/flate"
- "compress/gzip"
- "io/ioutil"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "golang.org/x/tools/godoc/util"
-)
-
-/*
-* Default more not to gzip since gzip can be done on client side.
- */
-func IsGzippable(ext, mtype string, data []byte) bool {
-
- // text
- if strings.HasPrefix(mtype, "text/") {
- return true
- }
-
- // images
- switch ext {
- case ".svg", ".bmp":
- return true
- }
- if strings.HasPrefix(mtype, "image/") {
- return false
- }
-
- // by file name extention
- switch ext {
- case ".zip", ".rar", ".gz", ".bz2", ".xz":
- return false
- case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json":
- return true
- case ".php", ".java", ".go", ".rb", ".c", ".cpp", ".h", ".hpp":
- return true
- case ".png", ".jpg", ".jpeg":
- return false
- }
-
- // by mime type
- if strings.HasPrefix(mtype, "application/") {
- if strings.HasSuffix(mtype, "xml") {
- return true
- }
- if strings.HasSuffix(mtype, "script") {
- return true
- }
- }
-
- isMostlyText := util.IsText(data)
-
- return isMostlyText
-}
-
-func GzipData(input []byte) ([]byte, error) {
- buf := new(bytes.Buffer)
- w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
- if _, err := w.Write(input); err != nil {
- glog.V(2).Infoln("error compressing data:", err)
- return nil, err
- }
- if err := w.Close(); err != nil {
- glog.V(2).Infoln("error closing compressed data:", err)
- return nil, err
- }
- return buf.Bytes(), nil
-}
-func UnGzipData(input []byte) ([]byte, error) {
- buf := bytes.NewBuffer(input)
- r, _ := gzip.NewReader(buf)
- defer r.Close()
- output, err := ioutil.ReadAll(r)
- if err != nil {
- glog.V(2).Infoln("error uncompressing data:", err)
- }
- return output, err
-}
diff --git a/weed/operation/data_struts.go b/weed/operation/data_struts.go
index bfc53aa50..4980f9913 100644
--- a/weed/operation/data_struts.go
+++ b/weed/operation/data_struts.go
@@ -2,6 +2,5 @@ package operation
type JoinResult struct {
VolumeSizeLimit uint64 `json:"VolumeSizeLimit,omitempty"`
- SecretKey string `json:"secretKey,omitempty"`
Error string `json:"error,omitempty"`
}
diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go
index 3e468e1a3..9868a411d 100644
--- a/weed/operation/delete_content.go
+++ b/weed/operation/delete_content.go
@@ -7,7 +7,8 @@ import (
"net/http"
"strings"
"sync"
- "time"
+
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
@@ -28,17 +29,25 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
}
// DeleteFiles batch deletes a list of fileIds
-func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
-
- lookupFunc := func(vids []string) (map[string]LookupResult, error) {
- return LookupVolumeIds(master, vids)
+func DeleteFiles(master string, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
+
+ lookupFunc := func(vids []string) (results map[string]LookupResult, err error) {
+ results, err = LookupVolumeIds(master, grpcDialOption, vids)
+ if err == nil && usePublicUrl {
+ for _, result := range results {
+ for _, loc := range result.Locations {
+ loc.Url = loc.PublicUrl
+ }
+ }
+ }
+ return
}
- return DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
+ return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
}
-func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
+func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
var ret []*volume_server_pb.DeleteResult
@@ -48,7 +57,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin
vid, _, err := ParseFileId(fileId)
if err != nil {
ret = append(ret, &volume_server_pb.DeleteResult{
- FileId: vid,
+ FileId: fileId,
Status: http.StatusBadRequest,
Error: err.Error()},
)
@@ -85,38 +94,42 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin
}
}
+ resultChan := make(chan []*volume_server_pb.DeleteResult, len(server_to_fileIds))
var wg sync.WaitGroup
-
for server, fidList := range server_to_fileIds {
wg.Add(1)
go func(server string, fidList []string) {
defer wg.Done()
- if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil {
+ if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil {
err = deleteErr
- } else {
- ret = append(ret, deleteResults...)
+ } else if deleteResults != nil {
+ resultChan <- deleteResults
}
}(server, fidList)
}
wg.Wait()
+ close(resultChan)
+
+ for result := range resultChan {
+ ret = append(ret, result...)
+ }
return ret, err
}
// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
-func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
+func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) {
- err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
+ err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
req := &volume_server_pb.BatchDeleteRequest{
- FileIds: fileIds,
+ FileIds: fileIds,
+ SkipCookieCheck: !includeCookie,
}
- resp, err := volumeServerClient.BatchDelete(ctx, req)
+ resp, err := volumeServerClient.BatchDelete(context.Background(), req)
// fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp)
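
The rewrite above stops appending to the shared ret slice from inside the
goroutines, which was a data race; each goroutine now sends its batch into a
channel buffered to the number of servers, and the results are drained only
after wg.Wait() and close(). The shape of that fan-out/fan-in pattern, as a
self-contained sketch with illustrative names:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        batches := map[string][]string{
            "serverA": {"3,01637037d6", "3,01637037d7"},
            "serverB": {"4,02b2c8b4d1"},
        }

        // buffered to len(batches) so no sender ever blocks
        resultChan := make(chan []string, len(batches))
        var wg sync.WaitGroup
        for server, fids := range batches {
            wg.Add(1)
            go func(server string, fids []string) {
                defer wg.Done()
                // stand-in for DeleteFilesAtOneVolumeServer
                resultChan <- fids
            }(server, fids)
        }
        wg.Wait()
        close(resultChan) // safe: all senders have finished

        var ret []string
        for batch := range resultChan {
            ret = append(ret, batch...)
        }
        fmt.Println(len(ret)) // 3
    }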
diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go
index d0931a8d3..025a65b38 100644
--- a/weed/operation/grpc_client.go
+++ b/weed/operation/grpc_client.go
@@ -4,31 +4,27 @@ import (
"fmt"
"strconv"
"strings"
- "sync"
+
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
)
-var (
- grpcClients = make(map[string]*grpc.ClientConn)
- grpcClientsLock sync.Mutex
-)
-
-func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.VolumeServerClient) error) error {
+func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
grpcAddress, err := toVolumeServerGrpcAddress(volumeServer)
if err != nil {
- return err
+ return fmt.Errorf("failed to parse volume server %v: %v", volumeServer, err)
}
- return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := volume_server_pb.NewVolumeServerClient(grpcConnection)
return fn(client)
- }, grpcAddress)
+ }, grpcAddress, grpcDialOption)
}
@@ -42,16 +38,30 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil
}
-func withMasterServerClient(masterServer string, fn func(masterClient master_pb.SeaweedClient) error) error {
+func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error {
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0)
+ masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer)
if parseErr != nil {
- return fmt.Errorf("failed to parse master grpc %v", masterServer)
+ return fmt.Errorf("failed to parse master %v: %v", masterServer, parseErr)
}
- return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := master_pb.NewSeaweedClient(grpcConnection)
return fn(client)
- }, masterGrpcAddress)
+ }, masterGrpcAddress, grpcDialOption)
+
+}
+
+func WithFilerServerClient(filerServer string, grpcDialOption grpc.DialOption, fn func(filerClient filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(filerServer)
+ if parseErr != nil {
+		return fmt.Errorf("failed to parse filer %v: %v", filerServer, parseErr)
+ }
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, filerGrpcAddress, grpcDialOption)
}
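
All three helpers now thread a grpc.DialOption through to the shared
connection cache in weed/pb. A hedged usage sketch of the master helper;
lookupOnMaster is a hypothetical caller, and grpc.WithInsecure() is only the
simplest dial option for illustration:

    package operation

    import (
        "context"
        "fmt"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    )

    // lookupOnMaster asks the master for the locations of some volumes.
    func lookupOnMaster(master string, vids []string) error {
        return WithMasterServerClient(master, grpc.WithInsecure(), func(client master_pb.SeaweedClient) error {
            resp, err := client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{
                VolumeIds: vids,
            })
            if err != nil {
                return err
            }
            fmt.Printf("lookup result: %v\n", resp)
            return nil
        })
    }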
diff --git a/weed/operation/list_masters.go b/weed/operation/list_masters.go
deleted file mode 100644
index 75838de4d..000000000
--- a/weed/operation/list_masters.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package operation
-
-import (
- "encoding/json"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-type ClusterStatusResult struct {
- IsLeader bool `json:"IsLeader,omitempty"`
- Leader string `json:"Leader,omitempty"`
- Peers []string `json:"Peers,omitempty"`
-}
-
-func ListMasters(server string) (leader string, peers []string, err error) {
- jsonBlob, err := util.Get("http://" + server + "/cluster/status")
- glog.V(2).Info("list masters result :", string(jsonBlob))
- if err != nil {
- return "", nil, err
- }
- var ret ClusterStatusResult
- err = json.Unmarshal(jsonBlob, &ret)
- if err != nil {
- return "", nil, err
- }
- peers = ret.Peers
- if ret.IsLeader {
- peers = append(peers, ret.Leader)
- }
- return ret.Leader, peers, nil
-}
diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go
index 562a11580..d0773e7fd 100644
--- a/weed/operation/lookup.go
+++ b/weed/operation/lookup.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "google.golang.org/grpc"
"math/rand"
"net/url"
"strings"
@@ -78,7 +79,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) {
}
// LookupVolumeIds find volume locations by cache and actual lookup
-func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, error) {
+func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
ret := make(map[string]LookupResult)
var unknown_vids []string
@@ -98,14 +99,12 @@ func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, err
//only query unknown_vids
- err := withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
+ err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupVolumeRequest{
VolumeIds: unknown_vids,
}
- resp, grpcErr := masterClient.LookupVolume(ctx, req)
+ resp, grpcErr := masterClient.LookupVolume(context.Background(), req)
if grpcErr != nil {
return grpcErr
}
diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go
new file mode 100644
index 000000000..20a610eaa
--- /dev/null
+++ b/weed/operation/needle_parse_test.go
@@ -0,0 +1,130 @@
+package operation
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type MockClient struct {
+ needleHandling func(n *needle.Needle, originalSize int, e error)
+}
+
+func (m *MockClient) Do(req *http.Request) (*http.Response, error) {
+ n, originalSize, err := needle.CreateNeedleFromRequest(req, 1024*1024)
+ if m.needleHandling != nil {
+ m.needleHandling(n, originalSize, err)
+ }
+ return &http.Response{
+ StatusCode: http.StatusNoContent,
+ }, io.EOF
+}
+
+/*
+
+The mime type is always the value passed in.
+
+Whether to compress depends on content detection, the file name extension, and the compression ratio.
+
+If the content is already compressed, the original content size needs to be known.
+
+*/
+
+func TestCreateNeedleFromRequest(t *testing.T) {
+ mc := &MockClient{}
+ tmp := HttpClient
+ HttpClient = mc
+ defer func() {
+ HttpClient = tmp
+ }()
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
+ assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip")
+ fmt.Printf("needle: %v, originalSize: %d\n", n, originalSize)
+ }
+ uploadResult, err, data := Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader([]byte(textContent)), false, "", nil, "")
+ if len(data) != len(textContent) {
+ t.Errorf("data actual %d expected %d", len(data), len(textContent))
+ }
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ }
+ fmt.Printf("uploadResult: %+v\n", uploadResult)
+ }
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
+ assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip")
+ fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize)
+ }
+ gzippedData, _ := util.GzipData([]byte(textContent))
+ Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(gzippedData), true, "text/plain", nil, "")
+ }
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
+ assert.Equal(t, true, util.IsZstdContent(n.Data), "this should be zstd")
+ fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize)
+ }
+ zstdData, _ := util.ZstdData([]byte(textContent))
+ Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), true, "text/plain", nil, "")
+ }
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "application/zstd", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, false, n.IsCompressed(), "this should not be compressed")
+ assert.Equal(t, true, util.IsZstdContent(n.Data), "this should still be zstd")
+ fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize)
+ }
+ zstdData, _ := util.ZstdData([]byte(textContent))
+ Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), false, "application/zstd", nil, "")
+ }
+
+}
+
+var textContent = `Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+`
\ No newline at end of file
diff --git a/weed/operation/stats.go b/weed/operation/stats.go
deleted file mode 100644
index 364727272..000000000
--- a/weed/operation/stats.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package operation
-
-import (
- "context"
- "time"
-
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
-)
-
-func Statistics(server string, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) {
-
- err = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
-
- grpcResponse, grpcErr := masterClient.Statistics(ctx, req)
- if grpcErr != nil {
- return grpcErr
- }
-
- resp = grpcResponse
-
- return nil
-
- })
-
- return
-}
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 7a1a3085e..e8bec382a 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -1,7 +1,6 @@
package operation
import (
- "bytes"
"io"
"mime"
"net/url"
@@ -10,6 +9,8 @@ import (
"strconv"
"strings"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
)
@@ -26,6 +27,7 @@ type FilePart struct {
Ttl string
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
+ Fsync bool
}
type SubmitResult struct {
@@ -36,10 +38,7 @@ type SubmitResult struct {
Error string `json:"error,omitempty"`
}
-func SubmitFiles(master string, files []FilePart,
- replication string, collection string, dataCenter string, ttl string, maxMB int,
- secret security.Secret,
-) ([]SubmitResult, error) {
+func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
results[index].FileName = file.FileName
@@ -51,9 +50,9 @@ func SubmitFiles(master string, files []FilePart,
DataCenter: dataCenter,
Ttl: ttl,
}
- ret, err := Assign(master, ar)
+ ret, err := Assign(master, grpcDialOption, ar)
if err != nil {
- for index, _ := range files {
+ for index := range files {
results[index].Error = err.Error()
}
return results, err
@@ -64,10 +63,13 @@ func SubmitFiles(master string, files []FilePart,
file.Fid = file.Fid + "_" + strconv.Itoa(index)
}
file.Server = ret.Url
+ if usePublicUrl {
+ file.Server = ret.PublicUrl
+ }
file.Replication = replication
file.Collection = collection
file.DataCenter = dataCenter
- results[index].Size, err = file.Upload(maxMB, master, secret)
+ results[index].Size, err = file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption)
if err != nil {
results[index].Error = err.Error()
}
@@ -110,12 +112,14 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil
}
-func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) {
- jwt := security.GenJwt(secret, fi.Fid)
+func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
fileUrl := "http://" + fi.Server + "/" + fi.Fid
	if fi.ModTime != 0 {
		fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
	}
+	if fi.Fsync {
+		// append with "&" when "?ts=" already started the query string
+		if strings.Contains(fileUrl, "?") {
+			fileUrl += "&fsync=true"
+		} else {
+			fileUrl += "?fsync=true"
+		}
+	}
if closer, ok := fi.Reader.(io.Closer); ok {
defer closer.Close()
}
@@ -139,7 +143,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
Collection: fi.Collection,
Ttl: fi.Ttl,
}
- ret, err = Assign(master, ar)
+ ret, err = Assign(master, grpcDialOption, ar)
if err != nil {
return
}
@@ -152,10 +156,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
Collection: fi.Collection,
Ttl: fi.Ttl,
}
- ret, err = Assign(master, ar)
+ ret, err = Assign(master, grpcDialOption, ar)
if err != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master)
+ cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
return
}
id = ret.Fid
@@ -170,10 +174,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
baseName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(fi.Reader, chunkSize),
master, fileUrl,
- jwt)
+ ret.Auth)
if e != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master)
+ cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
return 0, e
}
cm.Chunks = append(cm.Chunks,
@@ -188,10 +192,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master)
+ cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
}
} else {
- ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt)
+ ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt)
if e != nil {
return 0, e
}
@@ -204,8 +208,7 @@ func upload_one_chunk(filename string, reader io.Reader, master,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
- uploadResult, uploadError := Upload(fileUrl, filename, reader, false,
- "application/octet-stream", nil, jwt)
+ uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt)
if uploadError != nil {
return 0, uploadError
}
@@ -217,12 +220,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
if e != nil {
return e
}
- bufReader := bytes.NewReader(buf)
glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
u, _ := url.Parse(fileUrl)
q := u.Query()
q.Set("cm", "true")
u.RawQuery = q.Encode()
- _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt)
+ _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt)
return e
}
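
The per-file JWT now comes back from Assign (ret.Auth) instead of being minted
locally from a shared secret. A hedged sketch of the assign-then-upload flow;
the master address is a placeholder, and VolumeAssignRequest with its Count
field is an assumption beyond what the hunks above show:

    package operation

    import (
        "fmt"
        "strings"

        "google.golang.org/grpc"
    )

    // assignAndUpload is a hypothetical caller, not part of this patch.
    func assignAndUpload() error {
        ret, err := Assign("localhost:9333", grpc.WithInsecure(), &VolumeAssignRequest{Count: 1})
        if err != nil {
            return err
        }
        fileUrl := "http://" + ret.Url + "/" + ret.Fid
        // Upload now returns (result, err, data) and takes the JWT from Assign.
        result, err, _ := Upload(fileUrl, "a.txt", false, strings.NewReader("hello"), false, "text/plain", nil, ret.Auth)
        if err != nil {
            return err
        }
        fmt.Printf("uploaded %d bytes\n", result.Size)
        return nil
    }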
diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go
index e40c7de41..5562f12ab 100644
--- a/weed/operation/sync_volume.go
+++ b/weed/operation/sync_volume.go
@@ -2,63 +2,19 @@ package operation
import (
"context"
- "fmt"
- "io"
- "time"
-
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/chrislusf/seaweedfs/weed/util"
+ "google.golang.org/grpc"
)
-func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
+func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
- WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
+ WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
- resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{
- VolumdId: vid,
+ resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{
+ VolumeId: vid,
})
return nil
})
return
}
-
-func GetVolumeIdxEntries(server string, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error {
-
- return WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error {
- stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{
- VolumdId: vid,
- })
- if err != nil {
- return err
- }
-
- var indexFileContent []byte
-
- for {
- resp, err := stream.Recv()
- if err == io.EOF {
- break
- }
- if err != nil {
- return fmt.Errorf("read index entries: %v", err)
- }
- indexFileContent = append(indexFileContent, resp.IndexFileContent...)
- }
-
- dataSize := len(indexFileContent)
-
- for idx := 0; idx+NeedleEntrySize <= dataSize; idx += NeedleEntrySize {
- line := indexFileContent[idx : idx+NeedleEntrySize]
- key := BytesToNeedleId(line[:NeedleIdSize])
- offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize])
- size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
- eachEntryFn(key, offset, size)
- }
-
- return nil
- })
-}
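
Note that GetVolumeSyncStatus above discards the error returned by
WithVolumeServerClient and always returns nil from the callback, even when the
RPC fails. A stricter variant, as a sketch:

    package operation

    import (
        "context"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    )

    // getVolumeSyncStatusStrict is a hypothetical variant, not part of this
    // patch: it propagates both the transport error and the RPC error.
    func getVolumeSyncStatusStrict(server string, dialOpt grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
        err = WithVolumeServerClient(server, dialOpt, func(client volume_server_pb.VolumeServerClient) error {
            r, rpcErr := client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{
                VolumeId: vid,
            })
            resp = r
            return rpcErr
        })
        return
    }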
diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go
new file mode 100644
index 000000000..3cd66b5da
--- /dev/null
+++ b/weed/operation/tail_volume.go
@@ -0,0 +1,83 @@
+package operation
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
+ // find volume location, replication, ttl info
+ lookup, err := Lookup(master, vid.String())
+ if err != nil {
+ return fmt.Errorf("look up volume %d: %v", vid, err)
+ }
+ if len(lookup.Locations) == 0 {
+ return fmt.Errorf("unable to locate volume %d", vid)
+ }
+
+ volumeServer := lookup.Locations[0].Url
+
+ return TailVolumeFromSource(volumeServer, grpcDialOption, vid, sinceNs, timeoutSeconds, fn)
+}
+
+func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error {
+ return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+
+ stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{
+ VolumeId: uint32(vid),
+ SinceNs: sinceNs,
+ IdleTimeoutSeconds: uint32(idleTimeoutSeconds),
+ })
+ if err != nil {
+ return err
+ }
+
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ break
+ } else {
+ return recvErr
+ }
+ }
+
+ needleHeader := resp.NeedleHeader
+ needleBody := resp.NeedleBody
+
+ if len(needleHeader) == 0 {
+ continue
+ }
+
+ for !resp.IsLastChunk {
+ resp, recvErr = stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ break
+ } else {
+ return recvErr
+ }
+ }
+ needleBody = append(needleBody, resp.NeedleBody...)
+ }
+
+ n := new(needle.Needle)
+ n.ParseNeedleHeader(needleHeader)
+ n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion)
+
+ err = fn(n)
+
+ if err != nil {
+ return err
+ }
+
+ }
+ return nil
+ })
+}
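
A hedged usage sketch for the new tailing API: follow volume 1 from the
beginning of time with a 30-second idle timeout. The master address is a
placeholder and tailExample is a hypothetical caller:

    package operation

    import (
        "fmt"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/storage/needle"
    )

    func tailExample() error {
        return TailVolume("localhost:9333", grpc.WithInsecure(), needle.VolumeId(1), 0, 30,
            func(n *needle.Needle) error {
                // fn is invoked once per fully reassembled needle
                fmt.Printf("saw needle %v\n", n)
                return nil
            })
    }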
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index 030bf5889..658588ec3 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -2,6 +2,7 @@ package operation
import (
"bytes"
+ "crypto/md5"
"encoding/json"
"errors"
"fmt"
@@ -13,38 +14,166 @@ import (
"net/textproto"
"path/filepath"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type UploadResult struct {
- Name string `json:"name,omitempty"`
- Size uint32 `json:"size,omitempty"`
- Error string `json:"error,omitempty"`
- ETag string `json:"eTag,omitempty"`
+ Name string `json:"name,omitempty"`
+ Size uint32 `json:"size,omitempty"`
+ Error string `json:"error,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ CipherKey []byte `json:"cipherKey,omitempty"`
+ Mime string `json:"mime,omitempty"`
+ Gzip uint32 `json:"gzip,omitempty"`
+ Md5 string `json:"md5,omitempty"`
+}
+
+func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk {
+ return &filer_pb.FileChunk{
+ FileId: fileId,
+ Offset: offset,
+ Size: uint64(uploadResult.Size),
+ Mtime: time.Now().UnixNano(),
+ ETag: uploadResult.ETag,
+ CipherKey: uploadResult.CipherKey,
+ IsCompressed: uploadResult.Gzip > 0,
+ }
+}
+
+// HTTPClient interface for testing
+type HTTPClient interface {
+ Do(req *http.Request) (*http.Response, error)
}
var (
- client *http.Client
+ HttpClient HTTPClient
)
func init() {
- client = &http.Client{Transport: &http.Transport{
+ HttpClient = &http.Client{Transport: &http.Transport{
MaxIdleConnsPerHost: 1024,
}}
}
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
-// Upload sends a POST request to a volume server to upload the content
-func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
- return upload_content(uploadUrl, func(w io.Writer) (err error) {
- _, err = io.Copy(w, reader)
+// UploadData sends a POST request to a volume server to upload the given bytes, recording their MD5 in the result
+func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
+ if uploadResult != nil {
+ uploadResult.Md5 = util.Md5(data)
+ }
+ return
+}
+
+// Upload buffers the reader fully, computes the content MD5, and sends a POST request to a volume server to upload the content
+func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) {
+ hash := md5.New()
+ reader = io.TeeReader(reader, hash)
+ uploadResult, err, data = doUpload(uploadUrl, filename, cipher, reader, isInputCompressed, mtype, pairMap, jwt)
+ if uploadResult != nil {
+ uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil))
+ }
+ return
+}
+
+func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) {
+ data, err = ioutil.ReadAll(reader)
+ if err != nil {
+ err = fmt.Errorf("read input: %v", err)
+ return
+ }
+ uploadResult, uploadErr := doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
+ return uploadResult, uploadErr, data
+}
+
+func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ contentIsGzipped := isInputCompressed
+ shouldGzipNow := false
+ if !isInputCompressed {
+ if mtype == "" {
+ mtype = http.DetectContentType(data)
+ // println("detect1 mimetype to", mtype)
+ if mtype == "application/octet-stream" {
+ mtype = ""
+ }
+ }
+ if shouldBeCompressed, iAmSure := util.IsCompressableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeCompressed {
+ shouldGzipNow = true
+ } else if !iAmSure && mtype == "" && len(data) > 128 {
+ var compressed []byte
+ compressed, err = util.GzipData(data[0:128])
+			shouldGzipNow = len(compressed)*10 < 128*9 // gzip only if the sample compresses to less than 90% of its size
+ }
+ }
+
+ var clearDataLen int
+
+ // gzip if possible
+ // this could be double copying
+ clearDataLen = len(data)
+ if shouldGzipNow {
+ compressed, compressErr := util.GzipData(data)
+ // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed))
+ if compressErr == nil {
+ data = compressed
+ contentIsGzipped = true
+ }
+ } else if isInputCompressed {
+ // just to get the clear data length
+ clearData, err := util.DecompressData(data)
+ if err == nil {
+ clearDataLen = len(clearData)
+ }
+ }
+
+ if cipher {
+ // encrypt(gzip(data))
+
+ // encrypt
+ cipherKey := util.GenCipherKey()
+ encryptedData, encryptionErr := util.Encrypt(data, cipherKey)
+ if encryptionErr != nil {
+ err = fmt.Errorf("encrypt input: %v", encryptionErr)
+ return
+ }
+
+ // upload data
+ uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
+ _, err = w.Write(encryptedData)
+ return
+ }, "", false, len(encryptedData), "", nil, jwt)
+ if uploadResult != nil {
+ uploadResult.Name = filename
+ uploadResult.Mime = mtype
+ uploadResult.CipherKey = cipherKey
+ }
+ } else {
+ // upload data
+ uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
+ _, err = w.Write(data)
+ return
+ }, filename, contentIsGzipped, 0, mtype, pairMap, jwt)
+ }
+
+ if uploadResult == nil {
return
- }, filename, isGzipped, mtype, pairMap, jwt)
+ }
+
+ uploadResult.Size = uint32(clearDataLen)
+ if contentIsGzipped {
+ uploadResult.Gzip = 1
+ }
+
+ return uploadResult, err
}
-func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
+
+func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, originalDataSize int, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
body_buf := bytes.NewBufferString("")
body_writer := multipart.NewWriter(body_buf)
h := make(textproto.MIMEHeader)
@@ -58,9 +187,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
if isGzipped {
h.Set("Content-Encoding", "gzip")
}
- if jwt != "" {
- h.Set("Authorization", "BEARER "+string(jwt))
- }
file_writer, cp_err := body_writer.CreatePart(h)
if cp_err != nil {
@@ -86,24 +212,26 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
for k, v := range pairMap {
req.Header.Set(k, v)
}
- resp, post_err := client.Do(req)
+ if jwt != "" {
+ req.Header.Set("Authorization", "BEARER "+string(jwt))
+ }
+ resp, post_err := HttpClient.Do(req)
if post_err != nil {
glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error())
return nil, post_err
}
defer resp.Body.Close()
- if resp.StatusCode < http.StatusOK ||
- resp.StatusCode > http.StatusIMUsed {
- return nil, errors.New(http.StatusText(resp.StatusCode))
- }
-
+ var ret UploadResult
etag := getEtag(resp)
+ if resp.StatusCode == http.StatusNoContent {
+ ret.ETag = etag
+ return &ret, nil
+ }
resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil {
return nil, ra_err
}
- var ret UploadResult
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body))
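
The sampling heuristic in doUploadData gzips only the first 128 bytes and
compresses the whole payload only when that sample shrinks below 90% of its
size. Isolated as a sketch (worthGzipping is a hypothetical helper;
util.GzipData is assumed to exist as used earlier in this patch):

    package operation

    import "github.com/chrislusf/seaweedfs/weed/util"

    // worthGzipping reports whether gzipping the payload looks worthwhile,
    // judged from a 128-byte sample.
    func worthGzipping(data []byte) bool {
        if len(data) <= 128 {
            return false // too small to sample
        }
        compressed, err := util.GzipData(data[:128])
        if err != nil {
            return false
        }
        // integer form of len(compressed) < 0.9 * 128
        return len(compressed)*10 < 128*9
    }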
diff --git a/weed/pb/Makefile b/weed/pb/Makefile
index c50410574..d2618937b 100644
--- a/weed/pb/Makefile
+++ b/weed/pb/Makefile
@@ -3,8 +3,10 @@ all: gen
.PHONY : gen
gen:
- protoc master.proto --go_out=plugins=grpc:./master_pb
- protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb
- protoc filer.proto --go_out=plugins=grpc:./filer_pb
+ protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative
+ protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative
+ protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative
+ protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative
+ protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative
# protoc filer.proto --java_out=../../other/java/client/src/main/java
cp filer.proto ../../other/java/client/src/main/proto
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index 6cd4df6b4..37121f29c 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
package filer_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "FilerProto";
@@ -12,7 +13,7 @@ service SeaweedFiler {
rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) {
}
- rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) {
+ rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) {
}
rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
@@ -21,9 +22,15 @@ service SeaweedFiler {
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
+ rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
+ }
+
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
+ rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
+ }
+
rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
}
@@ -36,6 +43,21 @@ service SeaweedFiler {
rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
}
+ rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
+ }
+
+ rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
+ }
+
+ rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -58,7 +80,7 @@ message ListEntriesRequest {
}
message ListEntriesResponse {
- repeated Entry entries = 1;
+ Entry entry = 1;
}
message Entry {
@@ -69,19 +91,36 @@ message Entry {
map extended = 5;
}
+message FullEntry {
+ string dir = 1;
+ Entry entry = 2;
+}
+
message EventNotification {
Entry old_entry = 1;
Entry new_entry = 2;
bool delete_chunks = 3;
+ string new_parent_path = 4;
+ bool is_from_other_cluster = 5;
}
message FileChunk {
- string file_id = 1;
+ string file_id = 1; // to be deprecated
int64 offset = 2;
uint64 size = 3;
int64 mtime = 4;
string e_tag = 5;
- string source_file_id = 6;
+ string source_file_id = 6; // to be deprecated
+ FileId fid = 7;
+ FileId source_fid = 8;
+ bytes cipher_key = 9;
+ bool is_compressed = 10;
+}
+
+message FileId {
+ uint32 volume_id = 1;
+ uint64 file_key = 2;
+ fixed32 cookie = 3;
}
message FuseAttributes {
@@ -98,32 +137,58 @@ message FuseAttributes {
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
+ bytes md5 = 14;
}
message CreateEntryRequest {
string directory = 1;
Entry entry = 2;
+ bool o_excl = 3;
+ bool is_from_other_cluster = 4;
}
message CreateEntryResponse {
+ string error = 1;
}
message UpdateEntryRequest {
string directory = 1;
Entry entry = 2;
+ bool is_from_other_cluster = 3;
}
message UpdateEntryResponse {
}
+message AppendToEntryRequest {
+ string directory = 1;
+ string entry_name = 2;
+ repeated FileChunk chunks = 3;
+}
+message AppendToEntryResponse {
+}
+
message DeleteEntryRequest {
string directory = 1;
string name = 2;
// bool is_directory = 3;
bool is_delete_data = 4;
bool is_recursive = 5;
+ bool ignore_recursive_error = 6;
+ bool is_from_other_cluster = 7;
}
message DeleteEntryResponse {
+ string error = 1;
+}
+
+message AtomicRenameEntryRequest {
+ string old_directory = 1;
+ string old_name = 2;
+ string new_directory = 3;
+ string new_name = 4;
+}
+
+message AtomicRenameEntryResponse {
}
message AssignVolumeRequest {
@@ -132,6 +197,7 @@ message AssignVolumeRequest {
string replication = 3;
int32 ttl_sec = 4;
string data_center = 5;
+ string parent_path = 6;
}
message AssignVolumeResponse {
@@ -139,6 +205,10 @@ message AssignVolumeResponse {
string url = 2;
string public_url = 3;
int32 count = 4;
+ string auth = 5;
+ string collection = 6;
+ string replication = 7;
+ string error = 8;
}
message LookupVolumeRequest {
@@ -177,3 +247,53 @@ message StatisticsResponse {
uint64 used_size = 5;
uint64 file_count = 6;
}
+
+message GetFilerConfigurationRequest {
+}
+message GetFilerConfigurationResponse {
+ repeated string masters = 1;
+ string replication = 2;
+ string collection = 3;
+ uint32 max_mb = 4;
+ string dir_buckets = 5;
+ bool cipher = 7;
+}
+
+message SubscribeMetadataRequest {
+ string client_name = 1;
+ string path_prefix = 2;
+ int64 since_ns = 3;
+}
+message SubscribeMetadataResponse {
+ string directory = 1;
+ EventNotification event_notification = 2;
+ int64 ts_ns = 3;
+}
+
+message LogEntry {
+ int64 ts_ns = 1;
+ int32 partition_key_hash = 2;
+ bytes data = 3;
+}
+
+message KeepConnectedRequest {
+ string name = 1;
+ uint32 grpc_port = 2;
+ repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+ string resource = 1;
+}
+message LocateBrokerResponse {
+ bool found = 1;
+ // if found, send the exact address
+ // if not found, send the full list of existing brokers
+ message Resource {
+ string grpc_addresses = 1;
+ int32 resource_count = 2;
+ }
+ repeated Resource resources = 2;
+}
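
ListEntries is now a server-streaming RPC that returns one entry per message
instead of a single repeated list, so large directories no longer have to fit
in one response. A hedged client-side sketch of consuming the stream; listDir
is a hypothetical caller and the directory path is a placeholder:

    package example

    import (
        "context"
        "fmt"
        "io"

        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    )

    func listDir(client filer_pb.SeaweedFilerClient) error {
        stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
            Directory: "/some/dir",
            Limit:     100,
        })
        if err != nil {
            return err
        }
        for {
            resp, recvErr := stream.Recv()
            if recvErr == io.EOF {
                return nil // server closed the stream: listing is complete
            }
            if recvErr != nil {
                return recvErr
            }
            fmt.Println(resp.Entry.Name)
        }
    }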
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 6b4a27c0a..456a31a1d 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -1,876 +1,3686 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.24.0
+// protoc v3.12.3
// source: filer.proto
-// DO NOT EDIT!
-
-/*
-Package filer_pb is a generated protocol buffer package.
-
-It is generated from these files:
- filer.proto
-
-It has these top-level messages:
- LookupDirectoryEntryRequest
- LookupDirectoryEntryResponse
- ListEntriesRequest
- ListEntriesResponse
- Entry
- EventNotification
- FileChunk
- FuseAttributes
- CreateEntryRequest
- CreateEntryResponse
- UpdateEntryRequest
- UpdateEntryResponse
- DeleteEntryRequest
- DeleteEntryResponse
- AssignVolumeRequest
- AssignVolumeResponse
- LookupVolumeRequest
- Locations
- Location
- LookupVolumeResponse
- DeleteCollectionRequest
- DeleteCollectionResponse
- StatisticsRequest
- StatisticsResponse
-*/
-package filer_pb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package filer_pb
import (
- context "golang.org/x/net/context"
+ context "context"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
type LookupDirectoryEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *LookupDirectoryEntryRequest) Reset() {
+ *x = LookupDirectoryEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupDirectoryEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupDirectoryEntryRequest) ProtoMessage() {}
+
+func (x *LookupDirectoryEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupDirectoryEntryRequest) Reset() { *m = LookupDirectoryEntryRequest{} }
-func (m *LookupDirectoryEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupDirectoryEntryRequest) ProtoMessage() {}
-func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+// Deprecated: Use LookupDirectoryEntryRequest.ProtoReflect.Descriptor instead.
+func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{0}
+}
-func (m *LookupDirectoryEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *LookupDirectoryEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *LookupDirectoryEntryRequest) GetName() string {
- if m != nil {
- return m.Name
+func (x *LookupDirectoryEntryRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
type LookupDirectoryEntryResponse struct {
- Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *LookupDirectoryEntryResponse) Reset() {
+ *x = LookupDirectoryEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupDirectoryEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupDirectoryEntryResponse) ProtoMessage() {}
+
+func (x *LookupDirectoryEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupDirectoryEntryResponse) Reset() { *m = LookupDirectoryEntryResponse{} }
-func (m *LookupDirectoryEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupDirectoryEntryResponse) ProtoMessage() {}
-func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+// Deprecated: Use LookupDirectoryEntryResponse.ProtoReflect.Descriptor instead.
+func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{1}
+}
-func (m *LookupDirectoryEntryResponse) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *LookupDirectoryEntryResponse) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
return nil
}
type ListEntriesRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Prefix string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"`
- StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName" json:"startFromFileName,omitempty"`
- InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom" json:"inclusiveStartFrom,omitempty"`
- Limit uint32 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"`
+ InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"`
+ Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *ListEntriesRequest) Reset() {
+ *x = ListEntriesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListEntriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} }
-func (m *ListEntriesRequest) String() string { return proto.CompactTextString(m) }
-func (*ListEntriesRequest) ProtoMessage() {}
-func (*ListEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*ListEntriesRequest) ProtoMessage() {}
-func (m *ListEntriesRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead.
+func (*ListEntriesRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListEntriesRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *ListEntriesRequest) GetPrefix() string {
- if m != nil {
- return m.Prefix
+func (x *ListEntriesRequest) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
}
return ""
}
-func (m *ListEntriesRequest) GetStartFromFileName() string {
- if m != nil {
- return m.StartFromFileName
+func (x *ListEntriesRequest) GetStartFromFileName() string {
+ if x != nil {
+ return x.StartFromFileName
}
return ""
}
-func (m *ListEntriesRequest) GetInclusiveStartFrom() bool {
- if m != nil {
- return m.InclusiveStartFrom
+func (x *ListEntriesRequest) GetInclusiveStartFrom() bool {
+ if x != nil {
+ return x.InclusiveStartFrom
}
return false
}
-func (m *ListEntriesRequest) GetLimit() uint32 {
- if m != nil {
- return m.Limit
+func (x *ListEntriesRequest) GetLimit() uint32 {
+ if x != nil {
+ return x.Limit
}
return 0
}
type ListEntriesResponse struct {
- Entries []*Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *ListEntriesResponse) Reset() {
+ *x = ListEntriesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListEntriesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListEntriesResponse) ProtoMessage() {}
+
+func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} }
-func (m *ListEntriesResponse) String() string { return proto.CompactTextString(m) }
-func (*ListEntriesResponse) ProtoMessage() {}
-func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead.
+func (*ListEntriesResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{3}
+}
-func (m *ListEntriesResponse) GetEntries() []*Entry {
- if m != nil {
- return m.Entries
+func (x *ListEntriesResponse) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
return nil
}
type Entry struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
- Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
- Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"`
- Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"`
+ Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"`
+ Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
-func (m *Entry) Reset() { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage() {}
-func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (x *Entry) Reset() {
+ *x = Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Entry) ProtoMessage() {}
+
+func (x *Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-func (m *Entry) GetName() string {
- if m != nil {
- return m.Name
+// Deprecated: Use Entry.ProtoReflect.Descriptor instead.
+func (*Entry) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Entry) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *Entry) GetIsDirectory() bool {
- if m != nil {
- return m.IsDirectory
+func (x *Entry) GetIsDirectory() bool {
+ if x != nil {
+ return x.IsDirectory
}
return false
}
-func (m *Entry) GetChunks() []*FileChunk {
- if m != nil {
- return m.Chunks
+func (x *Entry) GetChunks() []*FileChunk {
+ if x != nil {
+ return x.Chunks
+ }
+ return nil
+}
+
+func (x *Entry) GetAttributes() *FuseAttributes {
+ if x != nil {
+ return x.Attributes
}
return nil
}
-func (m *Entry) GetAttributes() *FuseAttributes {
- if m != nil {
- return m.Attributes
+func (x *Entry) GetExtended() map[string][]byte {
+ if x != nil {
+ return x.Extended
}
return nil
}
-func (m *Entry) GetExtended() map[string][]byte {
- if m != nil {
- return m.Extended
+type FullEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *FullEntry) Reset() {
+ *x = FullEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FullEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FullEntry) ProtoMessage() {}
+
+func (x *FullEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FullEntry.ProtoReflect.Descriptor instead.
+func (*FullEntry) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *FullEntry) GetDir() string {
+ if x != nil {
+ return x.Dir
+ }
+ return ""
+}
+
+func (x *FullEntry) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
return nil
}
type EventNotification struct {
- OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"`
- NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"`
- DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"`
+ NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"`
+ DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"`
+ NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
}
-func (m *EventNotification) Reset() { *m = EventNotification{} }
-func (m *EventNotification) String() string { return proto.CompactTextString(m) }
-func (*EventNotification) ProtoMessage() {}
-func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (x *EventNotification) Reset() {
+ *x = EventNotification{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EventNotification) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EventNotification) ProtoMessage() {}
+
+func (x *EventNotification) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-func (m *EventNotification) GetOldEntry() *Entry {
- if m != nil {
- return m.OldEntry
+// Deprecated: Use EventNotification.ProtoReflect.Descriptor instead.
+func (*EventNotification) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *EventNotification) GetOldEntry() *Entry {
+ if x != nil {
+ return x.OldEntry
}
return nil
}
-func (m *EventNotification) GetNewEntry() *Entry {
- if m != nil {
- return m.NewEntry
+func (x *EventNotification) GetNewEntry() *Entry {
+ if x != nil {
+ return x.NewEntry
}
return nil
}
-func (m *EventNotification) GetDeleteChunks() bool {
- if m != nil {
- return m.DeleteChunks
+func (x *EventNotification) GetDeleteChunks() bool {
+ if x != nil {
+ return x.DeleteChunks
+ }
+ return false
+}
+
+func (x *EventNotification) GetNewParentPath() string {
+ if x != nil {
+ return x.NewParentPath
+ }
+ return ""
+}
+
+func (x *EventNotification) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
}
return false
}
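The OldEntry/NewEntry pair encodes the kind of mutation: only NewEntry set on a create, only OldEntry on a delete, both on an update or move. As a minimal consumer-side sketch (assuming the generated package is imported as filer_pb; treating a populated NewParentPath as a move is an interpretation of the field name, not something this file states):

// Sketch only — not part of the generated file.
func eventKind(ev *filer_pb.EventNotification) string {
	switch {
	case ev.GetOldEntry() == nil && ev.GetNewEntry() != nil:
		return "create"
	case ev.GetOldEntry() != nil && ev.GetNewEntry() == nil:
		return "delete"
	case ev.GetNewParentPath() != "":
		return "move" // assumed: NewParentPath set when the entry changed directories
	default:
		return "update"
	}
}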
type FileChunk struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"`
- Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
- Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"`
- ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"`
- SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated
+ Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+ Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"`
+ ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"`
+ SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated
+ Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"`
+ SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"`
+ CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"`
+ IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"`
+}
+
+func (x *FileChunk) Reset() {
+ *x = FileChunk{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileChunk) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileChunk) ProtoMessage() {}
+
+func (x *FileChunk) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *FileChunk) Reset() { *m = FileChunk{} }
-func (m *FileChunk) String() string { return proto.CompactTextString(m) }
-func (*FileChunk) ProtoMessage() {}
-func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+// Deprecated: Use FileChunk.ProtoReflect.Descriptor instead.
+func (*FileChunk) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{7}
+}
-func (m *FileChunk) GetFileId() string {
- if m != nil {
- return m.FileId
+func (x *FileChunk) GetFileId() string {
+ if x != nil {
+ return x.FileId
}
return ""
}
-func (m *FileChunk) GetOffset() int64 {
- if m != nil {
- return m.Offset
+func (x *FileChunk) GetOffset() int64 {
+ if x != nil {
+ return x.Offset
}
return 0
}
-func (m *FileChunk) GetSize() uint64 {
- if m != nil {
- return m.Size
+func (x *FileChunk) GetSize() uint64 {
+ if x != nil {
+ return x.Size
}
return 0
}
-func (m *FileChunk) GetMtime() int64 {
- if m != nil {
- return m.Mtime
+func (x *FileChunk) GetMtime() int64 {
+ if x != nil {
+ return x.Mtime
}
return 0
}
-func (m *FileChunk) GetETag() string {
- if m != nil {
- return m.ETag
+func (x *FileChunk) GetETag() string {
+ if x != nil {
+ return x.ETag
}
return ""
}
-func (m *FileChunk) GetSourceFileId() string {
- if m != nil {
- return m.SourceFileId
+func (x *FileChunk) GetSourceFileId() string {
+ if x != nil {
+ return x.SourceFileId
}
return ""
}
+func (x *FileChunk) GetFid() *FileId {
+ if x != nil {
+ return x.Fid
+ }
+ return nil
+}
+
+func (x *FileChunk) GetSourceFid() *FileId {
+ if x != nil {
+ return x.SourceFid
+ }
+ return nil
+}
+
+func (x *FileChunk) GetCipherKey() []byte {
+ if x != nil {
+ return x.CipherKey
+ }
+ return nil
+}
+
+func (x *FileChunk) GetIsCompressed() bool {
+ if x != nil {
+ return x.IsCompressed
+ }
+ return false
+}
+
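Note that every generated getter is nil-receiver safe (each checks x != nil first), so chunk metadata can be read without explicit guards. A small sketch with a hypothetical helper:

// Sketch only: half-open byte range [start, stop) covered by a chunk; c may be nil.
func chunkRange(c *filer_pb.FileChunk) (start, stop int64) {
	start = c.GetOffset()
	stop = start + int64(c.GetSize())
	return start, stop
}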
+type FileId struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"`
+ Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"`
+}
+
+func (x *FileId) Reset() {
+ *x = FileId{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileId) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileId) ProtoMessage() {}
+
+func (x *FileId) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileId.ProtoReflect.Descriptor instead.
+func (*FileId) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *FileId) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *FileId) GetFileKey() uint64 {
+ if x != nil {
+ return x.FileKey
+ }
+ return 0
+}
+
+func (x *FileId) GetCookie() uint32 {
+ if x != nil {
+ return x.Cookie
+ }
+ return 0
+}
+
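FileChunk.Fid and SourceFid carry the structured replacement for the string FileId/SourceFileId fields marked "to be deprecated" above. One plausible rendering back to string form — the hex layout below is an illustrative assumption, not taken from this file:

// Sketch only: assumed "volumeId,fileKeyCookie" layout; requires "fmt".
func fileIdString(fid *filer_pb.FileId) string {
	return fmt.Sprintf("%d,%x%08x", fid.GetVolumeId(), fid.GetFileKey(), fid.GetCookie())
}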
type FuseAttributes struct {
- FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
- Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"`
- FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode" json:"file_mode,omitempty"`
- Uid uint32 `protobuf:"varint,4,opt,name=uid" json:"uid,omitempty"`
- Gid uint32 `protobuf:"varint,5,opt,name=gid" json:"gid,omitempty"`
- Crtime int64 `protobuf:"varint,6,opt,name=crtime" json:"crtime,omitempty"`
- Mime string `protobuf:"bytes,7,opt,name=mime" json:"mime,omitempty"`
- Replication string `protobuf:"bytes,8,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,9,opt,name=collection" json:"collection,omitempty"`
- TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"`
- UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"`
- GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"`
- SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"`
-}
-
-func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
-func (m *FuseAttributes) String() string { return proto.CompactTextString(m) }
-func (*FuseAttributes) ProtoMessage() {}
-func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-func (m *FuseAttributes) GetFileSize() uint64 {
- if m != nil {
- return m.FileSize
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+ Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds
+ FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"`
+ Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"`
+ Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"`
+ Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds
+ Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"`
+ Replication string `protobuf:"bytes,8,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,9,opt,name=collection,proto3" json:"collection,omitempty"`
+ TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"`
+ UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs
+ GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs
+ SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"`
+ Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"`
+}
+
+func (x *FuseAttributes) Reset() {
+ *x = FuseAttributes{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FuseAttributes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FuseAttributes) ProtoMessage() {}
+
+func (x *FuseAttributes) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FuseAttributes.ProtoReflect.Descriptor instead.
+func (*FuseAttributes) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *FuseAttributes) GetFileSize() uint64 {
+ if x != nil {
+ return x.FileSize
}
return 0
}
-func (m *FuseAttributes) GetMtime() int64 {
- if m != nil {
- return m.Mtime
+func (x *FuseAttributes) GetMtime() int64 {
+ if x != nil {
+ return x.Mtime
}
return 0
}
-func (m *FuseAttributes) GetFileMode() uint32 {
- if m != nil {
- return m.FileMode
+func (x *FuseAttributes) GetFileMode() uint32 {
+ if x != nil {
+ return x.FileMode
}
return 0
}
-func (m *FuseAttributes) GetUid() uint32 {
- if m != nil {
- return m.Uid
+func (x *FuseAttributes) GetUid() uint32 {
+ if x != nil {
+ return x.Uid
}
return 0
}
-func (m *FuseAttributes) GetGid() uint32 {
- if m != nil {
- return m.Gid
+func (x *FuseAttributes) GetGid() uint32 {
+ if x != nil {
+ return x.Gid
}
return 0
}
-func (m *FuseAttributes) GetCrtime() int64 {
- if m != nil {
- return m.Crtime
+func (x *FuseAttributes) GetCrtime() int64 {
+ if x != nil {
+ return x.Crtime
}
return 0
}
-func (m *FuseAttributes) GetMime() string {
- if m != nil {
- return m.Mime
+func (x *FuseAttributes) GetMime() string {
+ if x != nil {
+ return x.Mime
}
return ""
}
-func (m *FuseAttributes) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *FuseAttributes) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
return ""
}
-func (m *FuseAttributes) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *FuseAttributes) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *FuseAttributes) GetTtlSec() int32 {
- if m != nil {
- return m.TtlSec
+func (x *FuseAttributes) GetTtlSec() int32 {
+ if x != nil {
+ return x.TtlSec
}
return 0
}
-func (m *FuseAttributes) GetUserName() string {
- if m != nil {
- return m.UserName
+func (x *FuseAttributes) GetUserName() string {
+ if x != nil {
+ return x.UserName
}
return ""
}
-func (m *FuseAttributes) GetGroupName() []string {
- if m != nil {
- return m.GroupName
+func (x *FuseAttributes) GetGroupName() []string {
+ if x != nil {
+ return x.GroupName
}
return nil
}
-func (m *FuseAttributes) GetSymlinkTarget() string {
- if m != nil {
- return m.SymlinkTarget
+func (x *FuseAttributes) GetSymlinkTarget() string {
+ if x != nil {
+ return x.SymlinkTarget
}
return ""
}
+func (x *FuseAttributes) GetMd5() []byte {
+ if x != nil {
+ return x.Md5
+ }
+ return nil
+}
+
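Per the field comments, Mtime and Crtime are unix seconds. A sketch mapping the attributes onto standard-library types; the direct os.FileMode cast is a simplification, since translating FUSE mode bits is more involved in practice:

// Sketch only: requires "os" and "time".
func attrView(a *filer_pb.FuseAttributes) (mtime time.Time, mode os.FileMode) {
	return time.Unix(a.GetMtime(), 0), os.FileMode(a.GetFileMode())
}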
type CreateEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
+ OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
+}
+
+func (x *CreateEntryRequest) Reset() {
+ *x = CreateEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateEntryRequest) ProtoMessage() {}
+
+func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} }
-func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*CreateEntryRequest) ProtoMessage() {}
-func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead.
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{10}
+}
-func (m *CreateEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *CreateEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *CreateEntryRequest) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *CreateEntryRequest) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
return nil
}
+func (x *CreateEntryRequest) GetOExcl() bool {
+ if x != nil {
+ return x.OExcl
+ }
+ return false
+}
+
+func (x *CreateEntryRequest) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
+ }
+ return false
+}
+
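A hand-built request might look like the sketch below, reading OExcl as O_EXCL-style create semantics (fail if the entry already exists — an interpretation of the field name). The Name field on Entry is defined earlier in this file:

// Sketch only: requires "time".
var createReq = &filer_pb.CreateEntryRequest{
	Directory: "/buckets/demo",
	Entry: &filer_pb.Entry{
		Name: "hello.txt",
		Attributes: &filer_pb.FuseAttributes{
			FileMode: 0644,
			Mtime:    time.Now().Unix(),
		},
	},
	OExcl: true,
}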
type CreateEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
}
-func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} }
-func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*CreateEntryResponse) ProtoMessage() {}
-func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (x *CreateEntryResponse) Reset() {
+ *x = CreateEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type UpdateEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+func (x *CreateEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} }
-func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*UpdateEntryRequest) ProtoMessage() {}
-func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*CreateEntryResponse) ProtoMessage() {}
-func (m *UpdateEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *CreateEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateEntryResponse.ProtoReflect.Descriptor instead.
+func (*CreateEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{11}
}
-func (m *UpdateEntryRequest) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *CreateEntryResponse) GetError() string {
+ if x != nil {
+ return x.Error
}
- return nil
+ return ""
}
-type UpdateEntryResponse struct {
+type UpdateEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
}
-func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} }
-func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*UpdateEntryResponse) ProtoMessage() {}
-func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (x *UpdateEntryRequest) Reset() {
+ *x = UpdateEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type DeleteEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
- // bool is_directory = 3;
- IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"`
- IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"`
+func (x *UpdateEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
-func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteEntryRequest) ProtoMessage() {}
-func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*UpdateEntryRequest) ProtoMessage() {}
-func (m *DeleteEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *UpdateEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateEntryRequest.ProtoReflect.Descriptor instead.
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{12}
}
-func (m *DeleteEntryRequest) GetName() string {
- if m != nil {
- return m.Name
+func (x *UpdateEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *DeleteEntryRequest) GetIsDeleteData() bool {
- if m != nil {
- return m.IsDeleteData
+func (x *UpdateEntryRequest) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
- return false
+ return nil
}
-func (m *DeleteEntryRequest) GetIsRecursive() bool {
- if m != nil {
- return m.IsRecursive
+func (x *UpdateEntryRequest) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
}
return false
}
-type DeleteEntryResponse struct {
+type UpdateEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
-func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteEntryResponse) ProtoMessage() {}
-func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (x *UpdateEntryResponse) Reset() {
+ *x = UpdateEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type AssignVolumeRequest struct {
- Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
- TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"`
- DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
+func (x *UpdateEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
-func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*AssignVolumeRequest) ProtoMessage() {}
-func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*UpdateEntryResponse) ProtoMessage() {}
-func (m *AssignVolumeRequest) GetCount() int32 {
- if m != nil {
- return m.Count
+func (x *UpdateEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *AssignVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use UpdateEntryResponse.ProtoReflect.Descriptor instead.
+func (*UpdateEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{13}
}
-func (m *AssignVolumeRequest) GetReplication() string {
- if m != nil {
- return m.Replication
- }
- return ""
+type AppendToEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"`
+ Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"`
}
-func (m *AssignVolumeRequest) GetTtlSec() int32 {
- if m != nil {
- return m.TtlSec
+func (x *AppendToEntryRequest) Reset() {
+ *x = AppendToEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *AssignVolumeRequest) GetDataCenter() string {
- if m != nil {
- return m.DataCenter
- }
- return ""
+func (x *AppendToEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type AssignVolumeResponse struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+func (*AppendToEntryRequest) ProtoMessage() {}
+
+func (x *AppendToEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
-func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*AssignVolumeResponse) ProtoMessage() {}
-func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+// Deprecated: Use AppendToEntryRequest.ProtoReflect.Descriptor instead.
+func (*AppendToEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{14}
+}
-func (m *AssignVolumeResponse) GetFileId() string {
- if m != nil {
- return m.FileId
+func (x *AppendToEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *AssignVolumeResponse) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *AppendToEntryRequest) GetEntryName() string {
+ if x != nil {
+ return x.EntryName
}
return ""
}
-func (m *AssignVolumeResponse) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *AppendToEntryRequest) GetChunks() []*FileChunk {
+ if x != nil {
+ return x.Chunks
}
- return ""
+ return nil
+}
+
+type AppendToEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *AssignVolumeResponse) GetCount() int32 {
- if m != nil {
- return m.Count
+func (x *AppendToEntryResponse) Reset() {
+ *x = AppendToEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type LookupVolumeRequest struct {
- VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
+func (x *AppendToEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
-func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeRequest) ProtoMessage() {}
-func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*AppendToEntryResponse) ProtoMessage() {}
-func (m *LookupVolumeRequest) GetVolumeIds() []string {
- if m != nil {
- return m.VolumeIds
+func (x *AppendToEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type Locations struct {
- Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"`
+// Deprecated: Use AppendToEntryResponse.ProtoReflect.Descriptor instead.
+func (*AppendToEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{15}
}
-func (m *Locations) Reset() { *m = Locations{} }
-func (m *Locations) String() string { return proto.CompactTextString(m) }
-func (*Locations) ProtoMessage() {}
-func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+type DeleteEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // bool is_directory = 3;
+ IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData,proto3" json:"is_delete_data,omitempty"`
+ IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive,proto3" json:"is_recursive,omitempty"`
+ IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
+}
-func (m *Locations) GetLocations() []*Location {
- if m != nil {
- return m.Locations
+func (x *DeleteEntryRequest) Reset() {
+ *x = DeleteEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type Location struct {
- Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+func (x *DeleteEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteEntryRequest) ProtoMessage() {}
+
+func (x *DeleteEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *Location) Reset() { *m = Location{} }
-func (m *Location) String() string { return proto.CompactTextString(m) }
-func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+// Deprecated: Use DeleteEntryRequest.ProtoReflect.Descriptor instead.
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{16}
+}
-func (m *Location) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *DeleteEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *Location) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *DeleteEntryRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-type LookupVolumeResponse struct {
- LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+func (x *DeleteEntryRequest) GetIsDeleteData() bool {
+ if x != nil {
+ return x.IsDeleteData
+ }
+ return false
+}
+
+func (x *DeleteEntryRequest) GetIsRecursive() bool {
+ if x != nil {
+ return x.IsRecursive
+ }
+ return false
+}
+
+func (x *DeleteEntryRequest) GetIgnoreRecursiveError() bool {
+ if x != nil {
+ return x.IgnoreRecursiveError
+ }
+ return false
}
-func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
-func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeResponse) ProtoMessage() {}
-func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (x *DeleteEntryRequest) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
+ }
+ return false
+}
+
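The boolean flags compose: IsRecursive descends into directories, IsDeleteData also reclaims the underlying chunk data, and IgnoreRecursiveError keeps going past per-child failures. For example:

// Sketch only: recursive delete of /buckets/demo/tmp, chunks included.
var deleteReq = &filer_pb.DeleteEntryRequest{
	Directory:            "/buckets/demo",
	Name:                 "tmp",
	IsDeleteData:         true,
	IsRecursive:          true,
	IgnoreRecursiveError: true,
}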
+type DeleteEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *DeleteEntryResponse) Reset() {
+ *x = DeleteEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteEntryResponse) ProtoMessage() {}
+
+func (x *DeleteEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteEntryResponse.ProtoReflect.Descriptor instead.
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *DeleteEntryResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+type AtomicRenameEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"`
+ OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"`
+ NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"`
+ NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
+}
+
+func (x *AtomicRenameEntryRequest) Reset() {
+ *x = AtomicRenameEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AtomicRenameEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AtomicRenameEntryRequest) ProtoMessage() {}
+
+func (x *AtomicRenameEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AtomicRenameEntryRequest.ProtoReflect.Descriptor instead.
+func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *AtomicRenameEntryRequest) GetOldDirectory() string {
+ if x != nil {
+ return x.OldDirectory
+ }
+ return ""
+}
+
+func (x *AtomicRenameEntryRequest) GetOldName() string {
+ if x != nil {
+ return x.OldName
+ }
+ return ""
+}
+
+func (x *AtomicRenameEntryRequest) GetNewDirectory() string {
+ if x != nil {
+ return x.NewDirectory
+ }
+ return ""
+}
+
+func (x *AtomicRenameEntryRequest) GetNewName() string {
+ if x != nil {
+ return x.NewName
+ }
+ return ""
+}
+
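A rename is expressed as old and new (directory, name) pairs, which the filer applies as one atomic operation:

// Sketch only.
var renameReq = &filer_pb.AtomicRenameEntryRequest{
	OldDirectory: "/buckets/demo",
	OldName:      "draft.txt",
	NewDirectory: "/buckets/demo/archive",
	NewName:      "final.txt",
}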
+type AtomicRenameEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AtomicRenameEntryResponse) Reset() {
+ *x = AtomicRenameEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AtomicRenameEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AtomicRenameEntryResponse) ProtoMessage() {}
+
+func (x *AtomicRenameEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AtomicRenameEntryResponse.ProtoReflect.Descriptor instead.
+func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{19}
+}
+
+type AssignVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"`
+ DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ ParentPath string `protobuf:"bytes,6,opt,name=parent_path,json=parentPath,proto3" json:"parent_path,omitempty"`
+}
+
+func (x *AssignVolumeRequest) Reset() {
+ *x = AssignVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignVolumeRequest) ProtoMessage() {}
+
+func (x *AssignVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignVolumeRequest.ProtoReflect.Descriptor instead.
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *AssignVolumeRequest) GetCount() int32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetTtlSec() int32 {
+ if x != nil {
+ return x.TtlSec
+ }
+ return 0
+}
+
+func (x *AssignVolumeRequest) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetParentPath() string {
+ if x != nil {
+ return x.ParentPath
+ }
+ return ""
+}
+
+type AssignVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+ Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"`
+ Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"`
+ Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *AssignVolumeResponse) Reset() {
+ *x = AssignVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignVolumeResponse) ProtoMessage() {}
+
+func (x *AssignVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignVolumeResponse.ProtoReflect.Descriptor instead.
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *AssignVolumeResponse) GetFileId() string {
+ if x != nil {
+ return x.FileId
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetCount() int32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignVolumeResponse) GetAuth() string {
+ if x != nil {
+ return x.Auth
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
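The response reports failures in-band through Error in addition to the gRPC status, so callers should check both. Composing Url and FileId into an upload target, as below, is an assumption about caller conventions rather than anything this file specifies:

// Sketch only: requires "fmt".
func uploadTarget(resp *filer_pb.AssignVolumeResponse) (string, error) {
	if resp.GetError() != "" {
		return "", fmt.Errorf("assign volume: %s", resp.GetError())
	}
	return "http://" + resp.GetUrl() + "/" + resp.GetFileId(), nil
}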
+type LookupVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"`
+}
+
+func (x *LookupVolumeRequest) Reset() {
+ *x = LookupVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeRequest) ProtoMessage() {}
+
+func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead.
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *LookupVolumeRequest) GetVolumeIds() []string {
+ if x != nil {
+ return x.VolumeIds
+ }
+ return nil
+}
+
+type Locations struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"`
+}
+
+func (x *Locations) Reset() {
+ *x = Locations{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Locations) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Locations) ProtoMessage() {}
+
+func (x *Locations) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Locations.ProtoReflect.Descriptor instead.
+func (*Locations) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *Locations) GetLocations() []*Location {
+ if x != nil {
+ return x.Locations
+ }
+ return nil
+}
+
+type Location struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+}
+
+func (x *Location) Reset() {
+ *x = Location{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Location) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Location) ProtoMessage() {}
+
+func (x *Location) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Location.ProtoReflect.Descriptor instead.
+func (*Location) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *Location) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Location) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+type LookupVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *LookupVolumeResponse) Reset() {
+ *x = LookupVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeResponse) ProtoMessage() {}
+
+func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead.
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
+ if x != nil {
+ return x.LocationsMap
+ }
+ return nil
+}
+
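LocationsMap is keyed by the volume id strings sent in LookupVolumeRequest; a missing key or an empty Locations list means the volume could not be located. For instance:

// Sketch only: first known replica for a volume id; requires "fmt".
func firstReplica(resp *filer_pb.LookupVolumeResponse, volumeId string) (string, error) {
	locs, ok := resp.GetLocationsMap()[volumeId]
	if !ok || len(locs.GetLocations()) == 0 {
		return "", fmt.Errorf("volume %s: no locations", volumeId)
	}
	return locs.GetLocations()[0].GetUrl(), nil
}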
+type DeleteCollectionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *DeleteCollectionRequest) Reset() {
+ *x = DeleteCollectionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteCollectionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionRequest) ProtoMessage() {}
+
+func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *DeleteCollectionRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type DeleteCollectionResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteCollectionResponse) Reset() {
+ *x = DeleteCollectionResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteCollectionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionResponse) ProtoMessage() {}
+
+func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{27}
+}
+
+type StatisticsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+}
+
+func (x *StatisticsRequest) Reset() {
+ *x = StatisticsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsRequest) ProtoMessage() {}
+
+func (x *StatisticsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead.
+func (*StatisticsRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *StatisticsRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+type StatisticsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+ UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"`
+ FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+}
+
+func (x *StatisticsResponse) Reset() {
+ *x = StatisticsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsResponse) ProtoMessage() {}
+
+func (x *StatisticsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead.
+func (*StatisticsResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *StatisticsResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StatisticsResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *StatisticsResponse) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *StatisticsResponse) GetTotalSize() uint64 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetUsedSize() uint64 {
+ if x != nil {
+ return x.UsedSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
+ }
+ return 0
+}
+
+type GetFilerConfigurationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetFilerConfigurationRequest) Reset() {
+ *x = GetFilerConfigurationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFilerConfigurationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFilerConfigurationRequest) ProtoMessage() {}
+
+func (x *GetFilerConfigurationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFilerConfigurationRequest.ProtoReflect.Descriptor instead.
+func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{30}
+}
+
+type GetFilerConfigurationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"`
+ Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"`
+ DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"`
+ Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"`
+}
+
+func (x *GetFilerConfigurationResponse) Reset() {
+ *x = GetFilerConfigurationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFilerConfigurationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFilerConfigurationResponse) ProtoMessage() {}
+
+func (x *GetFilerConfigurationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFilerConfigurationResponse.ProtoReflect.Descriptor instead.
+func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *GetFilerConfigurationResponse) GetMasters() []string {
+ if x != nil {
+ return x.Masters
+ }
+ return nil
+}
+
+func (x *GetFilerConfigurationResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetMaxMb() uint32 {
+ if x != nil {
+ return x.MaxMb
+ }
+ return 0
+}
+
+func (x *GetFilerConfigurationResponse) GetDirBuckets() string {
+ if x != nil {
+ return x.DirBuckets
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetCipher() bool {
+ if x != nil {
+ return x.Cipher
+ }
+ return false
+}
+
+type SubscribeMetadataRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"`
+ PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"`
+ SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+}
+
+func (x *SubscribeMetadataRequest) Reset() {
+ *x = SubscribeMetadataRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeMetadataRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeMetadataRequest) ProtoMessage() {}
+
+func (x *SubscribeMetadataRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeMetadataRequest.ProtoReflect.Descriptor instead.
+func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *SubscribeMetadataRequest) GetClientName() string {
+ if x != nil {
+ return x.ClientName
+ }
+ return ""
+}
+
+func (x *SubscribeMetadataRequest) GetPathPrefix() string {
+ if x != nil {
+ return x.PathPrefix
+ }
+ return ""
+}
+
+func (x *SubscribeMetadataRequest) GetSinceNs() int64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+type SubscribeMetadataResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"`
+ TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"`
+}
+
+func (x *SubscribeMetadataResponse) Reset() {
+ *x = SubscribeMetadataResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeMetadataResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeMetadataResponse) ProtoMessage() {}
+
+func (x *SubscribeMetadataResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeMetadataResponse.ProtoReflect.Descriptor instead.
+func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *SubscribeMetadataResponse) GetDirectory() string {
+ if x != nil {
+ return x.Directory
+ }
+ return ""
+}
+
+func (x *SubscribeMetadataResponse) GetEventNotification() *EventNotification {
+ if x != nil {
+ return x.EventNotification
+ }
+ return nil
+}
+
+func (x *SubscribeMetadataResponse) GetTsNs() int64 {
+ if x != nil {
+ return x.TsNs
+ }
+ return 0
+}
+
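SubscribeMetadataRequest and SubscribeMetadataResponse back the server-streaming SubscribeMetadata RPC: a subscriber names itself, restricts events to a path prefix, replays history from since_ns onward, and then receives one EventNotification per change. A minimal consumer sketch under the same assumptions as the previous example (the client name and path prefix are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure()) // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := filer_pb.NewSeaweedFilerClient(conn)

	// Tail metadata events under /buckets starting from "now".
	stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
		ClientName: "example-watcher", // hypothetical client name
		PathPrefix: "/buckets",
		SinceNs:    time.Now().UnixNano(),
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv() // blocks; returns io.EOF when the server closes the stream
		if err != nil {
			log.Fatal(err)
		}
		ev := resp.GetEventNotification()
		// Chained getters are nil-safe, so absent old/new entries print as "".
		log.Printf("%s old=%q new=%q tsNs=%d",
			resp.GetDirectory(), ev.GetOldEntry().GetName(), ev.GetNewEntry().GetName(), resp.GetTsNs())
	}
}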
+type LogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"`
+ PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *LogEntry) Reset() {
+ *x = LogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogEntry) ProtoMessage() {}
+
+func (x *LogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead.
+func (*LogEntry) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *LogEntry) GetTsNs() int64 {
+ if x != nil {
+ return x.TsNs
+ }
+ return 0
+}
+
+func (x *LogEntry) GetPartitionKeyHash() int32 {
+ if x != nil {
+ return x.PartitionKeyHash
+ }
+ return 0
+}
+
+func (x *LogEntry) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type KeepConnectedRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"`
+ Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"`
+}
+
+func (x *KeepConnectedRequest) Reset() {
+ *x = KeepConnectedRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepConnectedRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepConnectedRequest) ProtoMessage() {}
+
+func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead.
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *KeepConnectedRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *KeepConnectedRequest) GetGrpcPort() uint32 {
+ if x != nil {
+ return x.GrpcPort
+ }
+ return 0
+}
+
+func (x *KeepConnectedRequest) GetResources() []string {
+ if x != nil {
+ return x.Resources
+ }
+ return nil
+}
+
+type KeepConnectedResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *KeepConnectedResponse) Reset() {
+ *x = KeepConnectedResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepConnectedResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepConnectedResponse) ProtoMessage() {}
+
+func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead.
+func (*KeepConnectedResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{36}
+}
+
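KeepConnectedRequest and the empty KeepConnectedResponse back the bidirectional-streaming KeepConnected RPC: a peer repeatedly announces its name, gRPC port, and held resources, and responses serve only as liveness acknowledgements. How often to send, and whether the server answers every heartbeat, is server policy — the 10-second cadence below is an assumption, not documented behavior. A sketch, assuming an established client as in the earlier examples:

package filerexample

import (
	"context"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// keepConnected sends a heartbeat on a fixed cadence until ctx is cancelled.
func keepConnected(ctx context.Context, client filer_pb.SeaweedFilerClient, name string, grpcPort uint32) error {
	stream, err := client.KeepConnected(ctx)
	if err != nil {
		return err
	}
	ticker := time.NewTicker(10 * time.Second) // assumed interval
	defer ticker.Stop()
	for {
		if err := stream.Send(&filer_pb.KeepConnectedRequest{
			Name:     name,
			GrpcPort: grpcPort,
		}); err != nil {
			return err
		}
		// Acknowledgements, if the server sends them, arrive as empty
		// KeepConnectedResponse messages via stream.Recv().
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}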
+type LocateBrokerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *LocateBrokerRequest) Reset() {
+ *x = LocateBrokerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LocateBrokerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocateBrokerRequest) ProtoMessage() {}
+
+func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead.
+func (*LocateBrokerRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *LocateBrokerRequest) GetResource() string {
+ if x != nil {
+ return x.Resource
+ }
+ return ""
+}
+
+type LocateBrokerResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"`
+ Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"`
+}
-func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
- if m != nil {
- return m.LocationsMap
+func (x *LocateBrokerResponse) Reset() {
+ *x = LocateBrokerResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type DeleteCollectionRequest struct {
- Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
+func (x *LocateBrokerResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
-func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*LocateBrokerResponse) ProtoMessage() {}
-func (m *DeleteCollectionRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-type DeleteCollectionResponse struct {
+// Deprecated: Use LocateBrokerResponse.ProtoReflect.Descriptor instead.
+func (*LocateBrokerResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{38}
}
-func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
-func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
-
-type StatisticsRequest struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
+func (x *LocateBrokerResponse) GetFound() bool {
+ if x != nil {
+ return x.Found
+ }
+ return false
}
-func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
-func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
-func (*StatisticsRequest) ProtoMessage() {}
-func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
-
-func (m *StatisticsRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource {
+ if x != nil {
+ return x.Resources
}
- return ""
+ return nil
}
-func (m *StatisticsRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// if found, send the exact address
+// if not found, send the full list of existing brokers
+type LocateBrokerResponse_Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"`
+ ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" json:"resource_count,omitempty"`
}
-func (m *StatisticsRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *LocateBrokerResponse_Resource) Reset() {
+ *x = LocateBrokerResponse_Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type StatisticsResponse struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
- TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"`
- FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
+func (x *LocateBrokerResponse_Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
-func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
-func (*StatisticsResponse) ProtoMessage() {}
-func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*LocateBrokerResponse_Resource) ProtoMessage() {}
-func (m *StatisticsResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *StatisticsResponse) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead.
+func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{38, 0}
}
-func (m *StatisticsResponse) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string {
+ if x != nil {
+ return x.GrpcAddresses
}
return ""
}
-func (m *StatisticsResponse) GetTotalSize() uint64 {
- if m != nil {
- return m.TotalSize
+func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 {
+ if x != nil {
+ return x.ResourceCount
}
return 0
}
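Per the generated comment above LocateBrokerResponse_Resource — the exact address when found, otherwise the full list of existing brokers — a caller has to branch on Found before using the resource list. A sketch of that branching, assuming an established SeaweedFilerClient as in the earlier examples:

package filerexample

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// locateBroker returns candidate gRPC addresses for a resource, following the
// found / not-found contract documented on LocateBrokerResponse.
func locateBroker(ctx context.Context, client filer_pb.SeaweedFilerClient, resource string) (found bool, addrs []string, err error) {
	resp, err := client.LocateBroker(ctx, &filer_pb.LocateBrokerRequest{Resource: resource})
	if err != nil {
		return false, nil, err
	}
	for _, r := range resp.GetResources() {
		addrs = append(addrs, r.GetGrpcAddresses())
	}
	if resp.GetFound() {
		// Exact owner: addrs holds the broker already serving this resource.
		return true, addrs, nil
	}
	// Not found: addrs is the full list of live brokers. A caller might pick the
	// entry with the smallest GetResourceCount() to spread load (an assumption,
	// not a documented policy).
	return false, addrs, nil
}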
-func (m *StatisticsResponse) GetUsedSize() uint64 {
- if m != nil {
- return m.UsedSize
- }
- return 0
+var File_filer_proto protoreflect.FileDescriptor
+
+var file_filer_proto_rawDesc = []byte{
+ 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x1b, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x45, 0x0a, 0x1c, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22,
+ 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x11,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72,
+ 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76,
+ 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x22, 0x3c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x9d,
+ 0x02, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12,
+ 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43,
+ 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x75, 0x73, 0x65,
+ 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64,
+ 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64,
+ 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65,
+ 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44,
+ 0x0a, 0x09, 0x46, 0x75, 0x6c, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x64,
+ 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x25, 0x0a,
+ 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65,
+ 0x6e, 0x74, 0x72, 0x79, 0x22, 0xef, 0x01, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x6f, 0x6c,
+ 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08,
+ 0x6f, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69,
+ 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6e, 0x65,
+ 0x77, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
+ 0x65, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50,
+ 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f,
+ 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xba, 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x43,
+ 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a,
+ 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f,
+ 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12,
+ 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66,
+ 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x03, 0x66, 0x69,
+ 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x2f,
+ 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69,
+ 0x6c, 0x65, 0x49, 0x64, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x64, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x23,
+ 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73,
+ 0x73, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a,
+ 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69,
+ 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69,
+ 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x22, 0x80, 0x03,
+ 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74,
+ 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75,
+ 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x63, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6d, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65,
+ 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75,
+ 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75,
+ 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72,
+ 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, 0x6d, 0x6c, 0x69,
+ 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x10,
+ 0x0a, 0x03, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x64, 0x35,
+ 0x22, 0xa3, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x15, 0x0a, 0x06,
+ 0x6f, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x6f, 0x45,
+ 0x78, 0x63, 0x6c, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f,
+ 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x2b, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72,
+ 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72,
+ 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+ 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x14, 0x41, 0x70,
+ 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43,
+ 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x17, 0x0a, 0x15,
+ 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09,
+ 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24,
+ 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72,
+ 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x52, 0x65,
+ 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x67, 0x6e, 0x6f, 0x72,
+ 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52,
+ 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x31, 0x0a,
+ 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9a, 0x01,
+ 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x6c,
+ 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12,
+ 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65,
+ 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12,
+ 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x74,
+ 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x41, 0x73, 0x73, 0x69,
+ 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, 0x73,
+ 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65,
+ 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x61,
+ 0x74, 0x68, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66,
+ 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69,
+ 0x6c, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
+ 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61,
+ 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12,
+ 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x34, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x3d, 0x0a,
+ 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x09, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3b, 0x0a, 0x08,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75,
+ 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f,
+ 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
+ 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73,
+ 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a,
+ 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22,
+ 0xc3, 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65,
+ 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x73,
+ 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65,
+ 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xcb, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c,
+ 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69,
+ 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x64, 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63,
+ 0x69, 0x70, 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, 0x70,
+ 0x68, 0x65, 0x72, 0x22, 0x77, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, 0x9a, 0x01, 0x0a,
+ 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14,
+ 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63,
+ 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70,
+ 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22,
+ 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45,
+ 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63,
+ 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x32,
+ 0x8d, 0x0b, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72,
+ 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73,
+ 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54,
+ 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69,
+ 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65,
+ 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d,
+ 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67,
+ 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73,
+ 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65,
+ 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a,
+ 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12,
+ 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f,
+ 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65,
+ 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42,
+ 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42,
+ 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a,
+ 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69,
+ 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f,
+ 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (m *StatisticsResponse) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
- }
- return 0
+var (
+ file_filer_proto_rawDescOnce sync.Once
+ file_filer_proto_rawDescData = file_filer_proto_rawDesc
+)
+
+func file_filer_proto_rawDescGZIP() []byte {
+ file_filer_proto_rawDescOnce.Do(func() {
+ file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(file_filer_proto_rawDescData)
+ })
+ return file_filer_proto_rawDescData
+}
+
+var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 42)
+var file_filer_proto_goTypes = []interface{}{
+ (*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
+ (*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
+ (*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest
+ (*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse
+ (*Entry)(nil), // 4: filer_pb.Entry
+ (*FullEntry)(nil), // 5: filer_pb.FullEntry
+ (*EventNotification)(nil), // 6: filer_pb.EventNotification
+ (*FileChunk)(nil), // 7: filer_pb.FileChunk
+ (*FileId)(nil), // 8: filer_pb.FileId
+ (*FuseAttributes)(nil), // 9: filer_pb.FuseAttributes
+ (*CreateEntryRequest)(nil), // 10: filer_pb.CreateEntryRequest
+ (*CreateEntryResponse)(nil), // 11: filer_pb.CreateEntryResponse
+ (*UpdateEntryRequest)(nil), // 12: filer_pb.UpdateEntryRequest
+ (*UpdateEntryResponse)(nil), // 13: filer_pb.UpdateEntryResponse
+ (*AppendToEntryRequest)(nil), // 14: filer_pb.AppendToEntryRequest
+ (*AppendToEntryResponse)(nil), // 15: filer_pb.AppendToEntryResponse
+ (*DeleteEntryRequest)(nil), // 16: filer_pb.DeleteEntryRequest
+ (*DeleteEntryResponse)(nil), // 17: filer_pb.DeleteEntryResponse
+ (*AtomicRenameEntryRequest)(nil), // 18: filer_pb.AtomicRenameEntryRequest
+ (*AtomicRenameEntryResponse)(nil), // 19: filer_pb.AtomicRenameEntryResponse
+ (*AssignVolumeRequest)(nil), // 20: filer_pb.AssignVolumeRequest
+ (*AssignVolumeResponse)(nil), // 21: filer_pb.AssignVolumeResponse
+ (*LookupVolumeRequest)(nil), // 22: filer_pb.LookupVolumeRequest
+ (*Locations)(nil), // 23: filer_pb.Locations
+ (*Location)(nil), // 24: filer_pb.Location
+ (*LookupVolumeResponse)(nil), // 25: filer_pb.LookupVolumeResponse
+ (*DeleteCollectionRequest)(nil), // 26: filer_pb.DeleteCollectionRequest
+ (*DeleteCollectionResponse)(nil), // 27: filer_pb.DeleteCollectionResponse
+ (*StatisticsRequest)(nil), // 28: filer_pb.StatisticsRequest
+ (*StatisticsResponse)(nil), // 29: filer_pb.StatisticsResponse
+ (*GetFilerConfigurationRequest)(nil), // 30: filer_pb.GetFilerConfigurationRequest
+ (*GetFilerConfigurationResponse)(nil), // 31: filer_pb.GetFilerConfigurationResponse
+ (*SubscribeMetadataRequest)(nil), // 32: filer_pb.SubscribeMetadataRequest
+ (*SubscribeMetadataResponse)(nil), // 33: filer_pb.SubscribeMetadataResponse
+ (*LogEntry)(nil), // 34: filer_pb.LogEntry
+ (*KeepConnectedRequest)(nil), // 35: filer_pb.KeepConnectedRequest
+ (*KeepConnectedResponse)(nil), // 36: filer_pb.KeepConnectedResponse
+ (*LocateBrokerRequest)(nil), // 37: filer_pb.LocateBrokerRequest
+ (*LocateBrokerResponse)(nil), // 38: filer_pb.LocateBrokerResponse
+ nil, // 39: filer_pb.Entry.ExtendedEntry
+ nil, // 40: filer_pb.LookupVolumeResponse.LocationsMapEntry
+ (*LocateBrokerResponse_Resource)(nil), // 41: filer_pb.LocateBrokerResponse.Resource
+}
+var file_filer_proto_depIdxs = []int32{
+ 4, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
+ 4, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
+ 7, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
+ 9, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
+ 39, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
+ 4, // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry
+ 4, // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry
+ 4, // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry
+ 8, // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId
+ 8, // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId
+ 4, // 10: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry
+ 4, // 11: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry
+ 7, // 12: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk
+ 24, // 13: filer_pb.Locations.locations:type_name -> filer_pb.Location
+ 40, // 14: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
+ 6, // 15: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
+ 41, // 16: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
+ 23, // 17: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
+ 0, // 18: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
+ 2, // 19: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
+ 10, // 20: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
+ 12, // 21: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
+ 14, // 22: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
+ 16, // 23: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
+ 18, // 24: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
+ 20, // 25: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
+ 22, // 26: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
+ 26, // 27: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
+ 28, // 28: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
+ 30, // 29: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
+ 32, // 30: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+ 32, // 31: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+ 35, // 32: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest
+ 37, // 33: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
+ 1, // 34: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
+ 3, // 35: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
+ 11, // 36: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
+ 13, // 37: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse
+ 15, // 38: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse
+ 17, // 39: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse
+ 19, // 40: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse
+ 21, // 41: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse
+ 25, // 42: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse
+ 27, // 43: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse
+ 29, // 44: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse
+ 31, // 45: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse
+ 33, // 46: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse
+ 33, // 47: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse
+ 36, // 48: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse
+ 38, // 49: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse
+ 34, // [34:50] is the sub-list for method output_type
+ 18, // [18:34] is the sub-list for method input_type
+ 18, // [18:18] is the sub-list for extension type_name
+ 18, // [18:18] is the sub-list for extension extendee
+ 0, // [0:18] is the sub-list for field type_name
}
-func init() {
- proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
- proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse")
- proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest")
- proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse")
- proto.RegisterType((*Entry)(nil), "filer_pb.Entry")
- proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification")
- proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk")
- proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes")
- proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
- proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
- proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
- proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
- proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
- proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
- proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
- proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
- proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest")
- proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
- proto.RegisterType((*Location)(nil), "filer_pb.Location")
- proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
- proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest")
- proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse")
- proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest")
- proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse")
+func init() { file_filer_proto_init() }
+func file_filer_proto_init() {
+ if File_filer_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupDirectoryEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupDirectoryEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListEntriesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListEntriesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FullEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EventNotification); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileChunk); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileId); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FuseAttributes); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AppendToEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AppendToEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AtomicRenameEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AtomicRenameEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Locations); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetFilerConfigurationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetFilerConfigurationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeMetadataRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeMetadataResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepConnectedRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepConnectedResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocateBrokerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocateBrokerResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocateBrokerResponse_Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_filer_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 42,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_filer_proto_goTypes,
+ DependencyIndexes: file_filer_proto_depIdxs,
+ MessageInfos: file_filer_proto_msgTypes,
+ }.Build()
+ File_filer_proto = out.File
+ file_filer_proto_rawDesc = nil
+ file_filer_proto_goTypes = nil
+ file_filer_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for SeaweedFiler service
+const _ = grpc.SupportPackageIsVersion6
+// SeaweedFilerClient is the client API for SeaweedFiler service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SeaweedFilerClient interface {
LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error)
- ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error)
+ ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error)
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
+ AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
+ AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
+ GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
+ SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
+ SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error)
+ KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error)
+ LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error)
}
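+
+// A minimal sketch of driving the regenerated client, assuming conn is an
+// already-dialed *grpc.ClientConn and ctx a context.Context (both hypothetical
+// here); ListEntries now returns a server stream that is drained with Recv
+// until io.EOF:
+//
+//	client := NewSeaweedFilerClient(conn)
+//	stream, err := client.ListEntries(ctx, &ListEntriesRequest{Directory: "/"})
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = resp.Entry
+//	}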
type seaweedFilerClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewSeaweedFilerClient(cc *grpc.ClientConn) SeaweedFilerClient {
+func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient {
return &seaweedFilerClient{cc}
}
func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) {
out := new(LookupDirectoryEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) {
- out := new(ListEntriesResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/ListEntries", in, out, c.cc, opts...)
+func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], "/filer_pb.SeaweedFiler/ListEntries", opts...)
if err != nil {
return nil, err
}
- return out, nil
+ x := &seaweedFilerListEntriesClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_ListEntriesClient interface {
+ Recv() (*ListEntriesResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerListEntriesClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) {
+ m := new(ListEntriesResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
}
func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
out := new(CreateEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -879,7 +3689,16 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq
func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) {
out := new(UpdateEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) {
+ out := new(AppendToEntryResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -888,7 +3707,16 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq
func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) {
out := new(DeleteEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) {
+ out := new(AtomicRenameEntryResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -897,7 +3725,7 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq
func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
out := new(AssignVolumeResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -906,7 +3734,7 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR
func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
out := new(LookupVolumeResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -915,7 +3743,7 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR
func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
out := new(DeleteCollectionResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, opts...)
if err != nil {
return nil, err
}
@@ -924,25 +3752,197 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol
func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) {
out := new(StatisticsResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) {
+ out := new(GetFilerConfigurationResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-// Server API for SeaweedFiler service
+func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerSubscribeMetadataClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_SubscribeMetadataClient interface {
+ Recv() (*SubscribeMetadataResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerSubscribeMetadataClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
+ m := new(SubscribeMetadataResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[2], "/filer_pb.SeaweedFiler/SubscribeLocalMetadata", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerSubscribeLocalMetadataClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_SubscribeLocalMetadataClient interface {
+ Recv() (*SubscribeMetadataResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerSubscribeLocalMetadataClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
+ m := new(SubscribeMetadataResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[3], "/filer_pb.SeaweedFiler/KeepConnected", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerKeepConnectedClient{stream}
+ return x, nil
+}
+
+type SeaweedFiler_KeepConnectedClient interface {
+ Send(*KeepConnectedRequest) error
+ Recv() (*KeepConnectedResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerKeepConnectedClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) {
+ m := new(KeepConnectedResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) {
+ out := new(LocateBrokerResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SeaweedFilerServer is the server API for SeaweedFiler service.
type SeaweedFilerServer interface {
LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error)
- ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error)
+ ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
+ AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
+ AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
+ GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
+ SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
+ SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error
+ KeepConnected(SeaweedFiler_KeepConnectedServer) error
+ LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error)
+}
+
+// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedFilerServer struct {
+}
+
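+// A minimal sketch of the embedding pattern (the filerServer type and its
+// single override are hypothetical, not part of this file): methods added to
+// the service later fall back to the Unimplemented stubs instead of breaking
+// the build:
+//
+//	type filerServer struct {
+//		UnimplementedSeaweedFilerServer
+//	}
+//
+//	func (s *filerServer) Statistics(ctx context.Context, req *StatisticsRequest) (*StatisticsResponse, error) {
+//		return &StatisticsResponse{}, nil
+//	}
+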
+func (*UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error {
+ return status.Errorf(codes.Unimplemented, "method ListEntries not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AppendToEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AssignVolume not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error {
+ return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error {
+ return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) KeepConnected(SeaweedFiler_KeepConnectedServer) error {
+ return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LocateBroker not implemented")
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -967,22 +3967,25 @@ func _SeaweedFiler_LookupDirectoryEntry_Handler(srv interface{}, ctx context.Con
return interceptor(ctx, in, info, handler)
}
-func _SeaweedFiler_ListEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListEntriesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SeaweedFilerServer).ListEntries(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/filer_pb.SeaweedFiler/ListEntries",
+func _SeaweedFiler_ListEntries_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(ListEntriesRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
}
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SeaweedFilerServer).ListEntries(ctx, req.(*ListEntriesRequest))
- }
- return interceptor(ctx, in, info, handler)
+ return srv.(SeaweedFilerServer).ListEntries(m, &seaweedFilerListEntriesServer{stream})
+}
+
+type SeaweedFiler_ListEntriesServer interface {
+ Send(*ListEntriesResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerListEntriesServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerListEntriesServer) Send(m *ListEntriesResponse) error {
+ return x.ServerStream.SendMsg(m)
}
func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
@@ -1021,6 +4024,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AppendToEntryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).AppendToEntry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteEntryRequest)
if err := dec(in); err != nil {
@@ -1039,6 +4060,24 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AtomicRenameEntryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/AtomicRenameEntry",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, req.(*AtomicRenameEntryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AssignVolumeRequest)
if err := dec(in); err != nil {
@@ -1111,6 +4150,110 @@ func _SeaweedFiler_Statistics_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetFilerConfigurationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/GetFilerConfiguration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, req.(*GetFilerConfigurationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SubscribeMetadataRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream})
+}
+
+type SeaweedFiler_SubscribeMetadataServer interface {
+ Send(*SubscribeMetadataResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerSubscribeMetadataServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SubscribeMetadataRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream})
+}
+
+type SeaweedFiler_SubscribeLocalMetadataServer interface {
+ Send(*SubscribeMetadataResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerSubscribeLocalMetadataServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream})
+}
+
+type SeaweedFiler_KeepConnectedServer interface {
+ Send(*KeepConnectedResponse) error
+ Recv() (*KeepConnectedRequest, error)
+ grpc.ServerStream
+}
+
+type seaweedFilerKeepConnectedServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) {
+ m := new(KeepConnectedRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LocateBrokerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).LocateBroker(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/LocateBroker",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -1119,10 +4262,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "LookupDirectoryEntry",
Handler: _SeaweedFiler_LookupDirectoryEntry_Handler,
},
- {
- MethodName: "ListEntries",
- Handler: _SeaweedFiler_ListEntries_Handler,
- },
{
MethodName: "CreateEntry",
Handler: _SeaweedFiler_CreateEntry_Handler,
@@ -1131,10 +4270,18 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateEntry",
Handler: _SeaweedFiler_UpdateEntry_Handler,
},
+ {
+ MethodName: "AppendToEntry",
+ Handler: _SeaweedFiler_AppendToEntry_Handler,
+ },
{
MethodName: "DeleteEntry",
Handler: _SeaweedFiler_DeleteEntry_Handler,
},
+ {
+ MethodName: "AtomicRenameEntry",
+ Handler: _SeaweedFiler_AtomicRenameEntry_Handler,
+ },
{
MethodName: "AssignVolume",
Handler: _SeaweedFiler_AssignVolume_Handler,
@@ -1151,94 +4298,37 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "Statistics",
Handler: _SeaweedFiler_Statistics_Handler,
},
+ {
+ MethodName: "GetFilerConfiguration",
+ Handler: _SeaweedFiler_GetFilerConfiguration_Handler,
+ },
+ {
+ MethodName: "LocateBroker",
+ Handler: _SeaweedFiler_LocateBroker_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "ListEntries",
+ Handler: _SeaweedFiler_ListEntries_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "SubscribeMetadata",
+ Handler: _SeaweedFiler_SubscribeMetadata_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "SubscribeLocalMetadata",
+ Handler: _SeaweedFiler_SubscribeLocalMetadata_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "KeepConnected",
+ Handler: _SeaweedFiler_KeepConnected_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
},
- Streams: []grpc.StreamDesc{},
Metadata: "filer.proto",
}
-
-func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 1291 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x8f, 0xdc, 0x44,
- 0x13, 0x8e, 0xe7, 0x2b, 0xe3, 0x9a, 0x99, 0xbc, 0xbb, 0x3d, 0xfb, 0x12, 0x6b, 0xb2, 0x1b, 0x26,
- 0x86, 0xa0, 0x8d, 0x88, 0x46, 0x51, 0xe0, 0x90, 0x10, 0x21, 0x91, 0x6c, 0x36, 0x52, 0xa4, 0x4d,
- 0x82, 0xbc, 0x09, 0x12, 0xe2, 0x60, 0x79, 0xed, 0x9e, 0xa1, 0xb5, 0x1e, 0x7b, 0x70, 0xb7, 0x37,
- 0x09, 0x7f, 0x82, 0x0b, 0x57, 0x0e, 0x9c, 0xf8, 0x17, 0x5c, 0xf8, 0x3f, 0xdc, 0xb9, 0xa1, 0xae,
- 0x6e, 0x7b, 0xda, 0x63, 0xef, 0x06, 0x84, 0x72, 0xeb, 0x7e, 0xaa, 0xba, 0xbe, 0xfa, 0xe9, 0x2a,
- 0x1b, 0x06, 0x73, 0x16, 0xd3, 0x6c, 0xb6, 0xca, 0x52, 0x91, 0x92, 0x3e, 0x6e, 0xfc, 0xd5, 0x89,
- 0xfb, 0x02, 0xae, 0x1d, 0xa5, 0xe9, 0x69, 0xbe, 0x7a, 0xcc, 0x32, 0x1a, 0x8a, 0x34, 0x7b, 0x7b,
- 0x98, 0x88, 0xec, 0xad, 0x47, 0x7f, 0xc8, 0x29, 0x17, 0x64, 0x17, 0xec, 0xa8, 0x10, 0x38, 0xd6,
- 0xd4, 0xda, 0xb7, 0xbd, 0x35, 0x40, 0x08, 0x74, 0x92, 0x60, 0x49, 0x9d, 0x16, 0x0a, 0x70, 0xed,
- 0x1e, 0xc2, 0x6e, 0xb3, 0x41, 0xbe, 0x4a, 0x13, 0x4e, 0xc9, 0x4d, 0xe8, 0x52, 0x09, 0xa0, 0xb5,
- 0xc1, 0xdd, 0xff, 0xcd, 0x8a, 0x50, 0x66, 0x4a, 0x4f, 0x49, 0xdd, 0xdf, 0x2d, 0x20, 0x47, 0x8c,
- 0x0b, 0x09, 0x32, 0xca, 0xff, 0x59, 0x3c, 0x1f, 0x40, 0x6f, 0x95, 0xd1, 0x39, 0x7b, 0xa3, 0x23,
- 0xd2, 0x3b, 0x72, 0x1b, 0xb6, 0xb9, 0x08, 0x32, 0xf1, 0x24, 0x4b, 0x97, 0x4f, 0x58, 0x4c, 0x9f,
- 0xcb, 0xa0, 0xdb, 0xa8, 0x52, 0x17, 0x90, 0x19, 0x10, 0x96, 0x84, 0x71, 0xce, 0xd9, 0x19, 0x3d,
- 0x2e, 0xa4, 0x4e, 0x67, 0x6a, 0xed, 0xf7, 0xbd, 0x06, 0x09, 0xd9, 0x81, 0x6e, 0xcc, 0x96, 0x4c,
- 0x38, 0xdd, 0xa9, 0xb5, 0x3f, 0xf2, 0xd4, 0xc6, 0xfd, 0x0a, 0xc6, 0x95, 0xf8, 0x75, 0xfa, 0xb7,
- 0xe0, 0x32, 0x55, 0x90, 0x63, 0x4d, 0xdb, 0x4d, 0x05, 0x28, 0xe4, 0xee, 0x2f, 0x2d, 0xe8, 0x22,
- 0x54, 0xd6, 0xd9, 0x5a, 0xd7, 0x99, 0xdc, 0x80, 0x21, 0xe3, 0xfe, 0xba, 0x18, 0x2d, 0x8c, 0x6f,
- 0xc0, 0x78, 0x59, 0x77, 0xf2, 0x29, 0xf4, 0xc2, 0xef, 0xf3, 0xe4, 0x94, 0x3b, 0x6d, 0x74, 0x35,
- 0x5e, 0xbb, 0x92, 0xc9, 0x1e, 0x48, 0x99, 0xa7, 0x55, 0xc8, 0x3d, 0x80, 0x40, 0x88, 0x8c, 0x9d,
- 0xe4, 0x82, 0x72, 0xcc, 0x76, 0x70, 0xd7, 0x31, 0x0e, 0xe4, 0x9c, 0x3e, 0x2c, 0xe5, 0x9e, 0xa1,
- 0x4b, 0xee, 0x43, 0x9f, 0xbe, 0x11, 0x34, 0x89, 0x68, 0xe4, 0x74, 0xd1, 0xd1, 0xde, 0x46, 0x4e,
- 0xb3, 0x43, 0x2d, 0x57, 0x19, 0x96, 0xea, 0x93, 0x07, 0x30, 0xaa, 0x88, 0xc8, 0x16, 0xb4, 0x4f,
- 0x69, 0x71, 0xb3, 0x72, 0x29, 0xab, 0x7b, 0x16, 0xc4, 0xb9, 0x22, 0xd9, 0xd0, 0x53, 0x9b, 0x2f,
- 0x5a, 0xf7, 0x2c, 0xf7, 0x67, 0x0b, 0xb6, 0x0f, 0xcf, 0x68, 0x22, 0x9e, 0xa7, 0x82, 0xcd, 0x59,
- 0x18, 0x08, 0x96, 0x26, 0xe4, 0x36, 0xd8, 0x69, 0x1c, 0xf9, 0x17, 0x72, 0xac, 0x9f, 0xc6, 0xda,
- 0xdf, 0x6d, 0xb0, 0x13, 0xfa, 0x5a, 0x6b, 0xb7, 0xce, 0xd1, 0x4e, 0xe8, 0x6b, 0xa5, 0xfd, 0x11,
- 0x8c, 0x22, 0x1a, 0x53, 0x41, 0xfd, 0xb2, 0xae, 0xb2, 0xe8, 0x43, 0x05, 0x62, 0x3d, 0xb9, 0xfb,
- 0xab, 0x05, 0x76, 0x59, 0x5e, 0x72, 0x15, 0x2e, 0x4b, 0x73, 0x3e, 0x8b, 0x74, 0x52, 0x3d, 0xb9,
- 0x7d, 0x1a, 0x49, 0xae, 0xa6, 0xf3, 0x39, 0xa7, 0x02, 0xdd, 0xb6, 0x3d, 0xbd, 0x93, 0x77, 0xcd,
- 0xd9, 0x8f, 0x8a, 0x9e, 0x1d, 0x0f, 0xd7, 0xb2, 0x06, 0x4b, 0xc1, 0x96, 0x14, 0xaf, 0xa5, 0xed,
- 0xa9, 0x0d, 0x19, 0x43, 0x97, 0xfa, 0x22, 0x58, 0x20, 0xef, 0x6c, 0xaf, 0x43, 0x5f, 0x06, 0x0b,
- 0xf2, 0x31, 0x5c, 0xe1, 0x69, 0x9e, 0x85, 0xd4, 0x2f, 0xdc, 0xf6, 0x50, 0x3a, 0x54, 0xe8, 0x13,
- 0x74, 0xee, 0xfe, 0xd9, 0x82, 0x2b, 0xd5, 0x1b, 0x25, 0xd7, 0xc0, 0xc6, 0x13, 0xe8, 0xdc, 0x42,
- 0xe7, 0xd8, 0x25, 0x8e, 0x2b, 0x01, 0xb4, 0xcc, 0x00, 0x8a, 0x23, 0xcb, 0x34, 0x52, 0xf1, 0x8e,
- 0xd4, 0x91, 0x67, 0x69, 0x44, 0xe5, 0x4d, 0xe6, 0x2c, 0xc2, 0x88, 0x47, 0x9e, 0x5c, 0x4a, 0x64,
- 0xc1, 0x22, 0xfd, 0x4a, 0xe4, 0x52, 0xd6, 0x20, 0xcc, 0xd0, 0x6e, 0x4f, 0xd5, 0x40, 0xed, 0x64,
- 0x0d, 0x96, 0x12, 0xbd, 0xac, 0x12, 0x93, 0x6b, 0x32, 0x85, 0x41, 0x46, 0x57, 0xb1, 0xbe, 0x66,
- 0xa7, 0x8f, 0x22, 0x13, 0x22, 0xd7, 0x01, 0xc2, 0x34, 0x8e, 0x69, 0x88, 0x0a, 0x36, 0x2a, 0x18,
- 0x88, 0xbc, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xa9, 0xb5, 0xdf, 0xf5, 0x7a, 0x42, 0xc4,
- 0xc7, 0x34, 0x94, 0x79, 0xe4, 0x9c, 0x66, 0x3e, 0xbe, 0xb1, 0x01, 0x9e, 0xeb, 0x4b, 0x00, 0xbb,
- 0xc1, 0x1e, 0xc0, 0x22, 0x4b, 0xf3, 0x95, 0x92, 0x0e, 0xa7, 0x6d, 0xd9, 0x72, 0x10, 0x41, 0xf1,
- 0x4d, 0xb8, 0xc2, 0xdf, 0x2e, 0x63, 0x96, 0x9c, 0xfa, 0x22, 0xc8, 0x16, 0x54, 0x38, 0x23, 0x34,
- 0x30, 0xd2, 0xe8, 0x4b, 0x04, 0xdd, 0x6f, 0x81, 0x1c, 0x64, 0x34, 0x10, 0xf4, 0x5f, 0x74, 0xd7,
- 0xb2, 0x53, 0xb6, 0x2e, 0xec, 0x94, 0xff, 0x87, 0x71, 0xc5, 0xb4, 0x6a, 0x34, 0xd2, 0xe3, 0xab,
- 0x55, 0xf4, 0xbe, 0x3c, 0x56, 0x4c, 0x6b, 0x8f, 0x3f, 0x59, 0x40, 0x1e, 0xe3, 0x4b, 0xf8, 0x6f,
- 0x23, 0x44, 0x72, 0x58, 0xb6, 0x36, 0xf5, 0xd2, 0xa2, 0x40, 0x04, 0xba, 0xf9, 0x0e, 0x19, 0x57,
- 0xf6, 0x1f, 0x07, 0x22, 0xd0, 0x0d, 0x30, 0xa3, 0x61, 0x9e, 0xc9, 0x7e, 0x8c, 0xbc, 0xc2, 0x06,
- 0xe8, 0x15, 0x90, 0x0c, 0xb4, 0x12, 0x90, 0x0e, 0xf4, 0x37, 0x0b, 0xc6, 0x0f, 0x39, 0x67, 0x8b,
- 0xe4, 0x9b, 0x34, 0xce, 0x97, 0xb4, 0x88, 0x74, 0x07, 0xba, 0x61, 0x9a, 0x27, 0x02, 0xa3, 0xec,
- 0x7a, 0x6a, 0xb3, 0x41, 0xab, 0x56, 0x8d, 0x56, 0x1b, 0xc4, 0x6c, 0xd7, 0x89, 0x69, 0x10, 0xaf,
- 0x53, 0x21, 0xde, 0x87, 0x30, 0x90, 0xe9, 0xf9, 0x21, 0x4d, 0x04, 0xcd, 0xf4, 0x3b, 0x06, 0x09,
- 0x1d, 0x20, 0xe2, 0x9e, 0xc1, 0x4e, 0x35, 0x50, 0x3d, 0x45, 0xce, 0xed, 0x2a, 0xf2, 0xd5, 0x65,
- 0xb1, 0x8e, 0x52, 0x2e, 0x25, 0x7f, 0x57, 0xf9, 0x49, 0xcc, 0x42, 0x5f, 0x0a, 0x54, 0x74, 0xb6,
- 0x42, 0x5e, 0x65, 0xf1, 0x3a, 0xe7, 0x8e, 0x91, 0xb3, 0xfb, 0x39, 0x8c, 0xd5, 0x10, 0xaf, 0x16,
- 0x68, 0x0f, 0xe0, 0x0c, 0x01, 0x9f, 0x45, 0x6a, 0x7e, 0xd9, 0x9e, 0xad, 0x90, 0xa7, 0x11, 0x77,
- 0xbf, 0x04, 0xfb, 0x28, 0x55, 0x39, 0x73, 0x72, 0x07, 0xec, 0xb8, 0xd8, 0xe8, 0x51, 0x47, 0xd6,
- 0x7c, 0x2a, 0xf4, 0xbc, 0xb5, 0x92, 0xfb, 0x00, 0xfa, 0x05, 0x5c, 0xe4, 0x61, 0x9d, 0x97, 0x47,
- 0x6b, 0x23, 0x0f, 0xf7, 0x0f, 0x0b, 0x76, 0xaa, 0x21, 0xeb, 0x52, 0xbd, 0x82, 0x51, 0xe9, 0xc2,
- 0x5f, 0x06, 0x2b, 0x1d, 0xcb, 0x1d, 0x33, 0x96, 0xfa, 0xb1, 0x32, 0x40, 0xfe, 0x2c, 0x58, 0x29,
- 0xf6, 0x0c, 0x63, 0x03, 0x9a, 0xbc, 0x84, 0xed, 0x9a, 0x4a, 0xc3, 0xf4, 0xba, 0x65, 0x4e, 0xaf,
- 0xca, 0x04, 0x2e, 0x4f, 0x9b, 0x23, 0xed, 0x3e, 0x5c, 0x55, 0x84, 0x3d, 0x28, 0xf9, 0x55, 0xd4,
- 0xbe, 0x4a, 0x43, 0x6b, 0x93, 0x86, 0xee, 0x04, 0x9c, 0xfa, 0x51, 0x4d, 0xf8, 0x05, 0x6c, 0x1f,
- 0x8b, 0x40, 0x30, 0x2e, 0x58, 0x58, 0x7e, 0x4a, 0x6d, 0xf0, 0xd6, 0x7a, 0x57, 0x43, 0xad, 0x33,
- 0x7f, 0x0b, 0xda, 0x42, 0x14, 0x9c, 0x92, 0x4b, 0x79, 0x0b, 0xc4, 0xf4, 0xa4, 0xef, 0xe0, 0x3d,
- 0xb8, 0x92, 0x7c, 0x10, 0xa9, 0x08, 0x62, 0x35, 0xb0, 0x3a, 0x38, 0xb0, 0x6c, 0x44, 0x70, 0x62,
- 0xa9, 0x9e, 0x1e, 0x29, 0x69, 0x57, 0x8d, 0x33, 0x09, 0xa0, 0x70, 0x0f, 0x00, 0x9f, 0x8f, 0x62,
- 0x7e, 0x4f, 0x9d, 0x95, 0xc8, 0x81, 0x04, 0xee, 0xfe, 0xd5, 0x85, 0xe1, 0x31, 0x0d, 0x5e, 0x53,
- 0x1a, 0xc9, 0x79, 0x99, 0x91, 0x45, 0xc1, 0xad, 0xea, 0x37, 0x2d, 0xb9, 0xb9, 0x49, 0xa2, 0xc6,
- 0x8f, 0xe8, 0xc9, 0x27, 0xef, 0x52, 0xd3, 0xd7, 0x74, 0x89, 0x1c, 0xc1, 0xc0, 0xf8, 0x68, 0x24,
- 0xbb, 0xc6, 0xc1, 0xda, 0xb7, 0xf0, 0x64, 0xef, 0x1c, 0xa9, 0x69, 0xcd, 0x98, 0x0c, 0xa6, 0xb5,
- 0xfa, 0x2c, 0x32, 0xad, 0x35, 0x8d, 0x13, 0xb4, 0x66, 0x74, 0x7d, 0xd3, 0x5a, 0x7d, 0xce, 0x98,
- 0xd6, 0x9a, 0x46, 0x05, 0x5a, 0x33, 0x5a, 0xb3, 0x69, 0xad, 0x3e, 0x42, 0x4c, 0x6b, 0x4d, 0xfd,
- 0xfc, 0x12, 0x79, 0x01, 0x43, 0xb3, 0x4f, 0x12, 0xe3, 0x40, 0x43, 0xa3, 0x9f, 0x5c, 0x3f, 0x4f,
- 0x6c, 0x1a, 0x34, 0xdb, 0x82, 0x69, 0xb0, 0xa1, 0x31, 0x9a, 0x06, 0x9b, 0xba, 0x89, 0x7b, 0x89,
- 0x7c, 0x07, 0x5b, 0x9b, 0xcf, 0x93, 0xdc, 0xd8, 0x4c, 0xab, 0xf6, 0xea, 0x27, 0xee, 0x45, 0x2a,
- 0xa5, 0xf1, 0xa7, 0x00, 0xeb, 0x57, 0x47, 0xae, 0xad, 0xcf, 0xd4, 0x5e, 0xfd, 0x64, 0xb7, 0x59,
- 0x58, 0x98, 0x7a, 0x74, 0x1d, 0xb6, 0xb8, 0xa2, 0xfe, 0x9c, 0xcf, 0xc2, 0x98, 0xd1, 0x44, 0x3c,
- 0x02, 0x7c, 0x05, 0x5f, 0xcb, 0x3f, 0xc7, 0x93, 0x1e, 0xfe, 0x40, 0x7e, 0xf6, 0x77, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x8d, 0x38, 0xa9, 0x9f, 0x4f, 0x0e, 0x00, 0x00,
-}
diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go
new file mode 100644
index 000000000..535a3c247
--- /dev/null
+++ b/weed/pb/filer_pb/filer_client.go
@@ -0,0 +1,237 @@
+package filer_pb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
+)
+
+type FilerClient interface {
+ WithFilerClient(fn func(SeaweedFilerClient) error) error
+ AdjustedUrl(hostAndPort string) string
+}
+
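+// A minimal sketch of satisfying this interface, assuming a pre-dialed
+// *grpc.ClientConn named conn (hypothetical); real callers typically add
+// connection caching and retries inside WithFilerClient:
+//
+//	type simpleFilerClient struct{ conn *grpc.ClientConn }
+//
+//	func (c *simpleFilerClient) WithFilerClient(fn func(SeaweedFilerClient) error) error {
+//		return fn(NewSeaweedFilerClient(c.conn))
+//	}
+//
+//	func (c *simpleFilerClient) AdjustedUrl(hostAndPort string) string {
+//		return hostAndPort
+//	}
+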
+func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) {
+
+ dir, name := fullFilePath.DirAndName()
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &LookupDirectoryEntryRequest{
+ Directory: dir,
+ Name: name,
+ }
+
+ // glog.V(3).Infof("read %s request: %v", fullFilePath, request)
+ resp, err := LookupEntry(client, request)
+ if err != nil {
+ if err == ErrNotFound {
+ return nil
+ }
+ glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
+ return err
+ }
+
+ if resp.Entry == nil {
+ // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
+ return nil
+ }
+
+ entry = resp.Entry
+ return nil
+ })
+
+ return
+}
+
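+// Usage sketch (fc is a hypothetical FilerClient): a missing path yields a
+// nil entry and a nil error, because ErrNotFound is swallowed above:
+//
+//	entry, err := GetEntry(fc, util.FullPath("/some/dir/file.txt"))
+//	if err == nil && entry == nil {
+//		// the path does not exist
+//	}
+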
+// EachEntryFunction is called once per listed entry; isLast marks the final entry.
+type EachEntryFunction func(entry *Entry, isLast bool) error
+
+func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction) (err error) {
+
+ return doList(filerClient, fullDirPath, prefix, fn, "", false, math.MaxUint32)
+
+}
+
+func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) {
+
+ return doList(filerClient, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit)
+
+}
+
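+// Usage sketch (fc hypothetical): walk one directory level; the isLast flag
+// marks the final entry of the listing:
+//
+//	err := ReadDirAllEntries(fc, util.FullPath("/some/dir"), "", func(entry *Entry, isLast bool) error {
+//		fmt.Println(entry.Name)
+//		return nil
+//	})
+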
+func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) {
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &ListEntriesRequest{
+ Directory: string(fullDirPath),
+ Prefix: prefix,
+ StartFromFileName: startFrom,
+ Limit: limit,
+ InclusiveStartFrom: inclusive,
+ }
+
+ glog.V(3).Infof("read directory: %v", request)
+ ctx, cancel := context.WithCancel(context.Background())
+ // cancel the stream's context on every return path, including the error one
+ defer cancel()
+ stream, err := client.ListEntries(ctx, request)
+ if err != nil {
+ return fmt.Errorf("list %s: %v", fullDirPath, err)
+ }
+
+ var prevEntry *Entry
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ if prevEntry != nil {
+ if err := fn(prevEntry, true); err != nil {
+ return err
+ }
+ }
+ break
+ } else {
+ return recvErr
+ }
+ }
+ if prevEntry != nil {
+ if err := fn(prevEntry, false); err != nil {
+ return err
+ }
+ }
+ prevEntry = resp.Entry
+ }
+
+ return nil
+
+ })
+
+ return
+}
+
+func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &LookupDirectoryEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ }
+
+ glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ resp, err := LookupEntry(client, request)
+ if err != nil {
+ if err == ErrNotFound {
+ exists = false
+ return nil
+ }
+ glog.V(0).Infof("exists entry %v: %v", request, err)
+ return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ }
+
+ exists = resp.Entry.IsDirectory == isDirectory
+
+ return nil
+ })
+
+ return
+}
+
+func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ entry := &Entry{
+ Name: dirName,
+ IsDirectory: true,
+ Attributes: &FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+
+ if fn != nil {
+ fn(entry)
+ }
+
+ request := &CreateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(1).Infof("mkdir: %v", request)
+ if err := CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("mkdir %v: %v", request, err)
+ return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
+ }
+
+ return nil
+ })
+}
+
+func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ entry := &Entry{
+ Name: fileName,
+ IsDirectory: false,
+ Attributes: &FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0770),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ Chunks: chunks,
+ }
+
+ request := &CreateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+ if err := CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("create file %v:%v", request, err)
+ return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
+ }
+
+ return nil
+ })
+}
+
+func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ if resp, err := client.DeleteEntry(context.Background(), &DeleteEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: name,
+ IsDeleteData: isDeleteData,
+ IsRecursive: isRecursive,
+ IgnoreRecursiveError: ignoreRecursiveErr,
+ IsFromOtherCluster: isFromOtherCluster,
+ }); err != nil {
+ return err
+ } else {
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ }
+
+ return nil
+
+ })
+}
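
The `FilerClient` interface above is the seam that lets these package-level helpers (`GetEntry`, `List`, `Exists`, `Mkdir`, `MkFile`, `Remove`) run against any transport that can hand them a `SeaweedFilerClient`. A minimal sketch of a caller, assuming the cached-connection helpers added in `weed/pb/grpc_client_server.go` later in this change; the `demoFilerClient` adapter, the address, and the insecure dial option are illustrative, not part of the diff:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// demoFilerClient is a hypothetical adapter that satisfies filer_pb.FilerClient.
type demoFilerClient struct {
	filerGrpcAddress string
	grpcDialOption   grpc.DialOption
}

func (c *demoFilerClient) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
	// Delegate to the shared, cached gRPC connection for this filer address.
	return pb.WithGrpcFilerClient(c.filerGrpcAddress, c.grpcDialOption, fn)
}

func (c *demoFilerClient) AdjustedUrl(hostAndPort string) string {
	return hostAndPort // no address rewriting in this sketch
}

func main() {
	client := &demoFilerClient{
		filerGrpcAddress: "localhost:18888", // illustrative filer gRPC address
		grpcDialOption:   grpc.WithInsecure(),
	}

	// fn is called once per entry; isLast marks the final entry of the listing.
	_ = filer_pb.ReadDirAllEntries(client, util.FullPath("/"), "", func(entry *filer_pb.Entry, isLast bool) error {
		fmt.Println(entry.Name, entry.IsDirectory)
		return nil
	})
}
```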
diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go
new file mode 100644
index 000000000..4e5b65f12
--- /dev/null
+++ b/weed/pb/filer_pb/filer_client_bfs.go
@@ -0,0 +1,63 @@
+package filer_pb
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+
+ K := 5 // number of concurrent directory-listing workers
+
+ var jobQueueWg sync.WaitGroup
+ queue := util.NewQueue()
+ jobQueueWg.Add(1)
+ queue.Enqueue(parentPath)
+ var isTerminating bool
+
+ for i := 0; i < K; i++ {
+ go func() {
+ for {
+ if isTerminating {
+ break
+ }
+ t := queue.Dequeue()
+ if t == nil {
+ time.Sleep(329 * time.Millisecond) // queue is empty; back off briefly before polling again
+ continue
+ }
+ dir := t.(util.FullPath)
+ processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn)
+ if processErr != nil {
+ err = processErr // best effort: the most recent worker error wins
+ }
+ jobQueueWg.Done()
+ }
+ }()
+ }
+ jobQueueWg.Wait()
+ isTerminating = true
+ return
+}
+
+func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+
+ return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error {
+
+ fn(parentPath, entry)
+
+ if entry.IsDirectory {
+ subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
+ if parentPath == "/" {
+ subDir = "/" + entry.Name
+ }
+ jobQueueWg.Add(1)
+ queue.Enqueue(util.FullPath(subDir))
+ }
+ return nil
+ })
+
+}
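
`TraverseBfs` walks the tree with a fixed pool of five workers sharing one queue: each discovered directory does `jobQueueWg.Add(1)` before being enqueued and `Done()` after it has been listed, so `Wait()` returns exactly when no directory is queued or in flight; idle workers poll with a short sleep rather than blocking. The callback can therefore fire concurrently from several goroutines. A minimal usage sketch, reusing the hypothetical `demoFilerClient` adapter from the sketch above:

```go
// Print every path under the root. fn may run concurrently, so anything it
// touches besides its arguments needs its own synchronization.
err := filer_pb.TraverseBfs(client, util.FullPath("/"), func(parentPath util.FullPath, entry *filer_pb.Entry) {
	fmt.Println(string(parentPath) + "/" + entry.Name)
})
if err != nil {
	fmt.Println("traverse:", err)
}
```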
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
new file mode 100644
index 000000000..96ab2154f
--- /dev/null
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -0,0 +1,105 @@
+package filer_pb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func toFileIdObject(fileIdStr string) (*FileId, error) {
+ t, err := needle.ParseFileIdFromString(fileIdStr)
+ if err != nil {
+ return nil, err
+ }
+ return &FileId{
+ VolumeId: uint32(t.VolumeId),
+ Cookie: uint32(t.Cookie),
+ FileKey: uint64(t.Key),
+ }, nil
+
+}
+
+func (fid *FileId) toFileIdString() string {
+ return needle.NewFileId(needle.VolumeId(fid.VolumeId), fid.FileKey, fid.Cookie).String()
+}
+
+func (c *FileChunk) GetFileIdString() string {
+ if c.FileId != "" {
+ return c.FileId
+ }
+ if c.Fid != nil {
+ c.FileId = c.Fid.toFileIdString()
+ return c.FileId
+ }
+ return ""
+}
+
+func BeforeEntrySerialization(chunks []*FileChunk) {
+
+ for _, chunk := range chunks {
+
+ if chunk.FileId != "" {
+ if fid, err := toFileIdObject(chunk.FileId); err == nil {
+ chunk.Fid = fid
+ chunk.FileId = ""
+ }
+ }
+
+ if chunk.SourceFileId != "" {
+ if fid, err := toFileIdObject(chunk.SourceFileId); err == nil {
+ chunk.SourceFid = fid
+ chunk.SourceFileId = ""
+ }
+ }
+
+ }
+}
+
+func AfterEntryDeserialization(chunks []*FileChunk) {
+
+ for _, chunk := range chunks {
+
+ if chunk.Fid != nil && chunk.FileId == "" {
+ chunk.FileId = chunk.Fid.toFileIdString()
+ }
+
+ if chunk.SourceFid != nil && chunk.SourceFileId == "" {
+ chunk.SourceFileId = chunk.SourceFid.toFileIdString()
+ }
+
+ }
+}
+
+func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
+ resp, err := client.CreateEntry(context.Background(), request)
+ if err != nil {
+ glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+ return fmt.Errorf("CreateEntry: %v", err)
+ }
+ if resp.Error != "" {
+ glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+ return fmt.Errorf("CreateEntry : %v", resp.Error)
+ }
+ return nil
+}
+
+func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) {
+ resp, err := client.LookupDirectoryEntry(context.Background(), request)
+ if err != nil {
+ if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
+ return nil, ErrNotFound
+ }
+ glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
+ return nil, fmt.Errorf("LookupEntry1: %v", err)
+ }
+ if resp.Entry == nil {
+ return nil, ErrNotFound
+ }
+ return resp, nil
+}
+
+var ErrNotFound = errors.New("filer: no entry is found in filer store")
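
`BeforeEntrySerialization` and `AfterEntryDeserialization` exist to shrink stored entries: a file id such as `3,01637037d6` is kept as a human-readable string in memory but swapped for the compact `FileId` struct (volume id, file key, cookie) before an entry is persisted, and swapped back after it is read; `TestFileIdSize` below measures the size difference. A minimal round-trip sketch, with an illustrative file id:

```go
// The chunk starts with the string form of the file id.
chunk := &filer_pb.FileChunk{FileId: "3,01637037d6"} // illustrative file id

// Before persisting: the string form is parsed into the compact Fid struct.
filer_pb.BeforeEntrySerialization([]*filer_pb.FileChunk{chunk})
// now chunk.FileId == "" and chunk.Fid carries volume id, file key, cookie

// After reading back: the string form is regenerated from the struct.
filer_pb.AfterEntryDeserialization([]*filer_pb.FileChunk{chunk})
// now chunk.FileId == "3,01637037d6" again
```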
diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go
new file mode 100644
index 000000000..d4468c011
--- /dev/null
+++ b/weed/pb/filer_pb/filer_pb_helper_test.go
@@ -0,0 +1,22 @@
+package filer_pb
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+)
+
+func TestFileIdSize(t *testing.T) {
+ fileIdStr := "11745,0293434534cbb9892b"
+
+ fid, err := toFileIdObject(fileIdStr)
+ if err != nil {
+ t.Fatalf("parse file id %s: %v", fileIdStr, err)
+ }
+ bytes, err := proto.Marshal(fid)
+ if err != nil {
+ t.Fatalf("marshal file id: %v", err)
+ }
+
+ t.Logf("string form: %d bytes, protobuf form: %d bytes", len(fileIdStr), len(bytes))
+}
diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go
new file mode 100644
index 000000000..ce706e282
--- /dev/null
+++ b/weed/pb/grpc_client_server.go
@@ -0,0 +1,197 @@
+package pb
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+const (
+ Max_Message_Size = 1 << 30 // 1 GB
+)
+
+var (
+ // cache grpc connections
+ grpcClients = make(map[string]*grpc.ClientConn)
+ grpcClientsLock sync.Mutex
+)
+
+func init() {
+ http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024
+}
+
+func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
+ var options []grpc.ServerOption
+ options = append(options,
+ grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: 10 * time.Second, // wait time before ping if no activity
+ Timeout: 20 * time.Second, // ping timeout
+ }),
+ grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: 60 * time.Second, // min time a client should wait before sending a ping
+ PermitWithoutStream: false,
+ }),
+ grpc.MaxRecvMsgSize(Max_Message_Size),
+ grpc.MaxSendMsgSize(Max_Message_Size),
+ )
+ for _, opt := range opts {
+ if opt != nil {
+ options = append(options, opt)
+ }
+ }
+ return grpc.NewServer(options...)
+}
+
+func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ // opts = append(opts, grpc.WithBlock())
+ // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
+ var options []grpc.DialOption
+ options = append(options,
+ // grpc.WithInsecure(),
+ grpc.WithDefaultCallOptions(
+ grpc.MaxCallSendMsgSize(Max_Message_Size),
+ grpc.MaxCallRecvMsgSize(Max_Message_Size),
+ ),
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{
+ Time: 30 * time.Second, // client ping server if no activity for this long
+ Timeout: 20 * time.Second,
+ PermitWithoutStream: false,
+ }))
+ for _, opt := range opts {
+ if opt != nil {
+ options = append(options, opt)
+ }
+ }
+ return grpc.DialContext(ctx, address, options...)
+}
+
+func getOrCreateConnection(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+
+ grpcClientsLock.Lock()
+ defer grpcClientsLock.Unlock()
+
+ existingConnection, found := grpcClients[address]
+ if found {
+ return existingConnection, nil
+ }
+
+ grpcConnection, err := GrpcDial(context.Background(), address, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("fail to dial %s: %v", address, err)
+ }
+
+ grpcClients[address] = grpcConnection
+
+ return grpcConnection, nil
+}
+
+func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
+
+ grpcConnection, err := getOrCreateConnection(address, opts...)
+ if err != nil {
+ return fmt.Errorf("getOrCreateConnection %s: %v", address, err)
+ }
+ return fn(grpcConnection)
+}
+
+func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) {
+ colonIndex := strings.LastIndex(server, ":")
+ if colonIndex < 0 {
+ return "", fmt.Errorf("server should have hostname:port format: %v", server)
+ }
+
+ port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64)
+ if parseErr != nil {
+ return "", fmt.Errorf("server port parse error: %v", parseErr)
+ }
+
+ grpcPort := int(port) + 10000
+
+ return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil
+}
+
+func ServerToGrpcAddress(server string) (serverGrpcAddress string) {
+ hostnameAndPort := strings.Split(server, ":")
+ if len(hostnameAndPort) != 2 {
+ return fmt.Sprintf("unexpected server address: %s", server)
+ }
+
+ port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1])
+ }
+
+ grpcPort := int(port) + 10000
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort)
+}
+
+func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {
+
+ masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr)
+ }
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := master_pb.NewSeaweedClient(grpcConnection)
+ return fn(client)
+ }, masterGrpcAddress, grpcDialOption)
+
+}
+
+func WithBrokerGrpcClient(brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error {
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := messaging_pb.NewSeaweedMessagingClient(grpcConnection)
+ return fn(client)
+ }, brokerGrpcAddress, grpcDialOption)
+
+}
+
+func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr)
+ }
+
+ return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn)
+
+}
+
+func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error {
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, filerGrpcAddress, grpcDialOption)
+
+}
+
+func ParseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
+ hostnameAndPort := strings.Split(filer, ":")
+ if len(hostnameAndPort) != 2 {
+ return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort)
+ }
+
+ filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return "", fmt.Errorf("filer port parse error: %v", parseErr)
+ }
+
+ filerGrpcPort := int(filerPort) + 10000
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
+}
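
The address parsers encode a port convention rather than discovery: a server's gRPC endpoint is assumed to listen on its HTTP port plus 10000, and `WithMasterClient`/`WithFilerClient` resolve addresses the same way before going through the per-address connection cache, so repeated calls share one `*grpc.ClientConn`. A minimal sketch, with an illustrative master address and insecure dialing:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	// HTTP port 9333 maps to gRPC port 19333 by the +10000 convention.
	grpcAddr, err := pb.ParseServerToGrpcAddress("localhost:9333")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(grpcAddr) // "localhost:19333"

	// Each call reuses the cached connection for the resolved address.
	err = pb.WithMasterClient("localhost:9333", grpc.WithInsecure(), func(client master_pb.SeaweedClient) error {
		resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
		if err != nil {
			return err
		}
		fmt.Println("metrics address:", resp.GetMetricsAddress())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

One consequence of the cache worth noting: connections are never evicted, so a long-lived process talking to many distinct addresses holds one open connection per address.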
diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto
new file mode 100644
index 000000000..558bd2b70
--- /dev/null
+++ b/weed/pb/iam.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package iam_pb;
+
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/iam_pb";
+option java_package = "seaweedfs.client";
+option java_outer_classname = "IamProto";
+
+//////////////////////////////////////////////////
+
+service SeaweedIdentityAccessManagement {
+
+}
+
+//////////////////////////////////////////////////
+
+message S3ApiConfiguration {
+ repeated Identity identities = 1;
+}
+
+message Identity {
+ string name = 1;
+ repeated Credential credentials = 2;
+ repeated string actions = 3;
+}
+
+message Credential {
+ string access_key = 1;
+ string secret_key = 2;
+ // uint64 expiration = 3;
+ // bool is_disabled = 4;
+}
+
+/*
+message Policy {
+ repeated Statement statements = 1;
+}
+
+message Statement {
+ repeated Action action = 1;
+ repeated Resource resource = 2;
+}
+
+message Action {
+ string action = 1;
+}
+message Resource {
+ string bucket = 1;
+ // string path = 2;
+}
+*/
\ No newline at end of file
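
The new `iam_pb` package gives the S3 gateway a typed shape for its identities configuration: each `Identity` carries access/secret key pairs plus a flat list of allowed action strings, with the finer-grained `Policy`/`Statement` model left commented out for later. A minimal sketch building and printing a configuration with the generated types from `weed/pb/iam_pb/iam.pb.go` below; the user name, keys, and action strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)

func main() {
	conf := &iam_pb.S3ApiConfiguration{
		Identities: []*iam_pb.Identity{
			{
				Name: "some_admin_user", // illustrative identity
				Credentials: []*iam_pb.Credential{
					{AccessKey: "some_access_key1", SecretKey: "some_secret_key1"},
				},
				Actions: []string{"Admin", "Read", "Write"}, // illustrative action names
			},
		},
	}

	// Render the configuration in the protobuf text format.
	fmt.Println(proto.MarshalTextString(conf))
}
```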
diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go
new file mode 100644
index 000000000..93bc854cc
--- /dev/null
+++ b/weed/pb/iam_pb/iam.pb.go
@@ -0,0 +1,356 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.24.0
+// protoc v3.12.3
+// source: iam.proto
+
+package iam_pb
+
+import (
+ context "context"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type S3ApiConfiguration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"`
+}
+
+func (x *S3ApiConfiguration) Reset() {
+ *x = S3ApiConfiguration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_iam_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *S3ApiConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*S3ApiConfiguration) ProtoMessage() {}
+
+func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_iam_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use S3ApiConfiguration.ProtoReflect.Descriptor instead.
+func (*S3ApiConfiguration) Descriptor() ([]byte, []int) {
+ return file_iam_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *S3ApiConfiguration) GetIdentities() []*Identity {
+ if x != nil {
+ return x.Identities
+ }
+ return nil
+}
+
+type Identity struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"`
+ Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
+}
+
+func (x *Identity) Reset() {
+ *x = Identity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_iam_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Identity) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Identity) ProtoMessage() {}
+
+func (x *Identity) ProtoReflect() protoreflect.Message {
+ mi := &file_iam_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Identity.ProtoReflect.Descriptor instead.
+func (*Identity) Descriptor() ([]byte, []int) {
+ return file_iam_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Identity) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Identity) GetCredentials() []*Credential {
+ if x != nil {
+ return x.Credentials
+ }
+ return nil
+}
+
+func (x *Identity) GetActions() []string {
+ if x != nil {
+ return x.Actions
+ }
+ return nil
+}
+
+type Credential struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
+ SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
+}
+
+func (x *Credential) Reset() {
+ *x = Credential{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_iam_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Credential) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Credential) ProtoMessage() {}
+
+func (x *Credential) ProtoReflect() protoreflect.Message {
+ mi := &file_iam_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Credential.ProtoReflect.Descriptor instead.
+func (*Credential) Descriptor() ([]byte, []int) {
+ return file_iam_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Credential) GetAccessKey() string {
+ if x != nil {
+ return x.AccessKey
+ }
+ return ""
+}
+
+func (x *Credential) GetSecretKey() string {
+ if x != nil {
+ return x.SecretKey
+ }
+ return ""
+}
+
+var File_iam_proto protoreflect.FileDescriptor
+
+var file_iam_proto_rawDesc = []byte{
+ 0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d,
+ 0x5f, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
+ 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
+ 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x08, 0x49,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x43,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65,
+ 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65,
+ 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65,
+ 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08,
+ 0x49, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73,
+ 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62,
+ 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_iam_proto_rawDescOnce sync.Once
+ file_iam_proto_rawDescData = file_iam_proto_rawDesc
+)
+
+func file_iam_proto_rawDescGZIP() []byte {
+ file_iam_proto_rawDescOnce.Do(func() {
+ file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData)
+ })
+ return file_iam_proto_rawDescData
+}
+
+var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_iam_proto_goTypes = []interface{}{
+ (*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration
+ (*Identity)(nil), // 1: iam_pb.Identity
+ (*Credential)(nil), // 2: iam_pb.Credential
+}
+var file_iam_proto_depIdxs = []int32{
+ 1, // 0: iam_pb.S3ApiConfiguration.identities:type_name -> iam_pb.Identity
+ 2, // 1: iam_pb.Identity.credentials:type_name -> iam_pb.Credential
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_iam_proto_init() }
+func file_iam_proto_init() {
+ if File_iam_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*S3ApiConfiguration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Identity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Credential); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_iam_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_iam_proto_goTypes,
+ DependencyIndexes: file_iam_proto_depIdxs,
+ MessageInfos: file_iam_proto_msgTypes,
+ }.Build()
+ File_iam_proto = out.File
+ file_iam_proto_rawDesc = nil
+ file_iam_proto_goTypes = nil
+ file_iam_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type SeaweedIdentityAccessManagementClient interface {
+}
+
+type seaweedIdentityAccessManagementClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) SeaweedIdentityAccessManagementClient {
+ return &seaweedIdentityAccessManagementClient{cc}
+}
+
+// SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service.
+type SeaweedIdentityAccessManagementServer interface {
+}
+
+// UnimplementedSeaweedIdentityAccessManagementServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedIdentityAccessManagementServer struct {
+}
+
+func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) {
+ s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv)
+}
+
+var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "iam_pb.SeaweedIdentityAccessManagement",
+ HandlerType: (*SeaweedIdentityAccessManagementServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{},
+ Metadata: "iam.proto",
+}
diff --git a/weed/pb/master.proto b/weed/pb/master.proto
index 544160c06..4a612b8bc 100644
--- a/weed/pb/master.proto
+++ b/weed/pb/master.proto
@@ -2,12 +2,14 @@ syntax = "proto3";
package master_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb";
+
//////////////////////////////////////////////////
service Seaweed {
rpc SendHeartbeat (stream Heartbeat) returns (stream HeartbeatResponse) {
}
- rpc KeepConnected (stream ClientListenRequest) returns (stream VolumeLocation) {
+ rpc KeepConnected (stream KeepConnectedRequest) returns (stream VolumeLocation) {
}
rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
}
@@ -15,6 +17,23 @@ service Seaweed {
}
rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
}
+ rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
+ }
+ rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) {
+ }
+ rpc VolumeList (VolumeListRequest) returns (VolumeListResponse) {
+ }
+ rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) {
+ }
+ rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) {
+ }
+ rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) {
+ }
+ rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) {
+ }
+ rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -29,15 +48,26 @@ message Heartbeat {
string rack = 7;
uint32 admin_port = 8;
repeated VolumeInformationMessage volumes = 9;
- // delta volume ids
- repeated uint32 new_vids = 10;
- repeated uint32 deleted_vids = 11;
+ // delta volumes
+ repeated VolumeShortInformationMessage new_volumes = 10;
+ repeated VolumeShortInformationMessage deleted_volumes = 11;
+ bool has_no_volumes = 12;
+
+ // erasure coding
+ repeated VolumeEcShardInformationMessage ec_shards = 16;
+ // delta erasure coding shards
+ repeated VolumeEcShardInformationMessage new_ec_shards = 17;
+ repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
+ bool has_no_ec_shards = 19;
+
}
message HeartbeatResponse {
- uint64 volumeSizeLimit = 1;
- string secretKey = 2;
- string leader = 3;
+ uint64 volume_size_limit = 1;
+ string leader = 2;
+ string metrics_address = 3;
+ uint32 metrics_interval_seconds = 4;
+ repeated StorageBackend storage_backends = 5;
}
message VolumeInformationMessage {
@@ -51,6 +81,30 @@ message VolumeInformationMessage {
uint32 replica_placement = 8;
uint32 version = 9;
uint32 ttl = 10;
+ uint32 compact_revision = 11;
+ int64 modified_at_second = 12;
+ string remote_storage_name = 13;
+ string remote_storage_key = 14;
+}
+
+message VolumeShortInformationMessage {
+ uint32 id = 1;
+ string collection = 3;
+ uint32 replica_placement = 8;
+ uint32 version = 9;
+ uint32 ttl = 10;
+}
+
+message VolumeEcShardInformationMessage {
+ uint32 id = 1;
+ string collection = 2;
+ uint32 ec_index_bits = 3;
+}
+
+message StorageBackend {
+ string type = 1;
+ string id = 2;
+ map<string, string> properties = 3;
}
message Empty {
@@ -65,8 +119,9 @@ message SuperBlockExtra {
ErasureCoding erasure_coding = 1;
}
-message ClientListenRequest {
+message KeepConnectedRequest {
string name = 1;
+ uint32 grpc_port = 2;
}
message VolumeLocation {
@@ -74,6 +129,7 @@ message VolumeLocation {
string public_url = 2;
repeated uint32 new_vids = 3;
repeated uint32 deleted_vids = 4;
+ string leader = 5; // optional; set when the responding server is not itself the leader
}
message LookupVolumeRequest {
@@ -102,6 +158,8 @@ message AssignRequest {
string data_center = 5;
string rack = 6;
string data_node = 7;
+ uint32 memory_map_max_size_mb = 8;
+ uint32 Writable_volume_count = 9;
}
message AssignResponse {
string fid = 1;
@@ -109,6 +167,7 @@ message AssignResponse {
string public_url = 3;
uint64 count = 4;
string error = 5;
+ string auth = 6;
}
message StatisticsRequest {
@@ -124,3 +183,119 @@ message StatisticsResponse {
uint64 used_size = 5;
uint64 file_count = 6;
}
+
+//
+// collection related
+//
+
+message StorageType {
+ string replication = 1;
+ string ttl = 2;
+}
+message Collection {
+ string name = 1;
+}
+message CollectionListRequest {
+ bool include_normal_volumes = 1;
+ bool include_ec_volumes = 2;
+}
+message CollectionListResponse {
+ repeated Collection collections = 1;
+}
+
+message CollectionDeleteRequest {
+ string name = 1;
+}
+message CollectionDeleteResponse {
+}
+
+//
+// volume related
+//
+message DataNodeInfo {
+ string id = 1;
+ uint64 volume_count = 2;
+ uint64 max_volume_count = 3;
+ uint64 free_volume_count = 4;
+ uint64 active_volume_count = 5;
+ repeated VolumeInformationMessage volume_infos = 6;
+ repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
+ uint64 remote_volume_count = 8;
+}
+message RackInfo {
+ string id = 1;
+ uint64 volume_count = 2;
+ uint64 max_volume_count = 3;
+ uint64 free_volume_count = 4;
+ uint64 active_volume_count = 5;
+ repeated DataNodeInfo data_node_infos = 6;
+ uint64 remote_volume_count = 7;
+}
+message DataCenterInfo {
+ string id = 1;
+ uint64 volume_count = 2;
+ uint64 max_volume_count = 3;
+ uint64 free_volume_count = 4;
+ uint64 active_volume_count = 5;
+ repeated RackInfo rack_infos = 6;
+ uint64 remote_volume_count = 7;
+}
+message TopologyInfo {
+ string id = 1;
+ uint64 volume_count = 2;
+ uint64 max_volume_count = 3;
+ uint64 free_volume_count = 4;
+ uint64 active_volume_count = 5;
+ repeated DataCenterInfo data_center_infos = 6;
+ uint64 remote_volume_count = 7;
+}
+message VolumeListRequest {
+}
+message VolumeListResponse {
+ TopologyInfo topology_info = 1;
+ uint64 volume_size_limit_mb = 2;
+}
+
+message LookupEcVolumeRequest {
+ uint32 volume_id = 1;
+}
+message LookupEcVolumeResponse {
+ uint32 volume_id = 1;
+ message EcShardIdLocation {
+ uint32 shard_id = 1;
+ repeated Location locations = 2;
+ }
+ repeated EcShardIdLocation shard_id_locations = 2;
+}
+
+message GetMasterConfigurationRequest {
+}
+message GetMasterConfigurationResponse {
+ string metrics_address = 1;
+ uint32 metrics_interval_seconds = 2;
+}
+
+message ListMasterClientsRequest {
+ string client_type = 1;
+}
+message ListMasterClientsResponse {
+ repeated string grpc_addresses = 1;
+}
+
+message LeaseAdminTokenRequest {
+ int64 previous_token = 1;
+ int64 previous_lock_time = 2;
+ string lock_name = 3;
+}
+message LeaseAdminTokenResponse {
+ int64 token = 1;
+ int64 lock_ts_ns = 2;
+}
+
+message ReleaseAdminTokenRequest {
+ int64 previous_token = 1;
+ int64 previous_lock_time = 2;
+ string lock_name = 3;
+}
+message ReleaseAdminTokenResponse {
+}
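
`LeaseAdminToken`/`ReleaseAdminToken` add a master-coordinated exclusive lock: a client leases a token, presents the previous token and lock time when renewing, and hands the token back when done. A minimal sketch of that flow, assuming the connection helpers from `weed/pb/grpc_client_server.go`; the address, dial option, and lock name are illustrative:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	err := pb.WithMasterClient("localhost:9333", grpc.WithInsecure(), func(client master_pb.SeaweedClient) error {
		// Zero previous values signal a first acquisition.
		resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
			PreviousToken:    0,
			PreviousLockTime: 0,
			LockName:         "admin", // illustrative lock name
		})
		if err != nil {
			return err // another holder, or the master is unreachable
		}
		// Hand the leased token back when finished.
		defer client.ReleaseAdminToken(context.Background(), &master_pb.ReleaseAdminTokenRequest{
			PreviousToken:    resp.Token,
			PreviousLockTime: resp.LockTsNs,
		})

		// ... exclusive administrative work would run here ...
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```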
diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go
index 894f08471..0d9782439 100644
--- a/weed/pb/master_pb/master.pb.go
+++ b/weed/pb/master_pb/master.pb.go
@@ -1,727 +1,3993 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.24.0
+// protoc v3.12.3
// source: master.proto
-// DO NOT EDIT!
-
-/*
-Package master_pb is a generated protocol buffer package.
-
-It is generated from these files:
- master.proto
-
-It has these top-level messages:
- Heartbeat
- HeartbeatResponse
- VolumeInformationMessage
- Empty
- SuperBlockExtra
- ClientListenRequest
- VolumeLocation
- LookupVolumeRequest
- LookupVolumeResponse
- Location
- AssignRequest
- AssignResponse
- StatisticsRequest
- StatisticsResponse
-*/
-package master_pb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package master_pb
import (
- context "golang.org/x/net/context"
+ context "context"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
type Heartbeat struct {
- Ip string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"`
- Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"`
- MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"`
- DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
- Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"`
- AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"`
- Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"`
- // delta volume ids
- NewVids []uint32 `protobuf:"varint,10,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"`
- DeletedVids []uint32 `protobuf:"varint,11,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"`
-}
-
-func (m *Heartbeat) Reset() { *m = Heartbeat{} }
-func (m *Heartbeat) String() string { return proto.CompactTextString(m) }
-func (*Heartbeat) ProtoMessage() {}
-func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *Heartbeat) GetIp() string {
- if m != nil {
- return m.Ip
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
+ MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey,proto3" json:"max_file_key,omitempty"`
+ DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"`
+ AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort,proto3" json:"admin_port,omitempty"`
+ Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes,proto3" json:"volumes,omitempty"`
+ // delta volumes
+ NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes,proto3" json:"new_volumes,omitempty"`
+ DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes,proto3" json:"deleted_volumes,omitempty"`
+ HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes,proto3" json:"has_no_volumes,omitempty"`
+ // erasure coding
+ EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards,proto3" json:"ec_shards,omitempty"`
+ // delta erasure coding shards
+ NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards,proto3" json:"new_ec_shards,omitempty"`
+ DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards,proto3" json:"deleted_ec_shards,omitempty"`
+ HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards,proto3" json:"has_no_ec_shards,omitempty"`
+}
+
+func (x *Heartbeat) Reset() {
+ *x = Heartbeat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Heartbeat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Heartbeat) ProtoMessage() {}
+
+func (x *Heartbeat) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead.
+func (*Heartbeat) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Heartbeat) GetIp() string {
+ if x != nil {
+ return x.Ip
}
return ""
}
-func (m *Heartbeat) GetPort() uint32 {
- if m != nil {
- return m.Port
+func (x *Heartbeat) GetPort() uint32 {
+ if x != nil {
+ return x.Port
}
return 0
}
-func (m *Heartbeat) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *Heartbeat) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
}
return ""
}
-func (m *Heartbeat) GetMaxVolumeCount() uint32 {
- if m != nil {
- return m.MaxVolumeCount
+func (x *Heartbeat) GetMaxVolumeCount() uint32 {
+ if x != nil {
+ return x.MaxVolumeCount
}
return 0
}
-func (m *Heartbeat) GetMaxFileKey() uint64 {
- if m != nil {
- return m.MaxFileKey
+func (x *Heartbeat) GetMaxFileKey() uint64 {
+ if x != nil {
+ return x.MaxFileKey
}
return 0
}
-func (m *Heartbeat) GetDataCenter() string {
- if m != nil {
- return m.DataCenter
+func (x *Heartbeat) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
}
return ""
}
-func (m *Heartbeat) GetRack() string {
- if m != nil {
- return m.Rack
+func (x *Heartbeat) GetRack() string {
+ if x != nil {
+ return x.Rack
}
return ""
}
-func (m *Heartbeat) GetAdminPort() uint32 {
- if m != nil {
- return m.AdminPort
+func (x *Heartbeat) GetAdminPort() uint32 {
+ if x != nil {
+ return x.AdminPort
}
return 0
}
-func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage {
- if m != nil {
- return m.Volumes
+func (x *Heartbeat) GetVolumes() []*VolumeInformationMessage {
+ if x != nil {
+ return x.Volumes
+ }
+ return nil
+}
+
+func (x *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage {
+ if x != nil {
+ return x.NewVolumes
+ }
+ return nil
+}
+
+func (x *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage {
+ if x != nil {
+ return x.DeletedVolumes
+ }
+ return nil
+}
+
+func (x *Heartbeat) GetHasNoVolumes() bool {
+ if x != nil {
+ return x.HasNoVolumes
+ }
+ return false
+}
+
+func (x *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.EcShards
}
return nil
}
-func (m *Heartbeat) GetNewVids() []uint32 {
- if m != nil {
- return m.NewVids
+func (x *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.NewEcShards
}
return nil
}
-func (m *Heartbeat) GetDeletedVids() []uint32 {
- if m != nil {
- return m.DeletedVids
+func (x *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.DeletedEcShards
}
return nil
}
+func (x *Heartbeat) GetHasNoEcShards() bool {
+ if x != nil {
+ return x.HasNoEcShards
+ }
+ return false
+}
+
type HeartbeatResponse struct {
- VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volumeSizeLimit" json:"volumeSizeLimit,omitempty"`
- SecretKey string `protobuf:"bytes,2,opt,name=secretKey" json:"secretKey,omitempty"`
- Leader string `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"`
+ Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"`
+ MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
+ MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
+ StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"`
+}
+
+func (x *HeartbeatResponse) Reset() {
+ *x = HeartbeatResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeartbeatResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeartbeatResponse) ProtoMessage() {}
+
+func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} }
-func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) }
-func (*HeartbeatResponse) ProtoMessage() {}
-func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead.
+func (*HeartbeatResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{1}
+}
-func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 {
- if m != nil {
- return m.VolumeSizeLimit
+func (x *HeartbeatResponse) GetVolumeSizeLimit() uint64 {
+ if x != nil {
+ return x.VolumeSizeLimit
}
return 0
}
-func (m *HeartbeatResponse) GetSecretKey() string {
- if m != nil {
- return m.SecretKey
+func (x *HeartbeatResponse) GetLeader() string {
+ if x != nil {
+ return x.Leader
}
return ""
}
-func (m *HeartbeatResponse) GetLeader() string {
- if m != nil {
- return m.Leader
+func (x *HeartbeatResponse) GetMetricsAddress() string {
+ if x != nil {
+ return x.MetricsAddress
}
return ""
}
+func (x *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 {
+ if x != nil {
+ return x.MetricsIntervalSeconds
+ }
+ return 0
+}
+
+func (x *HeartbeatResponse) GetStorageBackends() []*StorageBackend {
+ if x != nil {
+ return x.StorageBackends
+ }
+ return nil
+}
+
type VolumeInformationMessage struct {
- Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
- Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
- FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
- DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"`
- DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"`
- ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
- ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"`
- Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"`
- Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"`
-}
-
-func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} }
-func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) }
-func (*VolumeInformationMessage) ProtoMessage() {}
-func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *VolumeInformationMessage) GetId() uint32 {
- if m != nil {
- return m.Id
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Size uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+ DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"`
+ DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"`
+ ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"`
+ Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"`
+ Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"`
+ RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"`
+ RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"`
+}
+
+func (x *VolumeInformationMessage) Reset() {
+ *x = VolumeInformationMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeInformationMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeInformationMessage) ProtoMessage() {}
+
+func (x *VolumeInformationMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeInformationMessage.ProtoReflect.Descriptor instead.
+func (*VolumeInformationMessage) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *VolumeInformationMessage) GetId() uint32 {
+ if x != nil {
+ return x.Id
}
return 0
}
-func (m *VolumeInformationMessage) GetSize() uint64 {
- if m != nil {
- return m.Size
+func (x *VolumeInformationMessage) GetSize() uint64 {
+ if x != nil {
+ return x.Size
}
return 0
}
-func (m *VolumeInformationMessage) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeInformationMessage) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeInformationMessage) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
+func (x *VolumeInformationMessage) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
}
return 0
}
-func (m *VolumeInformationMessage) GetDeleteCount() uint64 {
- if m != nil {
- return m.DeleteCount
+func (x *VolumeInformationMessage) GetDeleteCount() uint64 {
+ if x != nil {
+ return x.DeleteCount
}
return 0
}
-func (m *VolumeInformationMessage) GetDeletedByteCount() uint64 {
- if m != nil {
- return m.DeletedByteCount
+func (x *VolumeInformationMessage) GetDeletedByteCount() uint64 {
+ if x != nil {
+ return x.DeletedByteCount
}
return 0
}
-func (m *VolumeInformationMessage) GetReadOnly() bool {
- if m != nil {
- return m.ReadOnly
+func (x *VolumeInformationMessage) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
}
return false
}
-func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 {
- if m != nil {
- return m.ReplicaPlacement
+func (x *VolumeInformationMessage) GetReplicaPlacement() uint32 {
+ if x != nil {
+ return x.ReplicaPlacement
}
return 0
}
-func (m *VolumeInformationMessage) GetVersion() uint32 {
- if m != nil {
- return m.Version
+func (x *VolumeInformationMessage) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
}
return 0
}
-func (m *VolumeInformationMessage) GetTtl() uint32 {
- if m != nil {
- return m.Ttl
+func (x *VolumeInformationMessage) GetTtl() uint32 {
+ if x != nil {
+ return x.Ttl
}
return 0
}
-type Empty struct {
+func (x *VolumeInformationMessage) GetCompactRevision() uint32 {
+ if x != nil {
+ return x.CompactRevision
+ }
+ return 0
}
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (x *VolumeInformationMessage) GetModifiedAtSecond() int64 {
+ if x != nil {
+ return x.ModifiedAtSecond
+ }
+ return 0
+}
-type SuperBlockExtra struct {
- ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"`
+func (x *VolumeInformationMessage) GetRemoteStorageName() string {
+ if x != nil {
+ return x.RemoteStorageName
+ }
+ return ""
+}
+
+func (x *VolumeInformationMessage) GetRemoteStorageKey() string {
+ if x != nil {
+ return x.RemoteStorageKey
+ }
+ return ""
}
-func (m *SuperBlockExtra) Reset() { *m = SuperBlockExtra{} }
-func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) }
-func (*SuperBlockExtra) ProtoMessage() {}
-func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+type VolumeShortInformationMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"`
+ Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"`
+ Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
+}
-func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding {
- if m != nil {
- return m.ErasureCoding
+func (x *VolumeShortInformationMessage) Reset() {
+ *x = VolumeShortInformationMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type SuperBlockExtra_ErasureCoding struct {
- Data uint32 `protobuf:"varint,1,opt,name=data" json:"data,omitempty"`
- Parity uint32 `protobuf:"varint,2,opt,name=parity" json:"parity,omitempty"`
- VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
+func (x *VolumeShortInformationMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_ErasureCoding{} }
-func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) }
-func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {}
-func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{4, 0}
+func (*VolumeShortInformationMessage) ProtoMessage() {}
+
+func (x *VolumeShortInformationMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeShortInformationMessage.ProtoReflect.Descriptor instead.
+func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *VolumeShortInformationMessage) GetId() uint32 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *VolumeShortInformationMessage) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeShortInformationMessage) GetReplicaPlacement() uint32 {
+ if x != nil {
+ return x.ReplicaPlacement
+ }
+ return 0
+}
+
+func (x *VolumeShortInformationMessage) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+func (x *VolumeShortInformationMessage) GetTtl() uint32 {
+ if x != nil {
+ return x.Ttl
+ }
+ return 0
+}
+
+type VolumeEcShardInformationMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"`
+}
+
+func (x *VolumeEcShardInformationMessage) Reset() {
+ *x = VolumeEcShardInformationMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardInformationMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardInformationMessage) ProtoMessage() {}
+
+func (x *VolumeEcShardInformationMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardInformationMessage.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{4}
}
-func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 {
- if m != nil {
- return m.Data
+func (x *VolumeEcShardInformationMessage) GetId() uint32 {
+ if x != nil {
+ return x.Id
}
return 0
}
-func (m *SuperBlockExtra_ErasureCoding) GetParity() uint32 {
- if m != nil {
- return m.Parity
+func (x *VolumeEcShardInformationMessage) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 {
+ if x != nil {
+ return x.EcIndexBits
}
return 0
}
-func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 {
- if m != nil {
- return m.VolumeIds
+type StorageBackend struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *StorageBackend) Reset() {
+ *x = StorageBackend{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StorageBackend) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StorageBackend) ProtoMessage() {}
+
+func (x *StorageBackend) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StorageBackend.ProtoReflect.Descriptor instead.
+func (*StorageBackend) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *StorageBackend) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *StorageBackend) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *StorageBackend) GetProperties() map[string]string {
+ if x != nil {
+ return x.Properties
+ }
+ return nil
+}
+
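StorageBackend is the first message here with a map-valued field. Its Properties map stays nil until assigned, and GetProperties returns that nil map unchanged; since reading from a nil map is safe in Go, lookups need no extra guard. A hedged sketch (the backend type, id, and property key are made up for illustration):

func exampleRegion() string {
    b := &StorageBackend{
        Type:       "s3",       // hypothetical backend type
        Id:         "backend1", // hypothetical id
        Properties: map[string]string{"region": "us-east-1"},
    }
    // The read below would also be safe on an empty or nil message:
    // GetProperties would yield a nil map, and a nil-map read returns "".
    return b.GetProperties()["region"]
}
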
+type Empty struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Empty) Reset() {
+ *x = Empty{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Empty) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{6}
+}
+
+type SuperBlockExtra struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"`
+}
+
+func (x *SuperBlockExtra) Reset() {
+ *x = SuperBlockExtra{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SuperBlockExtra) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SuperBlockExtra) ProtoMessage() {}
+
+func (x *SuperBlockExtra) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SuperBlockExtra.ProtoReflect.Descriptor instead.
+func (*SuperBlockExtra) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding {
+ if x != nil {
+ return x.ErasureCoding
}
return nil
}
-type ClientListenRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+type KeepConnectedRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"`
+}
+
+func (x *KeepConnectedRequest) Reset() {
+ *x = KeepConnectedRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepConnectedRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepConnectedRequest) ProtoMessage() {}
+
+func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *ClientListenRequest) Reset() { *m = ClientListenRequest{} }
-func (m *ClientListenRequest) String() string { return proto.CompactTextString(m) }
-func (*ClientListenRequest) ProtoMessage() {}
-func (*ClientListenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead.
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{8}
+}
-func (m *ClientListenRequest) GetName() string {
- if m != nil {
- return m.Name
+func (x *KeepConnectedRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
+func (x *KeepConnectedRequest) GetGrpcPort() uint32 {
+ if x != nil {
+ return x.GrpcPort
+ }
+ return 0
+}
+
type VolumeLocation struct {
- Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"`
- DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"`
+ DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"`
+ Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional; set when this master is not itself the leader
+}
+
+func (x *VolumeLocation) Reset() {
+ *x = VolumeLocation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeLocation) ProtoMessage() {}
+
+func (x *VolumeLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeLocation) Reset() { *m = VolumeLocation{} }
-func (m *VolumeLocation) String() string { return proto.CompactTextString(m) }
-func (*VolumeLocation) ProtoMessage() {}
-func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+// Deprecated: Use VolumeLocation.ProtoReflect.Descriptor instead.
+func (*VolumeLocation) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{9}
+}
-func (m *VolumeLocation) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *VolumeLocation) GetUrl() string {
+ if x != nil {
+ return x.Url
}
return ""
}
-func (m *VolumeLocation) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *VolumeLocation) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
}
return ""
}
-func (m *VolumeLocation) GetNewVids() []uint32 {
- if m != nil {
- return m.NewVids
+func (x *VolumeLocation) GetNewVids() []uint32 {
+ if x != nil {
+ return x.NewVids
}
return nil
}
-func (m *VolumeLocation) GetDeletedVids() []uint32 {
- if m != nil {
- return m.DeletedVids
+func (x *VolumeLocation) GetDeletedVids() []uint32 {
+ if x != nil {
+ return x.DeletedVids
}
return nil
}
+func (x *VolumeLocation) GetLeader() string {
+ if x != nil {
+ return x.Leader
+ }
+ return ""
+}
+
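Beyond the new Leader field, VolumeLocation is the per-server update record a master streams to connected clients: NewVids and DeletedVids are deltas against the client's current view. A hedged sketch of folding one update into a local vid-to-URL map (the map and its single-location-per-vid shape are assumptions for illustration):

func applyVolumeLocation(locs map[uint32]string, u *VolumeLocation) {
    for _, vid := range u.GetNewVids() {
        locs[vid] = u.GetUrl() // this server now carries vid
    }
    for _, vid := range u.GetDeletedVids() {
        delete(locs, vid) // vid is no longer on this server
    }
}
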
type LookupVolumeRequest struct {
- VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, a bit faster if provided.
+}
+
+func (x *LookupVolumeRequest) Reset() {
+ *x = LookupVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeRequest) ProtoMessage() {}
+
+func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
-func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeRequest) ProtoMessage() {}
-func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead.
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{10}
+}
-func (m *LookupVolumeRequest) GetVolumeIds() []string {
- if m != nil {
- return m.VolumeIds
+func (x *LookupVolumeRequest) GetVolumeIds() []string {
+ if x != nil {
+ return x.VolumeIds
}
return nil
}
-func (m *LookupVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *LookupVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
type LookupVolumeResponse struct {
- VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations" json:"volume_id_locations,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations,proto3" json:"volume_id_locations,omitempty"`
+}
+
+func (x *LookupVolumeResponse) Reset() {
+ *x = LookupVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeResponse) ProtoMessage() {}
+
+func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead.
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation {
+ if x != nil {
+ return x.VolumeIdLocations
+ }
+ return nil
+}
+
+type Location struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+}
+
+func (x *Location) Reset() {
+ *x = Location{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Location) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Location) ProtoMessage() {}
+
+func (x *Location) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Location.ProtoReflect.Descriptor instead.
+func (*Location) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *Location) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Location) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+type AssignRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"`
+ DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"`
+ MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"`
+ WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount,proto3" json:"Writable_volume_count,omitempty"`
+}
+
+func (x *AssignRequest) Reset() {
+ *x = AssignRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignRequest) ProtoMessage() {}
+
+func (x *AssignRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignRequest.ProtoReflect.Descriptor instead.
+func (*AssignRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *AssignRequest) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetRack() string {
+ if x != nil {
+ return x.Rack
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetDataNode() string {
+ if x != nil {
+ return x.DataNode
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetMemoryMapMaxSizeMb() uint32 {
+ if x != nil {
+ return x.MemoryMapMaxSizeMb
+ }
+ return 0
+}
+
+func (x *AssignRequest) GetWritableVolumeCount() uint32 {
+ if x != nil {
+ return x.WritableVolumeCount
+ }
+ return 0
+}
+
+type AssignResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"`
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+ Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"`
+}
+
+func (x *AssignResponse) Reset() {
+ *x = AssignResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignResponse) ProtoMessage() {}
+
+func (x *AssignResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignResponse.ProtoReflect.Descriptor instead.
+func (*AssignResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *AssignResponse) GetFid() string {
+ if x != nil {
+ return x.Fid
+ }
+ return ""
+}
+
+func (x *AssignResponse) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *AssignResponse) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+func (x *AssignResponse) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+func (x *AssignResponse) GetAuth() string {
+ if x != nil {
+ return x.Auth
+ }
+ return ""
+}
+
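AssignRequest and AssignResponse are the request/reply pair for the master's Assign RPC, which hands out file ids ahead of an upload. The sketch below is illustrative only: it assumes the generated Seaweed gRPC stub (NewSeaweedClient) defined outside this hunk, and the replication and collection values are placeholders.

import (
    "context"
    "errors"

    "google.golang.org/grpc"
)

// assignOneFid asks the master for a single file id and returns the
// assignment details, surfacing an application-level error if one is set.
func assignOneFid(ctx context.Context, conn *grpc.ClientConn) (fid, url, auth string, err error) {
    client := NewSeaweedClient(conn) // generated stub, outside this hunk
    resp, err := client.Assign(ctx, &AssignRequest{
        Count:       1,
        Replication: "001",      // placeholder replica placement
        Collection:  "pictures", // placeholder collection
    })
    if err != nil {
        return "", "", "", err
    }
    if resp.GetError() != "" {
        return "", "", "", errors.New(resp.GetError())
    }
    return resp.GetFid(), resp.GetUrl(), resp.GetAuth(), nil
}
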
+type StatisticsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+}
+
+func (x *StatisticsRequest) Reset() {
+ *x = StatisticsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsRequest) ProtoMessage() {}
+
+func (x *StatisticsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead.
+func (*StatisticsRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *StatisticsRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+type StatisticsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+ UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"`
+ FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+}
+
+func (x *StatisticsResponse) Reset() {
+ *x = StatisticsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsResponse) ProtoMessage() {}
+
+func (x *StatisticsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead.
+func (*StatisticsResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *StatisticsResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StatisticsResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *StatisticsResponse) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *StatisticsResponse) GetTotalSize() uint64 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetUsedSize() uint64 {
+ if x != nil {
+ return x.UsedSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
+ }
+ return 0
+}
+
+type StorageType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"`
+}
+
+func (x *StorageType) Reset() {
+ *x = StorageType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StorageType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StorageType) ProtoMessage() {}
+
+func (x *StorageType) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StorageType.ProtoReflect.Descriptor instead.
+func (*StorageType) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *StorageType) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StorageType) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+type Collection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Collection) Reset() {
+ *x = Collection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Collection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Collection) ProtoMessage() {}
+
+func (x *Collection) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Collection.ProtoReflect.Descriptor instead.
+func (*Collection) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *Collection) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type CollectionListRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"`
+ IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"`
+}
+
+func (x *CollectionListRequest) Reset() {
+ *x = CollectionListRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionListRequest) ProtoMessage() {}
+
+func (x *CollectionListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead.
+func (*CollectionListRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *CollectionListRequest) GetIncludeNormalVolumes() bool {
+ if x != nil {
+ return x.IncludeNormalVolumes
+ }
+ return false
+}
+
+func (x *CollectionListRequest) GetIncludeEcVolumes() bool {
+ if x != nil {
+ return x.IncludeEcVolumes
+ }
+ return false
+}
+
+type CollectionListResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
+}
+
+func (x *CollectionListResponse) Reset() {
+ *x = CollectionListResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionListResponse) ProtoMessage() {}
+
+func (x *CollectionListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead.
+func (*CollectionListResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *CollectionListResponse) GetCollections() []*Collection {
+ if x != nil {
+ return x.Collections
+ }
+ return nil
+}
+
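CollectionListRequest/Response wrap the collection-listing RPC; the response is just a slice of Collection messages, each carrying a name, so flattening it is one loop. A small sketch:

func collectionNames(resp *CollectionListResponse) []string {
    names := make([]string, 0, len(resp.GetCollections()))
    for _, c := range resp.GetCollections() {
        names = append(names, c.GetName())
    }
    return names
}
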
+type CollectionDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *CollectionDeleteRequest) Reset() {
+ *x = CollectionDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionDeleteRequest) ProtoMessage() {}
+
+func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionDeleteRequest.ProtoReflect.Descriptor instead.
+func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *CollectionDeleteRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type CollectionDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CollectionDeleteResponse) Reset() {
+ *x = CollectionDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionDeleteResponse) ProtoMessage() {}
+
+func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionDeleteResponse.ProtoReflect.Descriptor instead.
+func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{22}
+}
+
+//
+// volume related
+//
+type DataNodeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
+ MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
+ FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
+ ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
+ VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"`
+ EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"`
+ RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
+}
+
+func (x *DataNodeInfo) Reset() {
+ *x = DataNodeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DataNodeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataNodeInfo) ProtoMessage() {}
+
+func (x *DataNodeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataNodeInfo.ProtoReflect.Descriptor instead.
+func (*DataNodeInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *DataNodeInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *DataNodeInfo) GetVolumeCount() uint64 {
+ if x != nil {
+ return x.VolumeCount
+ }
+ return 0
+}
+
+func (x *DataNodeInfo) GetMaxVolumeCount() uint64 {
+ if x != nil {
+ return x.MaxVolumeCount
+ }
+ return 0
+}
+
+func (x *DataNodeInfo) GetFreeVolumeCount() uint64 {
+ if x != nil {
+ return x.FreeVolumeCount
+ }
+ return 0
+}
+
+func (x *DataNodeInfo) GetActiveVolumeCount() uint64 {
+ if x != nil {
+ return x.ActiveVolumeCount
+ }
+ return 0
+}
+
+func (x *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage {
+ if x != nil {
+ return x.VolumeInfos
+ }
+ return nil
+}
+
+func (x *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.EcShardInfos
+ }
+ return nil
+}
+
+func (x *DataNodeInfo) GetRemoteVolumeCount() uint64 {
+ if x != nil {
+ return x.RemoteVolumeCount
+ }
+ return 0
+}
+
+type RackInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
+ MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
+ FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
+ ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
+ DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"`
+ RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
+}
+
+func (x *RackInfo) Reset() {
+ *x = RackInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RackInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RackInfo) ProtoMessage() {}
+
+func (x *RackInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RackInfo.ProtoReflect.Descriptor instead.
+func (*RackInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *RackInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *RackInfo) GetVolumeCount() uint64 {
+ if x != nil {
+ return x.VolumeCount
+ }
+ return 0
+}
+
+func (x *RackInfo) GetMaxVolumeCount() uint64 {
+ if x != nil {
+ return x.MaxVolumeCount
+ }
+ return 0
+}
+
+func (x *RackInfo) GetFreeVolumeCount() uint64 {
+ if x != nil {
+ return x.FreeVolumeCount
+ }
+ return 0
+}
+
+func (x *RackInfo) GetActiveVolumeCount() uint64 {
+ if x != nil {
+ return x.ActiveVolumeCount
+ }
+ return 0
+}
+
+func (x *RackInfo) GetDataNodeInfos() []*DataNodeInfo {
+ if x != nil {
+ return x.DataNodeInfos
+ }
+ return nil
+}
+
+func (x *RackInfo) GetRemoteVolumeCount() uint64 {
+ if x != nil {
+ return x.RemoteVolumeCount
+ }
+ return 0
+}
+
+type DataCenterInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
+ MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
+ FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
+ ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
+ RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"`
+ RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
+}
+
+func (x *DataCenterInfo) Reset() {
+ *x = DataCenterInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DataCenterInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataCenterInfo) ProtoMessage() {}
+
+func (x *DataCenterInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataCenterInfo.ProtoReflect.Descriptor instead.
+func (*DataCenterInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *DataCenterInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *DataCenterInfo) GetVolumeCount() uint64 {
+ if x != nil {
+ return x.VolumeCount
+ }
+ return 0
+}
+
+func (x *DataCenterInfo) GetMaxVolumeCount() uint64 {
+ if x != nil {
+ return x.MaxVolumeCount
+ }
+ return 0
+}
+
+func (x *DataCenterInfo) GetFreeVolumeCount() uint64 {
+ if x != nil {
+ return x.FreeVolumeCount
+ }
+ return 0
+}
+
+func (x *DataCenterInfo) GetActiveVolumeCount() uint64 {
+ if x != nil {
+ return x.ActiveVolumeCount
+ }
+ return 0
+}
+
+func (x *DataCenterInfo) GetRackInfos() []*RackInfo {
+ if x != nil {
+ return x.RackInfos
+ }
+ return nil
+}
+
+func (x *DataCenterInfo) GetRemoteVolumeCount() uint64 {
+ if x != nil {
+ return x.RemoteVolumeCount
+ }
+ return 0
+}
+
+type TopologyInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
+ MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
+ FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
+ ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
+ DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"`
+ RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
+}
+
+func (x *TopologyInfo) Reset() {
+ *x = TopologyInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TopologyInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TopologyInfo) ProtoMessage() {}
+
+func (x *TopologyInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TopologyInfo.ProtoReflect.Descriptor instead.
+func (*TopologyInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *TopologyInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *TopologyInfo) GetVolumeCount() uint64 {
+ if x != nil {
+ return x.VolumeCount
+ }
+ return 0
+}
+
+func (x *TopologyInfo) GetMaxVolumeCount() uint64 {
+ if x != nil {
+ return x.MaxVolumeCount
+ }
+ return 0
+}
+
+func (x *TopologyInfo) GetFreeVolumeCount() uint64 {
+ if x != nil {
+ return x.FreeVolumeCount
+ }
+ return 0
+}
+
+func (x *TopologyInfo) GetActiveVolumeCount() uint64 {
+ if x != nil {
+ return x.ActiveVolumeCount
+ }
+ return 0
+}
+
+func (x *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo {
+ if x != nil {
+ return x.DataCenterInfos
+ }
+ return nil
+}
+
+func (x *TopologyInfo) GetRemoteVolumeCount() uint64 {
+ if x != nil {
+ return x.RemoteVolumeCount
+ }
+ return 0
+}
+
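DataNodeInfo, RackInfo, DataCenterInfo, and TopologyInfo nest into a tree that mirrors the cluster layout, with the same counters repeated at each level. Aggregating across the tree is therefore a three-level walk; a sketch using only the getters defined above:

func totalVolumeCount(t *TopologyInfo) uint64 {
    var sum uint64
    for _, dc := range t.GetDataCenterInfos() {
        for _, rack := range dc.GetRackInfos() {
            for _, node := range rack.GetDataNodeInfos() {
                sum += node.GetVolumeCount()
            }
        }
    }
    return sum // nil-safe: getters on nil messages return nil slices
}
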
+type VolumeListRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeListRequest) Reset() {
+ *x = VolumeListRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeListRequest) ProtoMessage() {}
+
+func (x *VolumeListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeListRequest.ProtoReflect.Descriptor instead.
+func (*VolumeListRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{27}
+}
+
+type VolumeListResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"`
+ VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"`
+}
+
+func (x *VolumeListResponse) Reset() {
+ *x = VolumeListResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeListResponse) ProtoMessage() {}
+
+func (x *VolumeListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeListResponse.ProtoReflect.Descriptor instead.
+func (*VolumeListResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *VolumeListResponse) GetTopologyInfo() *TopologyInfo {
+ if x != nil {
+ return x.TopologyInfo
+ }
+ return nil
+}
+
+func (x *VolumeListResponse) GetVolumeSizeLimitMb() uint64 {
+ if x != nil {
+ return x.VolumeSizeLimitMb
+ }
+ return 0
+}
+
+type LookupEcVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *LookupEcVolumeRequest) Reset() {
+ *x = LookupEcVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupEcVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupEcVolumeRequest) ProtoMessage() {}
+
+func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupEcVolumeRequest.ProtoReflect.Descriptor instead.
+func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *LookupEcVolumeRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type LookupEcVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations,proto3" json:"shard_id_locations,omitempty"`
+}
+
+func (x *LookupEcVolumeResponse) Reset() {
+ *x = LookupEcVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupEcVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupEcVolumeResponse) ProtoMessage() {}
+
+func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupEcVolumeResponse.ProtoReflect.Descriptor instead.
+func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *LookupEcVolumeResponse) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation {
+ if x != nil {
+ return x.ShardIdLocations
+ }
+ return nil
+}
+
+type GetMasterConfigurationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetMasterConfigurationRequest) Reset() {
+ *x = GetMasterConfigurationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetMasterConfigurationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMasterConfigurationRequest) ProtoMessage() {}
+
+func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead.
+func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{31}
+}
+
+type GetMasterConfigurationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
+ MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
+}
+
+func (x *GetMasterConfigurationResponse) Reset() {
+ *x = GetMasterConfigurationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetMasterConfigurationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMasterConfigurationResponse) ProtoMessage() {}
+
+func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead.
+func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *GetMasterConfigurationResponse) GetMetricsAddress() string {
+ if x != nil {
+ return x.MetricsAddress
+ }
+ return ""
+}
+
+func (x *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 {
+ if x != nil {
+ return x.MetricsIntervalSeconds
+ }
+ return 0
+}
+
+type ListMasterClientsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"`
+}
+
+func (x *ListMasterClientsRequest) Reset() {
+ *x = ListMasterClientsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListMasterClientsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMasterClientsRequest) ProtoMessage() {}
+
+func (x *ListMasterClientsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMasterClientsRequest.ProtoReflect.Descriptor instead.
+func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *ListMasterClientsRequest) GetClientType() string {
+ if x != nil {
+ return x.ClientType
+ }
+ return ""
+}
+
+type ListMasterClientsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"`
+}
+
+func (x *ListMasterClientsResponse) Reset() {
+ *x = ListMasterClientsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListMasterClientsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMasterClientsResponse) ProtoMessage() {}
+
+func (x *ListMasterClientsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMasterClientsResponse.ProtoReflect.Descriptor instead.
+func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *ListMasterClientsResponse) GetGrpcAddresses() []string {
+ if x != nil {
+ return x.GrpcAddresses
+ }
+ return nil
+}
+
+type LeaseAdminTokenRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"`
+ PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"`
+ LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"`
+}
+
+func (x *LeaseAdminTokenRequest) Reset() {
+ *x = LeaseAdminTokenRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LeaseAdminTokenRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LeaseAdminTokenRequest) ProtoMessage() {}
+
+func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead.
+func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 {
+ if x != nil {
+ return x.PreviousToken
+ }
+ return 0
+}
+
+func (x *LeaseAdminTokenRequest) GetPreviousLockTime() int64 {
+ if x != nil {
+ return x.PreviousLockTime
+ }
+ return 0
+}
+
+func (x *LeaseAdminTokenRequest) GetLockName() string {
+ if x != nil {
+ return x.LockName
+ }
+ return ""
+}
+
+type LeaseAdminTokenResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"`
+ LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"`
+}
+
+func (x *LeaseAdminTokenResponse) Reset() {
+ *x = LeaseAdminTokenResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LeaseAdminTokenResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LeaseAdminTokenResponse) ProtoMessage() {}
+
+func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead.
+func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *LeaseAdminTokenResponse) GetToken() int64 {
+ if x != nil {
+ return x.Token
+ }
+ return 0
+}
+
+func (x *LeaseAdminTokenResponse) GetLockTsNs() int64 {
+ if x != nil {
+ return x.LockTsNs
+ }
+ return 0
+}
+
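LeaseAdminTokenRequest/Response implement a renewable admin lock: each grant returns a token and a lock timestamp, and the next request echoes both to prove continuity. A hedged sketch of building the renewal (the renewal semantics are inferred from the field names):

func renewalRequest(prev *LeaseAdminTokenResponse, lockName string) *LeaseAdminTokenRequest {
    return &LeaseAdminTokenRequest{
        PreviousToken:    prev.GetToken(),
        PreviousLockTime: prev.GetLockTsNs(),
        LockName:         lockName,
    }
}
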
+type ReleaseAdminTokenRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"`
+ PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"`
+ LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"`
+}
+
+func (x *ReleaseAdminTokenRequest) Reset() {
+ *x = ReleaseAdminTokenRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReleaseAdminTokenRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReleaseAdminTokenRequest) ProtoMessage() {}
+
+func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead.
+func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{37}
}
-func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
-func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeResponse) ProtoMessage() {}
-func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 {
+ if x != nil {
+ return x.PreviousToken
+ }
+ return 0
+}
+
-func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation {
- if m != nil {
- return m.VolumeIdLocations
+func (x *ReleaseAdminTokenRequest) GetPreviousLockTime() int64 {
+ if x != nil {
+ return x.PreviousLockTime
}
- return nil
+ return 0
}
-type LookupVolumeResponse_VolumeIdLocation struct {
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"`
+func (x *ReleaseAdminTokenRequest) GetLockName() string {
+ if x != nil {
+ return x.LockName
+ }
+ return ""
}
-func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVolumeResponse_VolumeIdLocation{} }
-func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {}
-func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{8, 0}
+type ReleaseAdminTokenResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string {
- if m != nil {
- return m.VolumeId
+func (x *ReleaseAdminTokenResponse) Reset() {
+ *x = ReleaseAdminTokenResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location {
- if m != nil {
- return m.Locations
- }
- return nil
+func (x *ReleaseAdminTokenResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LookupVolumeResponse_VolumeIdLocation) GetError() string {
- if m != nil {
- return m.Error
+func (*ReleaseAdminTokenResponse) ProtoMessage() {}
+
+func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-type Location struct {
- Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+// Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead.
+func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{38}
}
-func (m *Location) Reset() { *m = Location{} }
-func (m *Location) String() string { return proto.CompactTextString(m) }
-func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+type SuperBlockExtra_ErasureCoding struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
-func (m *Location) GetUrl() string {
- if m != nil {
- return m.Url
- }
- return ""
+ Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"`
+ Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"`
+ VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"`
}
-func (m *Location) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *SuperBlockExtra_ErasureCoding) Reset() {
+ *x = SuperBlockExtra_ErasureCoding{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type AssignRequest struct {
- Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
- Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"`
- DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
- Rack string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"`
- DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"`
+func (x *SuperBlockExtra_ErasureCoding) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignRequest) Reset() { *m = AssignRequest{} }
-func (m *AssignRequest) String() string { return proto.CompactTextString(m) }
-func (*AssignRequest) ProtoMessage() {}
-func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {}
+
-func (m *AssignRequest) GetCount() uint64 {
- if m != nil {
- return m.Count
+func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *AssignRequest) GetReplication() string {
- if m != nil {
- return m.Replication
- }
- return ""
+// Deprecated: Use SuperBlockExtra_ErasureCoding.ProtoReflect.Descriptor instead.
+func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{7, 0}
}
-func (m *AssignRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *SuperBlockExtra_ErasureCoding) GetData() uint32 {
+ if x != nil {
+ return x.Data
}
- return ""
+ return 0
}
-func (m *AssignRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *SuperBlockExtra_ErasureCoding) GetParity() uint32 {
+ if x != nil {
+ return x.Parity
}
- return ""
+ return 0
}
-func (m *AssignRequest) GetDataCenter() string {
- if m != nil {
- return m.DataCenter
+func (x *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 {
+ if x != nil {
+ return x.VolumeIds
}
- return ""
+ return nil
}
-func (m *AssignRequest) GetRack() string {
- if m != nil {
- return m.Rack
- }
- return ""
+type LookupVolumeResponse_VolumeIdLocation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"`
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
}
-func (m *AssignRequest) GetDataNode() string {
- if m != nil {
- return m.DataNode
+func (x *LookupVolumeResponse_VolumeIdLocation) Reset() {
+ *x = LookupVolumeResponse_VolumeIdLocation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type AssignResponse struct {
- Fid string `protobuf:"bytes,1,opt,name=fid" json:"fid,omitempty"`
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"`
+func (x *LookupVolumeResponse_VolumeIdLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignResponse) Reset() { *m = AssignResponse{} }
-func (m *AssignResponse) String() string { return proto.CompactTextString(m) }
-func (*AssignResponse) ProtoMessage() {}
-func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {}
+
-func (m *AssignResponse) GetFid() string {
- if m != nil {
- return m.Fid
+func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *AssignResponse) GetUrl() string {
- if m != nil {
- return m.Url
- }
- return ""
+// Deprecated: Use LookupVolumeResponse_VolumeIdLocation.ProtoReflect.Descriptor instead.
+func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{11, 0}
}
-func (m *AssignResponse) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string {
+ if x != nil {
+ return x.VolumeId
}
return ""
}
-func (m *AssignResponse) GetCount() uint64 {
- if m != nil {
- return m.Count
+func (x *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location {
+ if x != nil {
+ return x.Locations
}
- return 0
+ return nil
}
-func (m *AssignResponse) GetError() string {
- if m != nil {
- return m.Error
+func (x *LookupVolumeResponse_VolumeIdLocation) GetError() string {
+ if x != nil {
+ return x.Error
}
return ""
}
-type StatisticsRequest struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
-}
+type LookupEcVolumeResponse_EcShardIdLocation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
-func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
-func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
-func (*StatisticsRequest) ProtoMessage() {}
-func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+ ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
+ Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"`
+}
-func (m *StatisticsRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() {
+ *x = LookupEcVolumeResponse_EcShardIdLocation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *StatisticsRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *StatisticsRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {}
+
+func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-type StatisticsResponse struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
- TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"`
- FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
+// Deprecated: Use LookupEcVolumeResponse_EcShardIdLocation.ProtoReflect.Descriptor instead.
+func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{30, 0}
}
-func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
-func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
-func (*StatisticsResponse) ProtoMessage() {}
-func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-func (m *StatisticsResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 {
+ if x != nil {
+ return x.ShardId
}
- return ""
+ return 0
}
-func (m *StatisticsResponse) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location {
+ if x != nil {
+ return x.Locations
}
- return ""
+ return nil
}
-func (m *StatisticsResponse) GetTtl() string {
- if m != nil {
- return m.Ttl
- }
- return ""
+var File_master_proto protoreflect.FileDescriptor
+
+var file_master_proto_rawDesc = []byte{
+ 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x8b, 0x06, 0x0a, 0x09, 0x48, 0x65,
+ 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61,
+ 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65,
+ 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x46,
+ 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63,
+ 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74,
+ 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e,
+ 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x0b, 0x6e, 0x65, 0x77,
+ 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x6e,
+ 0x6f, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0c, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a,
+ 0x09, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x65, 0x63,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63,
+ 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x45, 0x63,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x56, 0x0a, 0x11, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x64, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x64,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x27,
+ 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x45,
+ 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72,
+ 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a,
+ 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73,
+ 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f,
+ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x22, 0xfb, 0x03, 0x0a, 0x18, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x66,
+ 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a,
+ 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x64, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
+ 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74,
+ 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d,
+ 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12,
+ 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x41, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa8, 0x01, 0x0a, 0x1d, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c,
+ 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03,
+ 0x74, 0x74, 0x6c, 0x22, 0x75, 0x0a, 0x1f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x63, 0x5f, 0x69, 0x6e, 0x64,
+ 0x65, 0x78, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65,
+ 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x69, 0x74, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
+ 0x64, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64,
+ 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x07, 0x0a, 0x05, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0xbe, 0x01, 0x0a, 0x0f, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, 0x61, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x61, 0x73,
+ 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x70,
+ 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, 0x61, 0x2e, 0x45, 0x72, 0x61,
+ 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x0d, 0x65, 0x72, 0x61, 0x73,
+ 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x5a, 0x0a, 0x0d, 0x45, 0x72, 0x61,
+ 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
+ 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16,
+ 0x0a, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06,
+ 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x97,
+ 0x01, 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55,
+ 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x56, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a,
+ 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0d, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x69, 0x64, 0x73,
+ 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1e,
+ 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf2,
+ 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a,
+ 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x22, 0x3b, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72,
+ 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c,
+ 0x22, 0xb3, 0x02, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74,
+ 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a,
+ 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63,
+ 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32,
+ 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12,
+ 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65,
+ 0x4d, 0x62, 0x12, 0x32, 0x0a, 0x15, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x13, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x93, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67,
+ 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75,
+ 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x22, 0x67, 0x0a, 0x11,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0xc3, 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73,
+ 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e,
+ 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10,
+ 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12,
+ 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x0b, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03,
+ 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x20,
+ 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69,
+ 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63,
+ 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75,
+ 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12,
+ 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63,
+ 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x51, 0x0a,
+ 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x22, 0x2d, 0x0a, 0x17, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
+ 0x1a, 0x0a, 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x03, 0x0a, 0x0c,
+ 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65,
+ 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e,
+ 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x50, 0x0a,
+ 0x0e, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18,
+ 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49,
+ 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x52, 0x0c, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12,
+ 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22,
+ 0xb4, 0x02, 0x0a, 0x08, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65,
+ 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f,
+ 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4e,
+ 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64,
+ 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x43,
+ 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10,
+ 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x0a, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73,
+ 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x72, 0x61, 0x63,
+ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xbe, 0x02, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c,
+ 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61,
+ 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x61,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x45, 0x0a, 0x11, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74,
+ 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74,
+ 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a,
+ 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49,
+ 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x2f, 0x0a, 0x14, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+ 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74,
+ 0x4d, 0x62, 0x22, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x16, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x61, 0x0a, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45,
+ 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x11, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1f, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73,
+ 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e,
+ 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x3b, 0x0a,
+ 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x19, 0x4c, 0x69,
+ 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x8a,
+ 0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65,
+ 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72,
+ 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b,
+ 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, 0x4c,
+ 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x0a,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x52,
+ 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69,
+ 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c,
+ 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76,
+ 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x6c,
+ 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf7, 0x08, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65,
+ 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62,
+ 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, 0x0a,
+ 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1f,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
+ 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, 0x2e,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69,
+ 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53,
+ 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x22,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
+ 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60,
+ 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69,
+ 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
+ 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (m *StatisticsResponse) GetTotalSize() uint64 {
- if m != nil {
- return m.TotalSize
- }
- return 0
-}
+var (
+ file_master_proto_rawDescOnce sync.Once
+ file_master_proto_rawDescData = file_master_proto_rawDesc
+)
-func (m *StatisticsResponse) GetUsedSize() uint64 {
- if m != nil {
- return m.UsedSize
- }
- return 0
+func file_master_proto_rawDescGZIP() []byte {
+ file_master_proto_rawDescOnce.Do(func() {
+ file_master_proto_rawDescData = protoimpl.X.CompressGZIP(file_master_proto_rawDescData)
+ })
+ return file_master_proto_rawDescData
}
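+
+// The byte slice above is master.proto's serialized file descriptor; it is
+// gzip-compressed at most once, on first request, for callers of the legacy
+// Descriptor()/EnumDescriptor() accessors.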
-func (m *StatisticsResponse) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
- }
- return 0
+var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 43)
+var file_master_proto_goTypes = []interface{}{
+ (*Heartbeat)(nil), // 0: master_pb.Heartbeat
+ (*HeartbeatResponse)(nil), // 1: master_pb.HeartbeatResponse
+ (*VolumeInformationMessage)(nil), // 2: master_pb.VolumeInformationMessage
+ (*VolumeShortInformationMessage)(nil), // 3: master_pb.VolumeShortInformationMessage
+ (*VolumeEcShardInformationMessage)(nil), // 4: master_pb.VolumeEcShardInformationMessage
+ (*StorageBackend)(nil), // 5: master_pb.StorageBackend
+ (*Empty)(nil), // 6: master_pb.Empty
+ (*SuperBlockExtra)(nil), // 7: master_pb.SuperBlockExtra
+ (*KeepConnectedRequest)(nil), // 8: master_pb.KeepConnectedRequest
+ (*VolumeLocation)(nil), // 9: master_pb.VolumeLocation
+ (*LookupVolumeRequest)(nil), // 10: master_pb.LookupVolumeRequest
+ (*LookupVolumeResponse)(nil), // 11: master_pb.LookupVolumeResponse
+ (*Location)(nil), // 12: master_pb.Location
+ (*AssignRequest)(nil), // 13: master_pb.AssignRequest
+ (*AssignResponse)(nil), // 14: master_pb.AssignResponse
+ (*StatisticsRequest)(nil), // 15: master_pb.StatisticsRequest
+ (*StatisticsResponse)(nil), // 16: master_pb.StatisticsResponse
+ (*StorageType)(nil), // 17: master_pb.StorageType
+ (*Collection)(nil), // 18: master_pb.Collection
+ (*CollectionListRequest)(nil), // 19: master_pb.CollectionListRequest
+ (*CollectionListResponse)(nil), // 20: master_pb.CollectionListResponse
+ (*CollectionDeleteRequest)(nil), // 21: master_pb.CollectionDeleteRequest
+ (*CollectionDeleteResponse)(nil), // 22: master_pb.CollectionDeleteResponse
+ (*DataNodeInfo)(nil), // 23: master_pb.DataNodeInfo
+ (*RackInfo)(nil), // 24: master_pb.RackInfo
+ (*DataCenterInfo)(nil), // 25: master_pb.DataCenterInfo
+ (*TopologyInfo)(nil), // 26: master_pb.TopologyInfo
+ (*VolumeListRequest)(nil), // 27: master_pb.VolumeListRequest
+ (*VolumeListResponse)(nil), // 28: master_pb.VolumeListResponse
+ (*LookupEcVolumeRequest)(nil), // 29: master_pb.LookupEcVolumeRequest
+ (*LookupEcVolumeResponse)(nil), // 30: master_pb.LookupEcVolumeResponse
+ (*GetMasterConfigurationRequest)(nil), // 31: master_pb.GetMasterConfigurationRequest
+ (*GetMasterConfigurationResponse)(nil), // 32: master_pb.GetMasterConfigurationResponse
+ (*ListMasterClientsRequest)(nil), // 33: master_pb.ListMasterClientsRequest
+ (*ListMasterClientsResponse)(nil), // 34: master_pb.ListMasterClientsResponse
+ (*LeaseAdminTokenRequest)(nil), // 35: master_pb.LeaseAdminTokenRequest
+ (*LeaseAdminTokenResponse)(nil), // 36: master_pb.LeaseAdminTokenResponse
+ (*ReleaseAdminTokenRequest)(nil), // 37: master_pb.ReleaseAdminTokenRequest
+ (*ReleaseAdminTokenResponse)(nil), // 38: master_pb.ReleaseAdminTokenResponse
+ nil, // 39: master_pb.StorageBackend.PropertiesEntry
+ (*SuperBlockExtra_ErasureCoding)(nil), // 40: master_pb.SuperBlockExtra.ErasureCoding
+ (*LookupVolumeResponse_VolumeIdLocation)(nil), // 41: master_pb.LookupVolumeResponse.VolumeIdLocation
+ (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 42: master_pb.LookupEcVolumeResponse.EcShardIdLocation
+}
+var file_master_proto_depIdxs = []int32{
+ 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage
+ 3, // 1: master_pb.Heartbeat.new_volumes:type_name -> master_pb.VolumeShortInformationMessage
+ 3, // 2: master_pb.Heartbeat.deleted_volumes:type_name -> master_pb.VolumeShortInformationMessage
+ 4, // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+ 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+ 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+ 5, // 6: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend
+ 39, // 7: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry
+ 40, // 8: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding
+ 41, // 9: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation
+ 18, // 10: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection
+ 2, // 11: master_pb.DataNodeInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage
+ 4, // 12: master_pb.DataNodeInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage
+ 23, // 13: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo
+ 24, // 14: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo
+ 25, // 15: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo
+ 26, // 16: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo
+ 42, // 17: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation
+ 12, // 18: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location
+ 12, // 19: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location
+ 0, // 20: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat
+ 8, // 21: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest
+ 10, // 22: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest
+ 13, // 23: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest
+ 15, // 24: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest
+ 19, // 25: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest
+ 21, // 26: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest
+ 27, // 27: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest
+ 29, // 28: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest
+ 31, // 29: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest
+ 33, // 30: master_pb.Seaweed.ListMasterClients:input_type -> master_pb.ListMasterClientsRequest
+ 35, // 31: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest
+ 37, // 32: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest
+ 1, // 33: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse
+ 9, // 34: master_pb.Seaweed.KeepConnected:output_type -> master_pb.VolumeLocation
+ 11, // 35: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse
+ 14, // 36: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse
+ 16, // 37: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse
+ 20, // 38: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse
+ 22, // 39: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse
+ 28, // 40: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse
+ 30, // 41: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse
+ 32, // 42: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse
+ 34, // 43: master_pb.Seaweed.ListMasterClients:output_type -> master_pb.ListMasterClientsResponse
+ 36, // 44: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse
+ 38, // 45: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse
+ 33, // [33:46] is the sub-list for method output_type
+ 20, // [20:33] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
}
-func init() {
- proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat")
- proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse")
- proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage")
- proto.RegisterType((*Empty)(nil), "master_pb.Empty")
- proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra")
- proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding")
- proto.RegisterType((*ClientListenRequest)(nil), "master_pb.ClientListenRequest")
- proto.RegisterType((*VolumeLocation)(nil), "master_pb.VolumeLocation")
- proto.RegisterType((*LookupVolumeRequest)(nil), "master_pb.LookupVolumeRequest")
- proto.RegisterType((*LookupVolumeResponse)(nil), "master_pb.LookupVolumeResponse")
- proto.RegisterType((*LookupVolumeResponse_VolumeIdLocation)(nil), "master_pb.LookupVolumeResponse.VolumeIdLocation")
- proto.RegisterType((*Location)(nil), "master_pb.Location")
- proto.RegisterType((*AssignRequest)(nil), "master_pb.AssignRequest")
- proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse")
- proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest")
- proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse")
+func init() { file_master_proto_init() }
+func file_master_proto_init() {
+ if File_master_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_master_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Heartbeat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeartbeatResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeInformationMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeShortInformationMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardInformationMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StorageBackend); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SuperBlockExtra); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepConnectedRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StorageType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Collection); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionListRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionListResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DataNodeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RackInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DataCenterInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TopologyInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeListRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeListResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupEcVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupEcVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetMasterConfigurationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetMasterConfigurationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListMasterClientsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListMasterClientsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LeaseAdminTokenRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LeaseAdminTokenResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReleaseAdminTokenRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReleaseAdminTokenResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SuperBlockExtra_ErasureCoding); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_master_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 43,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_master_proto_goTypes,
+ DependencyIndexes: file_master_proto_depIdxs,
+ MessageInfos: file_master_proto_msgTypes,
+ }.Build()
+ File_master_proto = out.File
+ file_master_proto_rawDesc = nil
+ file_master_proto_goTypes = nil
+ file_master_proto_depIdxs = nil
}
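+
+// file_master_proto_init above stitches the raw descriptor, Go types and
+// dependency indexes together via protoimpl.TypeBuilder, then nils the
+// package-level slices so the builder inputs can be garbage collected.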
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for Seaweed service
+const _ = grpc.SupportPackageIsVersion6
+// SeaweedClient is the client API for Seaweed service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SeaweedClient interface {
SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error)
KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
+ CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error)
+ CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error)
+ VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error)
+ LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error)
+ GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error)
+ ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error)
+ LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error)
+ ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error)
}
type seaweedClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewSeaweedClient(cc *grpc.ClientConn) SeaweedClient {
+func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient {
return &seaweedClient{cc}
}
func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[0], c.cc, "/master_pb.Seaweed/SendHeartbeat", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[0], "/master_pb.Seaweed/SendHeartbeat", opts...)
if err != nil {
return nil, err
}
@@ -752,7 +4018,7 @@ func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) {
}
func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[1], c.cc, "/master_pb.Seaweed/KeepConnected", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[1], "/master_pb.Seaweed/KeepConnected", opts...)
if err != nil {
return nil, err
}
@@ -761,7 +4027,7 @@ func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOpti
}
type Seaweed_KeepConnectedClient interface {
- Send(*ClientListenRequest) error
+ Send(*KeepConnectedRequest) error
Recv() (*VolumeLocation, error)
grpc.ClientStream
}
@@ -770,7 +4036,7 @@ type seaweedKeepConnectedClient struct {
grpc.ClientStream
}
-func (x *seaweedKeepConnectedClient) Send(m *ClientListenRequest) error {
+func (x *seaweedKeepConnectedClient) Send(m *KeepConnectedRequest) error {
return x.ClientStream.SendMsg(m)
}
@@ -784,7 +4050,7 @@ func (x *seaweedKeepConnectedClient) Recv() (*VolumeLocation, error) {
func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
out := new(LookupVolumeResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -793,7 +4059,7 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques
func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) {
out := new(AssignResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, opts...)
if err != nil {
return nil, err
}
@@ -802,21 +4068,144 @@ func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...g
func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) {
out := new(StatisticsResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) {
+ out := new(CollectionListResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) {
+ out := new(CollectionDeleteResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) {
+ out := new(VolumeListResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) {
+ out := new(LookupEcVolumeResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) {
+ out := new(GetMasterConfigurationResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) {
+ out := new(ListMasterClientsResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) {
+ out := new(LeaseAdminTokenResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-// Server API for Seaweed service
+func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) {
+ out := new(ReleaseAdminTokenResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
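+
+// Illustrative use (not generated code): releasing a previously leased admin
+// token. Field names follow the ReleaseAdminTokenRequest descriptor above;
+// conn, ctx, prevToken and prevLockTime are assumed to come from the caller,
+// the last two from an earlier LeaseAdminToken call.
+//
+//	client := NewSeaweedClient(conn)
+//	_, err := client.ReleaseAdminToken(ctx, &ReleaseAdminTokenRequest{
+//		PreviousToken:    prevToken,
+//		PreviousLockTime: prevLockTime,
+//		LockName:         "exclusive-maintenance",
+//	})
+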
+// SeaweedServer is the server API for Seaweed service.
type SeaweedServer interface {
SendHeartbeat(Seaweed_SendHeartbeatServer) error
KeepConnected(Seaweed_KeepConnectedServer) error
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
Assign(context.Context, *AssignRequest) (*AssignResponse, error)
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
+ CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error)
+ CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error)
+ VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error)
+ LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error)
+ GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error)
+ ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error)
+ LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error)
+ ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error)
+}
+
+// UnimplementedSeaweedServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedServer struct {
+}
+
+func (*UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error {
+ return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented")
+}
+func (*UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error {
+ return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented")
+}
+func (*UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented")
+}
+func (*UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented")
+}
+func (*UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented")
+}
+func (*UnimplementedSeaweedServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented")
+}
+func (*UnimplementedSeaweedServer) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CollectionDelete not implemented")
+}
+func (*UnimplementedSeaweedServer) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeList not implemented")
+}
+func (*UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupEcVolume not implemented")
+}
+func (*UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented")
+}
+func (*UnimplementedSeaweedServer) ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListMasterClients not implemented")
+}
+func (*UnimplementedSeaweedServer) LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LeaseAdminToken not implemented")
+}
+func (*UnimplementedSeaweedServer) ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ReleaseAdminToken not implemented")
}
func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) {
@@ -855,7 +4244,7 @@ func _Seaweed_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) e
type Seaweed_KeepConnectedServer interface {
Send(*VolumeLocation) error
- Recv() (*ClientListenRequest, error)
+ Recv() (*KeepConnectedRequest, error)
grpc.ServerStream
}
@@ -867,8 +4256,8 @@ func (x *seaweedKeepConnectedServer) Send(m *VolumeLocation) error {
return x.ServerStream.SendMsg(m)
}
-func (x *seaweedKeepConnectedServer) Recv() (*ClientListenRequest, error) {
- m := new(ClientListenRequest)
+func (x *seaweedKeepConnectedServer) Recv() (*KeepConnectedRequest, error) {
+ m := new(KeepConnectedRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
@@ -929,6 +4318,150 @@ func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
+func _Seaweed_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CollectionListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).CollectionList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/CollectionList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).CollectionList(ctx, req.(*CollectionListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
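+
+// The unary handlers below all follow one generated pattern: decode the
+// request, call the SeaweedServer implementation directly when no interceptor
+// is configured, otherwise hand the interceptor a typed closure plus the full
+// method name.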
+
+func _Seaweed_CollectionDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CollectionDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).CollectionDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/CollectionDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).CollectionDelete(ctx, req.(*CollectionDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_VolumeList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).VolumeList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/VolumeList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).VolumeList(ctx, req.(*VolumeListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LookupEcVolumeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).LookupEcVolume(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/LookupEcVolume",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).LookupEcVolume(ctx, req.(*LookupEcVolumeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMasterConfigurationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).GetMasterConfiguration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/GetMasterConfiguration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).GetMasterConfiguration(ctx, req.(*GetMasterConfigurationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMasterClientsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).ListMasterClients(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/ListMasterClients",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseAdminTokenRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).LeaseAdminToken(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/LeaseAdminToken",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ReleaseAdminTokenRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).ReleaseAdminToken(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/ReleaseAdminToken",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Seaweed_serviceDesc = grpc.ServiceDesc{
ServiceName: "master_pb.Seaweed",
HandlerType: (*SeaweedServer)(nil),
@@ -945,6 +4478,38 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
MethodName: "Statistics",
Handler: _Seaweed_Statistics_Handler,
},
+ {
+ MethodName: "CollectionList",
+ Handler: _Seaweed_CollectionList_Handler,
+ },
+ {
+ MethodName: "CollectionDelete",
+ Handler: _Seaweed_CollectionDelete_Handler,
+ },
+ {
+ MethodName: "VolumeList",
+ Handler: _Seaweed_VolumeList_Handler,
+ },
+ {
+ MethodName: "LookupEcVolume",
+ Handler: _Seaweed_LookupEcVolume_Handler,
+ },
+ {
+ MethodName: "GetMasterConfiguration",
+ Handler: _Seaweed_GetMasterConfiguration_Handler,
+ },
+ {
+ MethodName: "ListMasterClients",
+ Handler: _Seaweed_ListMasterClients_Handler,
+ },
+ {
+ MethodName: "LeaseAdminToken",
+ Handler: _Seaweed_LeaseAdminToken_Handler,
+ },
+ {
+ MethodName: "ReleaseAdminToken",
+ Handler: _Seaweed_ReleaseAdminToken_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -962,75 +4527,3 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
},
Metadata: "master.proto",
}
-
-func init() { proto.RegisterFile("master.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 1055 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x6f, 0xe4, 0x44,
- 0x10, 0x5e, 0x7b, 0x9e, 0xae, 0xd9, 0xc9, 0x4e, 0x3a, 0x11, 0xf2, 0xce, 0xbe, 0x06, 0x73, 0x19,
- 0x04, 0x8a, 0x96, 0x70, 0x44, 0x08, 0xb1, 0xd1, 0x22, 0xa2, 0x04, 0x36, 0x38, 0xb0, 0x07, 0x2e,
- 0xa6, 0x63, 0x57, 0xa2, 0x56, 0xfc, 0xa2, 0xbb, 0x27, 0x99, 0xd9, 0x0b, 0x47, 0xfe, 0x15, 0x17,
- 0xb8, 0xf1, 0x53, 0xb8, 0xf1, 0x0b, 0x50, 0x3f, 0xec, 0xf1, 0x38, 0x09, 0x91, 0x90, 0xb8, 0xb5,
- 0xbf, 0xae, 0xee, 0xaa, 0xfe, 0xbe, 0x7a, 0x18, 0x1e, 0x66, 0x54, 0x48, 0xe4, 0x7b, 0x25, 0x2f,
- 0x64, 0x41, 0x3c, 0xf3, 0x15, 0x95, 0x67, 0xc1, 0x5f, 0x2e, 0x78, 0x5f, 0x23, 0xe5, 0xf2, 0x0c,
- 0xa9, 0x24, 0x5b, 0xe0, 0xb2, 0xd2, 0x77, 0x66, 0xce, 0xdc, 0x0b, 0x5d, 0x56, 0x12, 0x02, 0xdd,
- 0xb2, 0xe0, 0xd2, 0x77, 0x67, 0xce, 0x7c, 0x1c, 0xea, 0x35, 0x79, 0x06, 0x50, 0x2e, 0xce, 0x52,
- 0x16, 0x47, 0x0b, 0x9e, 0xfa, 0x1d, 0x6d, 0xeb, 0x19, 0xe4, 0x07, 0x9e, 0x92, 0x39, 0x4c, 0x32,
- 0xba, 0x8c, 0xae, 0x8a, 0x74, 0x91, 0x61, 0x14, 0x17, 0x8b, 0x5c, 0xfa, 0x5d, 0x7d, 0x7c, 0x2b,
- 0xa3, 0xcb, 0xb7, 0x1a, 0x3e, 0x50, 0x28, 0x99, 0xa9, 0xa8, 0x96, 0xd1, 0x39, 0x4b, 0x31, 0xba,
- 0xc4, 0x95, 0xdf, 0x9b, 0x39, 0xf3, 0x6e, 0x08, 0x19, 0x5d, 0x7e, 0xc5, 0x52, 0x3c, 0xc2, 0x15,
- 0x79, 0x01, 0xa3, 0x84, 0x4a, 0x1a, 0xc5, 0x98, 0x4b, 0xe4, 0x7e, 0x5f, 0xfb, 0x02, 0x05, 0x1d,
- 0x68, 0x44, 0xc5, 0xc7, 0x69, 0x7c, 0xe9, 0x0f, 0xf4, 0x8e, 0x5e, 0xab, 0xf8, 0x68, 0x92, 0xb1,
- 0x3c, 0xd2, 0x91, 0x0f, 0xb5, 0x6b, 0x4f, 0x23, 0x27, 0x2a, 0xfc, 0xcf, 0x61, 0x60, 0x62, 0x13,
- 0xbe, 0x37, 0xeb, 0xcc, 0x47, 0xfb, 0x1f, 0xec, 0xd5, 0x6c, 0xec, 0x99, 0xf0, 0x0e, 0xf3, 0xf3,
- 0x82, 0x67, 0x54, 0xb2, 0x22, 0xff, 0x06, 0x85, 0xa0, 0x17, 0x18, 0x56, 0x67, 0xc8, 0x63, 0x18,
- 0xe6, 0x78, 0x1d, 0x5d, 0xb1, 0x44, 0xf8, 0x30, 0xeb, 0xcc, 0xc7, 0xe1, 0x20, 0xc7, 0xeb, 0xb7,
- 0x2c, 0x11, 0xe4, 0x7d, 0x78, 0x98, 0x60, 0x8a, 0x12, 0x13, 0xb3, 0x3d, 0xd2, 0xdb, 0x23, 0x8b,
- 0x29, 0x93, 0x40, 0xc0, 0x76, 0x4d, 0x76, 0x88, 0xa2, 0x2c, 0x72, 0x81, 0x64, 0x0e, 0x8f, 0xcc,
- 0xed, 0xa7, 0xec, 0x1d, 0x1e, 0xb3, 0x8c, 0x49, 0xad, 0x40, 0x37, 0x6c, 0xc3, 0xe4, 0x29, 0x78,
- 0x02, 0x63, 0x8e, 0xf2, 0x08, 0x57, 0x5a, 0x13, 0x2f, 0x5c, 0x03, 0xe4, 0x3d, 0xe8, 0xa7, 0x48,
- 0x13, 0xe4, 0x56, 0x14, 0xfb, 0x15, 0xfc, 0xe1, 0x82, 0x7f, 0xd7, 0xc3, 0xb4, 0xe2, 0x89, 0xf6,
- 0x37, 0x0e, 0x5d, 0x96, 0x28, 0x46, 0x05, 0x7b, 0x87, 0xfa, 0xf6, 0x6e, 0xa8, 0xd7, 0xe4, 0x39,
- 0x40, 0x5c, 0xa4, 0x29, 0xc6, 0xea, 0xa0, 0xbd, 0xbc, 0x81, 0x28, 0xc6, 0xb5, 0x88, 0x6b, 0xb1,
- 0xbb, 0xa1, 0xa7, 0x10, 0xa3, 0x73, 0xcd, 0x8b, 0x35, 0x30, 0x3a, 0x5b, 0x5e, 0x8c, 0xc9, 0xc7,
- 0x40, 0x2a, 0xea, 0xce, 0x56, 0xb5, 0x61, 0x5f, 0x1b, 0x4e, 0xec, 0xce, 0xab, 0x55, 0x65, 0xfd,
- 0x04, 0x3c, 0x8e, 0x34, 0x89, 0x8a, 0x3c, 0x5d, 0x69, 0xe9, 0x87, 0xe1, 0x50, 0x01, 0x6f, 0xf2,
- 0x74, 0x45, 0x3e, 0x82, 0x6d, 0x8e, 0x65, 0xca, 0x62, 0x1a, 0x95, 0x29, 0x8d, 0x31, 0xc3, 0xbc,
- 0xca, 0x82, 0x89, 0xdd, 0x38, 0xa9, 0x70, 0xe2, 0xc3, 0xe0, 0x0a, 0xb9, 0x50, 0xcf, 0xf2, 0xb4,
- 0x49, 0xf5, 0x49, 0x26, 0xd0, 0x91, 0x32, 0xf5, 0x41, 0xa3, 0x6a, 0x19, 0x0c, 0xa0, 0xf7, 0x3a,
- 0x2b, 0xe5, 0x2a, 0xf8, 0xcd, 0x81, 0x47, 0xa7, 0x8b, 0x12, 0xf9, 0xab, 0xb4, 0x88, 0x2f, 0x5f,
- 0x2f, 0x25, 0xa7, 0xe4, 0x0d, 0x6c, 0x21, 0xa7, 0x62, 0xc1, 0x55, 0xec, 0x09, 0xcb, 0x2f, 0x34,
- 0xa5, 0xa3, 0xfd, 0x79, 0x23, 0xb9, 0x5a, 0x67, 0xf6, 0x5e, 0x9b, 0x03, 0x07, 0xda, 0x3e, 0x1c,
- 0x63, 0xf3, 0x73, 0xfa, 0x23, 0x8c, 0x37, 0xf6, 0x95, 0x30, 0x2a, 0xf1, 0xad, 0x54, 0x7a, 0xad,
- 0x14, 0x2f, 0x29, 0x67, 0x72, 0x65, 0x0b, 0xd4, 0x7e, 0x29, 0x41, 0x6c, 0xfd, 0xa9, 0x3c, 0xec,
- 0xe8, 0x3c, 0xf4, 0x0c, 0x72, 0x98, 0x88, 0xe0, 0x43, 0xd8, 0x39, 0x48, 0x19, 0xe6, 0xf2, 0x98,
- 0x09, 0x89, 0x79, 0x88, 0x3f, 0x2f, 0x50, 0x48, 0xe5, 0x21, 0xa7, 0x19, 0xda, 0xf2, 0xd7, 0xeb,
- 0xe0, 0x17, 0xd8, 0x32, 0xa9, 0x73, 0x5c, 0xc4, 0x3a, 0x6f, 0x14, 0x31, 0xaa, 0xee, 0x8d, 0x91,
- 0x5a, 0xb6, 0x1a, 0x82, 0xdb, 0x6e, 0x08, 0xcd, 0x8a, 0xe9, 0xfc, 0x7b, 0xc5, 0x74, 0x6f, 0x56,
- 0xcc, 0xf7, 0xb0, 0x73, 0x5c, 0x14, 0x97, 0x8b, 0xd2, 0x84, 0x51, 0xc5, 0xba, 0xf9, 0x42, 0x67,
- 0xd6, 0x51, 0x3e, 0xeb, 0x17, 0xb6, 0x32, 0xd6, 0x6d, 0x67, 0x6c, 0xf0, 0xb7, 0x03, 0xbb, 0x9b,
- 0xd7, 0xda, 0x5a, 0xfc, 0x09, 0x76, 0xea, 0x7b, 0xa3, 0xd4, 0xbe, 0xd9, 0x38, 0x18, 0xed, 0xbf,
- 0x6c, 0x88, 0x79, 0xdb, 0xe9, 0xaa, 0x7d, 0x24, 0x15, 0x59, 0xe1, 0xf6, 0x55, 0x0b, 0x11, 0xd3,
- 0x25, 0x4c, 0xda, 0x66, 0x2a, 0xa1, 0x6b, 0xaf, 0x96, 0xd9, 0x61, 0x75, 0x92, 0x7c, 0x02, 0xde,
- 0x3a, 0x10, 0x57, 0x07, 0xb2, 0xb3, 0x11, 0x88, 0xf5, 0xb5, 0xb6, 0x22, 0xbb, 0xd0, 0x43, 0xce,
- 0x8b, 0xaa, 0x11, 0x98, 0x8f, 0xe0, 0x33, 0x18, 0xfe, 0x67, 0x15, 0x83, 0x3f, 0x1d, 0x18, 0x7f,
- 0x29, 0x04, 0xbb, 0xa8, 0xd3, 0x65, 0x17, 0x7a, 0xa6, 0x4c, 0x4d, 0xb3, 0x32, 0x1f, 0x64, 0x06,
- 0x23, 0x5b, 0x65, 0x0d, 0xea, 0x9b, 0xd0, 0xbd, 0xdd, 0xc4, 0x56, 0x5e, 0xd7, 0x84, 0x26, 0x65,
- 0xda, 0x1e, 0x03, 0xbd, 0x3b, 0xc7, 0x40, 0xbf, 0x31, 0x06, 0x9e, 0x80, 0xa7, 0x0f, 0xe5, 0x45,
- 0x82, 0x76, 0x3e, 0x0c, 0x15, 0xf0, 0x6d, 0x91, 0xe8, 0xb4, 0xae, 0x1e, 0x63, 0x85, 0x9f, 0x40,
- 0xe7, 0xbc, 0x26, 0x5f, 0x2d, 0x2b, 0x8a, 0xdc, 0xbb, 0x28, 0xba, 0x31, 0xf9, 0x6a, 0x42, 0xba,
- 0x4d, 0x42, 0x6a, 0x2d, 0x7a, 0x4d, 0x2d, 0x2e, 0x60, 0xfb, 0x54, 0x52, 0xc9, 0x84, 0x64, 0xb1,
- 0xa8, 0x18, 0x6d, 0x71, 0xe7, 0xdc, 0xc7, 0x9d, 0x7b, 0x17, 0x77, 0x9d, 0x9a, 0xbb, 0xe0, 0x77,
- 0x07, 0x48, 0xd3, 0x93, 0x7d, 0xee, 0xff, 0xe0, 0x4a, 0xd1, 0x23, 0x0b, 0x49, 0xd3, 0x48, 0x0f,
- 0x10, 0x3b, 0x06, 0x34, 0xa2, 0x26, 0x98, 0x12, 0x64, 0x21, 0x30, 0x31, 0xbb, 0x66, 0x06, 0x0c,
- 0x15, 0xa0, 0x37, 0x37, 0x47, 0x48, 0xbf, 0x35, 0x42, 0xf6, 0x7f, 0xed, 0xc0, 0xe0, 0x14, 0xe9,
- 0x35, 0x62, 0x42, 0x0e, 0x61, 0x7c, 0x8a, 0x79, 0xb2, 0xfe, 0x69, 0xd9, 0x6d, 0x54, 0x43, 0x8d,
- 0x4e, 0x9f, 0xde, 0x86, 0x56, 0xef, 0x0f, 0x1e, 0xcc, 0x9d, 0x97, 0x0e, 0x39, 0x81, 0xf1, 0x11,
- 0x62, 0x79, 0x50, 0xe4, 0x39, 0xc6, 0x12, 0x13, 0xf2, 0xbc, 0x71, 0xe8, 0x96, 0x16, 0x39, 0x7d,
- 0x7c, 0xe3, 0x5f, 0xa1, 0xaa, 0x28, 0x7b, 0xe3, 0x77, 0xf0, 0xb0, 0xd9, 0x19, 0x36, 0x2e, 0xbc,
- 0xa5, 0x8f, 0x4d, 0x5f, 0xdc, 0xd3, 0x52, 0x82, 0x07, 0xe4, 0x0b, 0xe8, 0x9b, 0x5c, 0x25, 0x7e,
- 0xc3, 0x78, 0xa3, 0x16, 0x37, 0xe2, 0xda, 0x4c, 0xec, 0xe0, 0x01, 0x39, 0x02, 0x58, 0x67, 0x00,
- 0x69, 0xf2, 0x72, 0x23, 0x05, 0xa7, 0xcf, 0xee, 0xd8, 0xad, 0x2e, 0x3b, 0xeb, 0xeb, 0x3f, 0xc8,
- 0x4f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x9f, 0x0a, 0x25, 0x51, 0x0a, 0x00, 0x00,
-}
diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto
new file mode 100644
index 000000000..04446ad16
--- /dev/null
+++ b/weed/pb/messaging.proto
@@ -0,0 +1,135 @@
+syntax = "proto3";
+
+package messaging_pb;
+
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb";
+option java_package = "seaweedfs.client";
+option java_outer_classname = "MessagingProto";
+
+//////////////////////////////////////////////////
+
+service SeaweedMessaging {
+
+ rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) {
+ }
+
+ rpc Publish (stream PublishRequest) returns (stream PublishResponse) {
+ }
+
+ rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) {
+ }
+
+ rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
+ }
+
+ rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) {
+ }
+
+ rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) {
+ }
+
+}
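+
+// Typical flow (illustrative): a publisher opens the Publish stream, sends an
+// InitMessage naming namespace/topic/partition, then streams Message data; a
+// subscriber opens the Subscribe stream, sends its InitMessage with a
+// StartPosition, and reads BrokerMessages until is_close is signaled.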
+
+//////////////////////////////////////////////////
+
+message SubscriberMessage {
+ message InitMessage {
+ string namespace = 1;
+ string topic = 2;
+ int32 partition = 3;
+ enum StartPosition {
+ LATEST = 0; // Start at the newest message
+ EARLIEST = 1; // Start at the oldest message
+ TIMESTAMP = 2; // Start after a specified timestamp, exclusive
+ }
+ StartPosition startPosition = 4; // Where to begin consuming from
+ int64 timestampNs = 5; // timestamp in nanoseconds
+ string subscriber_id = 6; // uniquely identify a subscriber to track consumption
+ }
+ InitMessage init = 1;
+ message AckMessage {
+ int64 message_id = 1;
+ }
+ AckMessage ack = 2;
+ bool is_close = 3;
+}
+
+message Message {
+ int64 event_time_ns = 1 [jstype = JS_STRING];
+ bytes key = 2; // Message key
+ bytes value = 3; // Message payload
+ map<string, bytes> headers = 4; // Message headers
+ bool is_close = 5;
+}
+
+message BrokerMessage {
+ Message data = 1;
+}
+
+message PublishRequest {
+ message InitMessage {
+ string namespace = 1; // only needed on the initial request
+ string topic = 2; // only needed on the initial request
+ int32 partition = 3;
+ }
+ InitMessage init = 1;
+ Message data = 2;
+}
+
+message PublishResponse {
+ message ConfigMessage {
+ int32 partition_count = 1;
+ }
+ ConfigMessage config = 1;
+ message RedirectMessage {
+ string new_broker = 1;
+ }
+ RedirectMessage redirect = 2;
+ bool is_closed = 3;
+}
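+
+// A PublishResponse carries either the topic's partition_count, a redirect
+// naming the broker that should receive further publishes, or a close signal.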
+
+message DeleteTopicRequest {
+ string namespace = 1;
+ string topic = 2;
+}
+message DeleteTopicResponse {
+}
+
+message ConfigureTopicRequest {
+ string namespace = 1;
+ string topic = 2;
+ TopicConfiguration configuration = 3;
+}
+message ConfigureTopicResponse {
+}
+
+message GetTopicConfigurationRequest {
+ string namespace = 1;
+ string topic = 2;
+}
+message GetTopicConfigurationResponse {
+ TopicConfiguration configuration = 1;
+}
+
+message FindBrokerRequest {
+ string namespace = 1;
+ string topic = 2;
+ int32 partition = 3;
+}
+
+message FindBrokerResponse {
+ string broker = 1;
+}
+
+message TopicConfiguration {
+ int32 partition_count = 1;
+ string collection = 2;
+ string replication = 3;
+ bool is_transient = 4;
+ enum Partitioning {
+ NonNullKeyHash = 0; // If not null, hash by key value. If null, round robin
+ KeyHash = 1; // hash by key value
+ RoundRobin = 2; // pick one partition in round-robin order
+ }
+ Partitioning partitioning = 5;
+}
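+
+// Illustrative client call (Go), assuming the generated messaging_pb package
+// and an established gRPC connection conn; NewSeaweedMessagingClient is the
+// conventional protoc-gen-go constructor name for this service:
+//
+//	client := messaging_pb.NewSeaweedMessagingClient(conn)
+//	_, err := client.ConfigureTopic(ctx, &messaging_pb.ConfigureTopicRequest{
+//		Namespace:     "chat",
+//		Topic:         "room1",
+//		Configuration: &messaging_pb.TopicConfiguration{PartitionCount: 4},
+//	})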
diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go
new file mode 100644
index 000000000..90b4b724a
--- /dev/null
+++ b/weed/pb/messaging_pb/messaging.pb.go
@@ -0,0 +1,2053 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.24.0
+// protoc v3.12.3
+// source: messaging.proto
+
+package messaging_pb
+
+import (
+ context "context"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type SubscriberMessage_InitMessage_StartPosition int32
+
+const (
+ SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0 // Start at the newest message
+ SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1 // Start at the oldest message
+ SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2 // Start after a specified timestamp, exclusive
+)
+
+// Enum value maps for SubscriberMessage_InitMessage_StartPosition.
+var (
+ SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{
+ 0: "LATEST",
+ 1: "EARLIEST",
+ 2: "TIMESTAMP",
+ }
+ SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{
+ "LATEST": 0,
+ "EARLIEST": 1,
+ "TIMESTAMP": 2,
+ }
+)
+
+func (x SubscriberMessage_InitMessage_StartPosition) Enum() *SubscriberMessage_InitMessage_StartPosition {
+ p := new(SubscriberMessage_InitMessage_StartPosition)
+ *p = x
+ return p
+}
+
+func (x SubscriberMessage_InitMessage_StartPosition) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SubscriberMessage_InitMessage_StartPosition) Descriptor() protoreflect.EnumDescriptor {
+ return file_messaging_proto_enumTypes[0].Descriptor()
+}
+
+func (SubscriberMessage_InitMessage_StartPosition) Type() protoreflect.EnumType {
+ return &file_messaging_proto_enumTypes[0]
+}
+
+func (x SubscriberMessage_InitMessage_StartPosition) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SubscriberMessage_InitMessage_StartPosition.Descriptor instead.
+func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+type TopicConfiguration_Partitioning int32
+
+const (
+ TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0 // hash by the key if it is not null; otherwise round-robin
+ TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1 // hash by key value
+ TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2 // round-robin: pick one partition
+)
+
+// Enum value maps for TopicConfiguration_Partitioning.
+var (
+ TopicConfiguration_Partitioning_name = map[int32]string{
+ 0: "NonNullKeyHash",
+ 1: "KeyHash",
+ 2: "RoundRobin",
+ }
+ TopicConfiguration_Partitioning_value = map[string]int32{
+ "NonNullKeyHash": 0,
+ "KeyHash": 1,
+ "RoundRobin": 2,
+ }
+)
+
+func (x TopicConfiguration_Partitioning) Enum() *TopicConfiguration_Partitioning {
+ p := new(TopicConfiguration_Partitioning)
+ *p = x
+ return p
+}
+
+func (x TopicConfiguration_Partitioning) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (TopicConfiguration_Partitioning) Descriptor() protoreflect.EnumDescriptor {
+ return file_messaging_proto_enumTypes[1].Descriptor()
+}
+
+func (TopicConfiguration_Partitioning) Type() protoreflect.EnumType {
+ return &file_messaging_proto_enumTypes[1]
+}
+
+func (x TopicConfiguration_Partitioning) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use TopicConfiguration_Partitioning.Descriptor instead.
+func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{13, 0}
+}
+
+type SubscriberMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"`
+ Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3" json:"ack,omitempty"`
+ IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"`
+}
+
+func (x *SubscriberMessage) Reset() {
+ *x = SubscriberMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscriberMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscriberMessage) ProtoMessage() {}
+
+func (x *SubscriberMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscriberMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage {
+ if x != nil {
+ return x.Init
+ }
+ return nil
+}
+
+func (x *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage {
+ if x != nil {
+ return x.Ack
+ }
+ return nil
+}
+
+func (x *SubscriberMessage) GetIsClose() bool {
+ if x != nil {
+ return x.IsClose
+ }
+ return false
+}
+
+type Message struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs,proto3" json:"event_time_ns,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // Message key
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` // Message payload
+ Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message headers
+ IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"`
+}
+
+func (x *Message) Reset() {
+ *x = Message{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Message) GetEventTimeNs() int64 {
+ if x != nil {
+ return x.EventTimeNs
+ }
+ return 0
+}
+
+func (x *Message) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Message) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Message) GetHeaders() map[string][]byte {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *Message) GetIsClose() bool {
+ if x != nil {
+ return x.IsClose
+ }
+ return false
+}
+
+type BrokerMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data *Message `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *BrokerMessage) Reset() {
+ *x = BrokerMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BrokerMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BrokerMessage) ProtoMessage() {}
+
+func (x *BrokerMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BrokerMessage.ProtoReflect.Descriptor instead.
+func (*BrokerMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *BrokerMessage) GetData() *Message {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type PublishRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"`
+ Data *Message `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *PublishRequest) Reset() {
+ *x = PublishRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishRequest) ProtoMessage() {}
+
+func (x *PublishRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead.
+func (*PublishRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *PublishRequest) GetInit() *PublishRequest_InitMessage {
+ if x != nil {
+ return x.Init
+ }
+ return nil
+}
+
+func (x *PublishRequest) GetData() *Message {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type PublishResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+ Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect,proto3" json:"redirect,omitempty"`
+ IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed,proto3" json:"is_closed,omitempty"`
+}
+
+func (x *PublishResponse) Reset() {
+ *x = PublishResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse) ProtoMessage() {}
+
+func (x *PublishResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead.
+func (*PublishResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PublishResponse) GetConfig() *PublishResponse_ConfigMessage {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+func (x *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage {
+ if x != nil {
+ return x.Redirect
+ }
+ return nil
+}
+
+func (x *PublishResponse) GetIsClosed() bool {
+ if x != nil {
+ return x.IsClosed
+ }
+ return false
+}
+
+type DeleteTopicRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+}
+
+func (x *DeleteTopicRequest) Reset() {
+ *x = DeleteTopicRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteTopicRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteTopicRequest) ProtoMessage() {}
+
+func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead.
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteTopicRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *DeleteTopicRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+type DeleteTopicResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteTopicResponse) Reset() {
+ *x = DeleteTopicResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteTopicResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteTopicResponse) ProtoMessage() {}
+
+func (x *DeleteTopicResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteTopicResponse.ProtoReflect.Descriptor instead.
+func (*DeleteTopicResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{6}
+}
+
+type ConfigureTopicRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"`
+}
+
+func (x *ConfigureTopicRequest) Reset() {
+ *x = ConfigureTopicRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigureTopicRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigureTopicRequest) ProtoMessage() {}
+
+func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead.
+func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ConfigureTopicRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *ConfigureTopicRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration {
+ if x != nil {
+ return x.Configuration
+ }
+ return nil
+}
+
+type ConfigureTopicResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ConfigureTopicResponse) Reset() {
+ *x = ConfigureTopicResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigureTopicResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigureTopicResponse) ProtoMessage() {}
+
+func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead.
+func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{8}
+}
+
+type GetTopicConfigurationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+}
+
+func (x *GetTopicConfigurationRequest) Reset() {
+ *x = GetTopicConfigurationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopicConfigurationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopicConfigurationRequest) ProtoMessage() {}
+
+func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead.
+func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *GetTopicConfigurationRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *GetTopicConfigurationRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+type GetTopicConfigurationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration,proto3" json:"configuration,omitempty"`
+}
+
+func (x *GetTopicConfigurationResponse) Reset() {
+ *x = GetTopicConfigurationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopicConfigurationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopicConfigurationResponse) ProtoMessage() {}
+
+func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead.
+func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration {
+ if x != nil {
+ return x.Configuration
+ }
+ return nil
+}
+
+type FindBrokerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ Parition int32 `protobuf:"varint,3,opt,name=parition,proto3" json:"parition,omitempty"`
+}
+
+func (x *FindBrokerRequest) Reset() {
+ *x = FindBrokerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FindBrokerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FindBrokerRequest) ProtoMessage() {}
+
+func (x *FindBrokerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FindBrokerRequest.ProtoReflect.Descriptor instead.
+func (*FindBrokerRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *FindBrokerRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *FindBrokerRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *FindBrokerRequest) GetParition() int32 {
+ if x != nil {
+ return x.Parition
+ }
+ return 0
+}
+
+type FindBrokerResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"`
+}
+
+func (x *FindBrokerResponse) Reset() {
+ *x = FindBrokerResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FindBrokerResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FindBrokerResponse) ProtoMessage() {}
+
+func (x *FindBrokerResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FindBrokerResponse.ProtoReflect.Descriptor instead.
+func (*FindBrokerResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *FindBrokerResponse) GetBroker() string {
+ if x != nil {
+ return x.Broker
+ }
+ return ""
+}
+
+type TopicConfiguration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient,proto3" json:"is_transient,omitempty"`
+ Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,proto3,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"`
+}
+
+func (x *TopicConfiguration) Reset() {
+ *x = TopicConfiguration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TopicConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TopicConfiguration) ProtoMessage() {}
+
+func (x *TopicConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TopicConfiguration.ProtoReflect.Descriptor instead.
+func (*TopicConfiguration) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *TopicConfiguration) GetPartitionCount() int32 {
+ if x != nil {
+ return x.PartitionCount
+ }
+ return 0
+}
+
+func (x *TopicConfiguration) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *TopicConfiguration) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *TopicConfiguration) GetIsTransient() bool {
+ if x != nil {
+ return x.IsTransient
+ }
+ return false
+}
+
+func (x *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning {
+ if x != nil {
+ return x.Partitoning
+ }
+ return TopicConfiguration_NonNullKeyHash
+}
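
ConfigureTopic is a plain unary call wrapping this struct. A sketch of setting up a key-hashed topic; the replication string and partition count are placeholder values, and note that the generated field preserves the proto's spelling of partitoning:

    package main

    import (
        "context"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    )

    // configureTopic is an illustrative helper; the client comes from
    // messaging_pb.NewSeaweedMessagingClient as in the sketches above.
    func configureTopic(client messaging_pb.SeaweedMessagingClient) error {
        _, err := client.ConfigureTopic(context.Background(), &messaging_pb.ConfigureTopicRequest{
            Namespace: "default", // placeholder names
            Topic:     "events",
            Configuration: &messaging_pb.TopicConfiguration{
                PartitionCount: 4, // placeholder values
                Replication:    "000",
                // The generated field name carries over the proto's spelling.
                Partitoning: messaging_pb.TopicConfiguration_KeyHash,
            },
        })
        return err
    }
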
+
+type SubscriberMessage_InitMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"`
+ StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,proto3,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"` // Where to begin consuming from
+ TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs,proto3" json:"timestampNs,omitempty"` // timestamp in nano seconds
+ SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId,proto3" json:"subscriber_id,omitempty"` // uniquely identify a subscriber to track consumption
+}
+
+func (x *SubscriberMessage_InitMessage) Reset() {
+ *x = SubscriberMessage_InitMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscriberMessage_InitMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscriberMessage_InitMessage) ProtoMessage() {}
+
+func (x *SubscriberMessage_InitMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscriberMessage_InitMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *SubscriberMessage_InitMessage) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *SubscriberMessage_InitMessage) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *SubscriberMessage_InitMessage) GetPartition() int32 {
+ if x != nil {
+ return x.Partition
+ }
+ return 0
+}
+
+func (x *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition {
+ if x != nil {
+ return x.StartPosition
+ }
+ return SubscriberMessage_InitMessage_LATEST
+}
+
+func (x *SubscriberMessage_InitMessage) GetTimestampNs() int64 {
+ if x != nil {
+ return x.TimestampNs
+ }
+ return 0
+}
+
+func (x *SubscriberMessage_InitMessage) GetSubscriberId() string {
+ if x != nil {
+ return x.SubscriberId
+ }
+ return ""
+}
+
+type SubscriberMessage_AckMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
+}
+
+func (x *SubscriberMessage_AckMessage) Reset() {
+ *x = SubscriberMessage_AckMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscriberMessage_AckMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscriberMessage_AckMessage) ProtoMessage() {}
+
+func (x *SubscriberMessage_AckMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscriberMessage_AckMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *SubscriberMessage_AckMessage) GetMessageId() int64 {
+ if x != nil {
+ return x.MessageId
+ }
+ return 0
+}
+
+type PublishRequest_InitMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // only needed on the initial request
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // only needed on the initial request
+ Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"`
+}
+
+func (x *PublishRequest_InitMessage) Reset() {
+ *x = PublishRequest_InitMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishRequest_InitMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishRequest_InitMessage) ProtoMessage() {}
+
+func (x *PublishRequest_InitMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishRequest_InitMessage.ProtoReflect.Descriptor instead.
+func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *PublishRequest_InitMessage) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *PublishRequest_InitMessage) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *PublishRequest_InitMessage) GetPartition() int32 {
+ if x != nil {
+ return x.Partition
+ }
+ return 0
+}
+
+type PublishResponse_ConfigMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"`
+}
+
+func (x *PublishResponse_ConfigMessage) Reset() {
+ *x = PublishResponse_ConfigMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishResponse_ConfigMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse_ConfigMessage) ProtoMessage() {}
+
+func (x *PublishResponse_ConfigMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse_ConfigMessage.ProtoReflect.Descriptor instead.
+func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PublishResponse_ConfigMessage) GetPartitionCount() int32 {
+ if x != nil {
+ return x.PartitionCount
+ }
+ return 0
+}
+
+type PublishResponse_RedirectMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker,proto3" json:"new_broker,omitempty"`
+}
+
+func (x *PublishResponse_RedirectMessage) Reset() {
+ *x = PublishResponse_RedirectMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishResponse_RedirectMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse_RedirectMessage) ProtoMessage() {}
+
+func (x *PublishResponse_RedirectMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse_RedirectMessage.ProtoReflect.Descriptor instead.
+func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{4, 1}
+}
+
+func (x *PublishResponse_RedirectMessage) GetNewBroker() string {
+ if x != nil {
+ return x.NewBroker
+ }
+ return ""
+}
+
+var File_messaging_proto protoreflect.FileDescriptor
+
+var file_messaging_proto_rawDesc = []byte{
+ 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22,
+ 0x9e, 0x04, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
+ 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
+ 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
+ 0x03, 0x61, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a,
+ 0xc1, 0x02, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x5f, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4e,
+ 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41,
+ 0x54, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x41, 0x52, 0x4c, 0x49, 0x45,
+ 0x53, 0x54, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d,
+ 0x50, 0x10, 0x02, 0x1a, 0x2b, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64,
+ 0x22, 0xee, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0d,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x02, 0x30, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x07,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73,
+ 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73,
+ 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01,
+ 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69,
+ 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x29,
+ 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5f, 0x0a, 0x0b, 0x49, 0x6e, 0x69,
+ 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x02, 0x0a, 0x0f, 0x50,
+ 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43,
+ 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e,
+ 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x08, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x1a, 0x38, 0x0a, 0x0d, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x0f, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f,
+ 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65,
+ 0x77, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0x48, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a,
+ 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74,
+ 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x46, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x18,
+ 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x67, 0x0a, 0x1d,
+ 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67,
+ 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x11, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f,
+ 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1a,
+ 0x0a, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x46, 0x69,
+ 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73,
+ 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a,
+ 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x3f,
+ 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12,
+ 0x0a, 0x0e, 0x4e, 0x6f, 0x6e, 0x4e, 0x75, 0x6c, 0x6c, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68,
+ 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x10, 0x01, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x10, 0x02, 0x32,
+ 0xad, 0x04, 0x0a, 0x10, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62,
+ 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22,
+ 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68,
+ 0x12, 0x1c, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28,
+ 0x01, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67,
+ 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x24, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62,
+ 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a,
+ 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72,
+ 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42,
+ 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42,
+ 0x57, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
+ 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_messaging_proto_rawDescOnce sync.Once
+ file_messaging_proto_rawDescData = file_messaging_proto_rawDesc
+)
+
+func file_messaging_proto_rawDescGZIP() []byte {
+ file_messaging_proto_rawDescOnce.Do(func() {
+ file_messaging_proto_rawDescData = protoimpl.X.CompressGZIP(file_messaging_proto_rawDescData)
+ })
+ return file_messaging_proto_rawDescData
+}
+
+var file_messaging_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_messaging_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
+var file_messaging_proto_goTypes = []interface{}{
+ (SubscriberMessage_InitMessage_StartPosition)(0), // 0: messaging_pb.SubscriberMessage.InitMessage.StartPosition
+ (TopicConfiguration_Partitioning)(0), // 1: messaging_pb.TopicConfiguration.Partitioning
+ (*SubscriberMessage)(nil), // 2: messaging_pb.SubscriberMessage
+ (*Message)(nil), // 3: messaging_pb.Message
+ (*BrokerMessage)(nil), // 4: messaging_pb.BrokerMessage
+ (*PublishRequest)(nil), // 5: messaging_pb.PublishRequest
+ (*PublishResponse)(nil), // 6: messaging_pb.PublishResponse
+ (*DeleteTopicRequest)(nil), // 7: messaging_pb.DeleteTopicRequest
+ (*DeleteTopicResponse)(nil), // 8: messaging_pb.DeleteTopicResponse
+ (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest
+ (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse
+ (*GetTopicConfigurationRequest)(nil), // 11: messaging_pb.GetTopicConfigurationRequest
+ (*GetTopicConfigurationResponse)(nil), // 12: messaging_pb.GetTopicConfigurationResponse
+ (*FindBrokerRequest)(nil), // 13: messaging_pb.FindBrokerRequest
+ (*FindBrokerResponse)(nil), // 14: messaging_pb.FindBrokerResponse
+ (*TopicConfiguration)(nil), // 15: messaging_pb.TopicConfiguration
+ (*SubscriberMessage_InitMessage)(nil), // 16: messaging_pb.SubscriberMessage.InitMessage
+ (*SubscriberMessage_AckMessage)(nil), // 17: messaging_pb.SubscriberMessage.AckMessage
+ nil, // 18: messaging_pb.Message.HeadersEntry
+ (*PublishRequest_InitMessage)(nil), // 19: messaging_pb.PublishRequest.InitMessage
+ (*PublishResponse_ConfigMessage)(nil), // 20: messaging_pb.PublishResponse.ConfigMessage
+ (*PublishResponse_RedirectMessage)(nil), // 21: messaging_pb.PublishResponse.RedirectMessage
+}
+var file_messaging_proto_depIdxs = []int32{
+ 16, // 0: messaging_pb.SubscriberMessage.init:type_name -> messaging_pb.SubscriberMessage.InitMessage
+ 17, // 1: messaging_pb.SubscriberMessage.ack:type_name -> messaging_pb.SubscriberMessage.AckMessage
+ 18, // 2: messaging_pb.Message.headers:type_name -> messaging_pb.Message.HeadersEntry
+ 3, // 3: messaging_pb.BrokerMessage.data:type_name -> messaging_pb.Message
+ 19, // 4: messaging_pb.PublishRequest.init:type_name -> messaging_pb.PublishRequest.InitMessage
+ 3, // 5: messaging_pb.PublishRequest.data:type_name -> messaging_pb.Message
+ 20, // 6: messaging_pb.PublishResponse.config:type_name -> messaging_pb.PublishResponse.ConfigMessage
+ 21, // 7: messaging_pb.PublishResponse.redirect:type_name -> messaging_pb.PublishResponse.RedirectMessage
+ 15, // 8: messaging_pb.ConfigureTopicRequest.configuration:type_name -> messaging_pb.TopicConfiguration
+ 15, // 9: messaging_pb.GetTopicConfigurationResponse.configuration:type_name -> messaging_pb.TopicConfiguration
+ 1, // 10: messaging_pb.TopicConfiguration.partitoning:type_name -> messaging_pb.TopicConfiguration.Partitioning
+ 0, // 11: messaging_pb.SubscriberMessage.InitMessage.startPosition:type_name -> messaging_pb.SubscriberMessage.InitMessage.StartPosition
+ 2, // 12: messaging_pb.SeaweedMessaging.Subscribe:input_type -> messaging_pb.SubscriberMessage
+ 5, // 13: messaging_pb.SeaweedMessaging.Publish:input_type -> messaging_pb.PublishRequest
+ 7, // 14: messaging_pb.SeaweedMessaging.DeleteTopic:input_type -> messaging_pb.DeleteTopicRequest
+ 9, // 15: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest
+ 11, // 16: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest
+ 13, // 17: messaging_pb.SeaweedMessaging.FindBroker:input_type -> messaging_pb.FindBrokerRequest
+ 4, // 18: messaging_pb.SeaweedMessaging.Subscribe:output_type -> messaging_pb.BrokerMessage
+ 6, // 19: messaging_pb.SeaweedMessaging.Publish:output_type -> messaging_pb.PublishResponse
+ 8, // 20: messaging_pb.SeaweedMessaging.DeleteTopic:output_type -> messaging_pb.DeleteTopicResponse
+ 10, // 21: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse
+ 12, // 22: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse
+ 14, // 23: messaging_pb.SeaweedMessaging.FindBroker:output_type -> messaging_pb.FindBrokerResponse
+ 18, // [18:24] is the sub-list for method output_type
+ 12, // [12:18] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_messaging_proto_init() }
+func file_messaging_proto_init() {
+ if File_messaging_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_messaging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscriberMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BrokerMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteTopicRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteTopicResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigureTopicRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigureTopicResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTopicConfigurationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTopicConfigurationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FindBrokerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FindBrokerResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TopicConfiguration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscriberMessage_InitMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscriberMessage_AckMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishRequest_InitMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishResponse_ConfigMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishResponse_RedirectMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_messaging_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 20,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_messaging_proto_goTypes,
+ DependencyIndexes: file_messaging_proto_depIdxs,
+ EnumInfos: file_messaging_proto_enumTypes,
+ MessageInfos: file_messaging_proto_msgTypes,
+ }.Build()
+ File_messaging_proto = out.File
+ file_messaging_proto_rawDesc = nil
+ file_messaging_proto_goTypes = nil
+ file_messaging_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// SeaweedMessagingClient is the client API for SeaweedMessaging service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type SeaweedMessagingClient interface {
+ Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error)
+ Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error)
+ DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error)
+ ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error)
+ GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error)
+ FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error)
+}
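
As a rough usage sketch for the client API above (not part of this diff): dial a gRPC connection, wrap it with NewSeaweedMessagingClient, and drive the bidirectional Subscribe stream. The broker address, namespace, and topic are placeholders; the messaging_pb import path and the init-message field names are assumptions based on this repository's layout and messaging.proto.

    package main

    import (
        "context"
        "log"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" // assumed import path
        "google.golang.org/grpc"
    )

    func main() {
        // Placeholder broker address; adjust for a real deployment.
        conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := messaging_pb.NewSeaweedMessagingClient(conn)
        stream, err := client.Subscribe(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        // The subscriber speaks first: send an init message, then read broker messages.
        // InitMessage field names are assumed from messaging.proto.
        if err := stream.Send(&messaging_pb.SubscriberMessage{
            Init: &messaging_pb.SubscriberMessage_InitMessage{Namespace: "test", Topic: "events"},
        }); err != nil {
            log.Fatal(err)
        }
        for {
            m, err := stream.Recv()
            if err != nil {
                break // stream closed or errored
            }
            log.Printf("received: %v", m.Data)
        }
    }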
+
+type seaweedMessagingClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient {
+ return &seaweedMessagingClient{cc}
+}
+
+func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedMessagingSubscribeClient{stream}
+ return x, nil
+}
+
+type SeaweedMessaging_SubscribeClient interface {
+ Send(*SubscriberMessage) error
+ Recv() (*BrokerMessage, error)
+ grpc.ClientStream
+}
+
+type seaweedMessagingSubscribeClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
+ m := new(BrokerMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedMessagingPublishClient{stream}
+ return x, nil
+}
+
+type SeaweedMessaging_PublishClient interface {
+ Send(*PublishRequest) error
+ Recv() (*PublishResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedMessagingPublishClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
+ m := new(PublishResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) {
+ out := new(DeleteTopicResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
+ out := new(ConfigureTopicResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
+ out := new(GetTopicConfigurationResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) {
+ out := new(FindBrokerResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SeaweedMessagingServer is the server API for SeaweedMessaging service.
+type SeaweedMessagingServer interface {
+ Subscribe(SeaweedMessaging_SubscribeServer) error
+ Publish(SeaweedMessaging_PublishServer) error
+ DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error)
+ ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error)
+ GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error)
+ FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error)
+}
+
+// UnimplementedSeaweedMessagingServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedMessagingServer struct {
+}
+
+func (*UnimplementedSeaweedMessagingServer) Subscribe(SeaweedMessaging_SubscribeServer) error {
+ return status.Errorf(codes.Unimplemented, "method Subscribe not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) Publish(SeaweedMessaging_PublishServer) error {
+ return status.Errorf(codes.Unimplemented, "method Publish not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FindBroker not implemented")
+}
+
+func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) {
+ s.RegisterService(&_SeaweedMessaging_serviceDesc, srv)
+}
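
A hypothetical server-side counterpart: embedding UnimplementedSeaweedMessagingServer keeps an implementation forward compatible (any RPC it does not override answers codes.Unimplemented), and RegisterSeaweedMessagingServer attaches it to a grpc.Server. The listen address and import path are placeholders.

    package main

    import (
        "context"
        "log"
        "net"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" // assumed import path
        "google.golang.org/grpc"
    )

    // broker embeds UnimplementedSeaweedMessagingServer so that new service
    // methods added later do not break the build; they simply return Unimplemented.
    type broker struct {
        messaging_pb.UnimplementedSeaweedMessagingServer
    }

    // Override only the methods actually supported.
    func (b *broker) FindBroker(ctx context.Context, req *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) {
        return &messaging_pb.FindBrokerResponse{}, nil
    }

    func main() {
        lis, err := net.Listen("tcp", ":17777") // placeholder port
        if err != nil {
            log.Fatal(err)
        }
        s := grpc.NewServer()
        messaging_pb.RegisterSeaweedMessagingServer(s, &broker{})
        log.Fatal(s.Serve(lis))
    }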
+
+func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream})
+}
+
+type SeaweedMessaging_SubscribeServer interface {
+ Send(*BrokerMessage) error
+ Recv() (*SubscriberMessage, error)
+ grpc.ServerStream
+}
+
+type seaweedMessagingSubscribeServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) {
+ m := new(SubscriberMessage)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream})
+}
+
+type SeaweedMessaging_PublishServer interface {
+ Send(*PublishResponse) error
+ Recv() (*PublishRequest, error)
+ grpc.ServerStream
+}
+
+type seaweedMessagingPublishServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) {
+ m := new(PublishRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteTopicRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ConfigureTopicRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetTopicConfigurationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FindBrokerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).FindBroker(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
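
Each unary handler above threads an optional grpc.UnaryServerInterceptor: when one is installed on the server, the call is routed through interceptor(ctx, in, info, handler) instead of hitting the service method directly. A minimal logging interceptor, sketched only to show what that hook receives:

    // logging is a grpc.UnaryServerInterceptor; info.FullMethod carries strings
    // like "/messaging_pb.SeaweedMessaging/FindBroker" from the handlers above.
    func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
        handler grpc.UnaryHandler) (interface{}, error) {
        log.Printf("grpc call %s", info.FullMethod)
        resp, err := handler(ctx, req)
        if err != nil {
            log.Printf("grpc call %s failed: %v", info.FullMethod, err)
        }
        return resp, err
    }

It is installed once at server construction, e.g. grpc.NewServer(grpc.UnaryInterceptor(logging)).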
+
+var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "messaging_pb.SeaweedMessaging",
+ HandlerType: (*SeaweedMessagingServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "DeleteTopic",
+ Handler: _SeaweedMessaging_DeleteTopic_Handler,
+ },
+ {
+ MethodName: "ConfigureTopic",
+ Handler: _SeaweedMessaging_ConfigureTopic_Handler,
+ },
+ {
+ MethodName: "GetTopicConfiguration",
+ Handler: _SeaweedMessaging_GetTopicConfiguration_Handler,
+ },
+ {
+ MethodName: "FindBroker",
+ Handler: _SeaweedMessaging_FindBroker_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Subscribe",
+ Handler: _SeaweedMessaging_Subscribe_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "Publish",
+ Handler: _SeaweedMessaging_Publish_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "messaging.proto",
+}
diff --git a/weed/pb/proto_read_write_test.go b/weed/pb/proto_read_write_test.go
new file mode 100644
index 000000000..7f6444ab5
--- /dev/null
+++ b/weed/pb/proto_read_write_test.go
@@ -0,0 +1,43 @@
+package pb
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/golang/protobuf/jsonpb"
+)
+
+func TestJsonpMarshalUnmarshal(t *testing.T) {
+
+ tv := &volume_server_pb.RemoteFile{
+ BackendType: "aws",
+ BackendId: "",
+ FileSize: 12,
+ }
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+ if text, err := m.MarshalToString(tv); err != nil {
+ fmt.Printf("marshal eror: %v\n", err)
+ } else {
+ fmt.Printf("marshalled: %s\n", text)
+ }
+
+ rawJson := `{
+ "backendType":"aws",
+ "backendId":"temp",
+ "FileSize":12
+ }`
+
+ tv1 := &volume_server_pb.RemoteFile{}
+ if err := jsonpb.UnmarshalString(rawJson, tv1); err != nil {
+ fmt.Printf("unmarshal error: %v\n", err)
+ }
+
+ fmt.Printf("unmarshalled: %+v\n", tv1)
+
+}
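
Two knobs are worth noting here: EmitDefaults: true makes the marshaler emit zero-valued fields (such as the empty BackendId) that jsonpb otherwise omits, and Indent pretty-prints the output. Since the test only prints rather than asserts, it documents the round-trip behavior, including how jsonpb resolves the non-camelCase "FileSize" key, without locking it in.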
diff --git a/weed/pb/shared_values.go b/weed/pb/shared_values.go
new file mode 100644
index 000000000..1af19e51a
--- /dev/null
+++ b/weed/pb/shared_values.go
@@ -0,0 +1,5 @@
+package pb
+
+const (
+ AdminShellClient = "adminShell"
+)
diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go
new file mode 100644
index 000000000..c4f733f5c
--- /dev/null
+++ b/weed/pb/volume_info.go
@@ -0,0 +1,76 @@
+package pb
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+
+ _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
+ "github.com/golang/protobuf/jsonpb"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+)
+
+// MaybeLoadVolumeInfo loads the file data as a *volume_server_pb.VolumeInfo; the returned volumeInfo is never nil
+func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, error) {
+
+ volumeInfo := &volume_server_pb.VolumeInfo{}
+
+ glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
+ if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
+ if !exists {
+ return volumeInfo, false, nil
+ }
+ // the file exists but is not readable
+ glog.Warningf("can not read %s", fileName)
+ return volumeInfo, false, fmt.Errorf("can not read %s", fileName)
+ }
+
+ glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
+ tierData, readErr := ioutil.ReadFile(fileName)
+ if readErr != nil {
+ glog.Warningf("fail to read %s : %v", fileName, readErr)
+ return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr)
+ }
+
+ glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+ if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
+ glog.Warningf("unmarshal error: %v", err)
+ return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err)
+ }
+
+ if len(volumeInfo.GetFiles()) == 0 {
+ return volumeInfo, false, nil
+ }
+
+ return volumeInfo, true, nil
+}
+
+func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error {
+
+ if exists, _, canWrite, _, _ := util.CheckFile(fileName); exists && !canWrite {
+ return fmt.Errorf("%s not writable", fileName)
+ }
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+ text, marshalErr := m.MarshalToString(volumeInfo)
+ if marshalErr != nil {
+ return fmt.Errorf("marshal to %s: %v", fileName, marshalErr)
+ }
+
+ writeErr := ioutil.WriteFile(fileName, []byte(text), 0755)
+ if writeErr != nil {
+ return fmt.Errorf("fail to write %s : %v", fileName, writeErr)
+ }
+
+ return nil
+}
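
A hypothetical round trip through the two helpers above, written as if it lived in this pb package; the .vif-style path and field values are illustrative only.

    // Save a minimal VolumeInfo, then load it back. hasRemoteFiles comes back
    // false because MaybeLoadVolumeInfo reports true only when the info carries
    // RemoteFile entries.
    func exampleRoundTrip() {
        vi := &volume_server_pb.VolumeInfo{
            Version:     3,
            Replication: "001",
        }
        if err := SaveVolumeInfo("/tmp/1.vif", vi); err != nil {
            glog.Fatalf("save: %v", err)
        }
        loaded, hasRemoteFiles, err := MaybeLoadVolumeInfo("/tmp/1.vif")
        if err != nil {
            glog.Fatalf("load: %v", err)
        }
        glog.V(0).Infof("loaded=%+v hasRemoteFiles=%v", loaded, hasRemoteFiles)
    }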
diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto
index 8ab67a1bf..c9727e8d3 100644
--- a/weed/pb/volume_server.proto
+++ b/weed/pb/volume_server.proto
@@ -1,6 +1,7 @@
syntax = "proto3";
package volume_server_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb";
//////////////////////////////////////////////////
@@ -8,6 +9,7 @@ service VolumeServer {
//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
}
+
rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
}
rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) {
@@ -19,14 +21,12 @@ service VolumeServer {
rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
}
- rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
+ rpc AllocateVolume (AllocateVolumeRequest) returns (AllocateVolumeResponse) {
}
rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) {
}
- rpc VolumeSyncIndex (VolumeSyncIndexRequest) returns (stream VolumeSyncIndexResponse) {
- }
- rpc VolumeSyncData (VolumeSyncDataRequest) returns (stream VolumeSyncDataResponse) {
+ rpc VolumeIncrementalCopy (VolumeIncrementalCopyRequest) returns (stream VolumeIncrementalCopyResponse) {
}
rpc VolumeMount (VolumeMountRequest) returns (VolumeMountResponse) {
@@ -35,8 +35,56 @@ service VolumeServer {
}
rpc VolumeDelete (VolumeDeleteRequest) returns (VolumeDeleteResponse) {
}
+ rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
+ }
+ rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) {
+ }
+
+ // copy the .idx and .dat files, and mount this volume
+ rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) {
+ }
+ rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) {
+ }
+ rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) {
+ }
+
+ rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) {
+ }
+ rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) {
+ }
- // rpc VolumeUiPage (VolumeUiPageRequest) returns (VolumeUiPageResponse) {}
+ // erasure coding
+ rpc VolumeEcShardsGenerate (VolumeEcShardsGenerateRequest) returns (VolumeEcShardsGenerateResponse) {
+ }
+ rpc VolumeEcShardsRebuild (VolumeEcShardsRebuildRequest) returns (VolumeEcShardsRebuildResponse) {
+ }
+ rpc VolumeEcShardsCopy (VolumeEcShardsCopyRequest) returns (VolumeEcShardsCopyResponse) {
+ }
+ rpc VolumeEcShardsDelete (VolumeEcShardsDeleteRequest) returns (VolumeEcShardsDeleteResponse) {
+ }
+ rpc VolumeEcShardsMount (VolumeEcShardsMountRequest) returns (VolumeEcShardsMountResponse) {
+ }
+ rpc VolumeEcShardsUnmount (VolumeEcShardsUnmountRequest) returns (VolumeEcShardsUnmountResponse) {
+ }
+ rpc VolumeEcShardRead (VolumeEcShardReadRequest) returns (stream VolumeEcShardReadResponse) {
+ }
+ rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) {
+ }
+ rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) {
+ }
+
+ // tiered storage
+ rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) {
+ }
+ rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
+ }
+
+ rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) {
+ }
+
+ // query
+ rpc Query (QueryRequest) returns (stream QueriedStripe) {
+ }
}
@@ -44,6 +92,7 @@ service VolumeServer {
message BatchDeleteRequest {
repeated string file_ids = 1;
+ bool skip_cookie_check = 2;
}
message BatchDeleteResponse {
@@ -54,33 +103,35 @@ message DeleteResult {
int32 status = 2;
string error = 3;
uint32 size = 4;
+ uint32 version = 5;
}
message Empty {
}
message VacuumVolumeCheckRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VacuumVolumeCheckResponse {
double garbage_ratio = 1;
}
message VacuumVolumeCompactRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
int64 preallocate = 2;
}
message VacuumVolumeCompactResponse {
}
message VacuumVolumeCommitRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VacuumVolumeCommitResponse {
+ bool is_read_only = 1;
}
message VacuumVolumeCleanupRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VacuumVolumeCleanupResponse {
}
@@ -91,21 +142,22 @@ message DeleteCollectionRequest {
message DeleteCollectionResponse {
}
-message AssignVolumeRequest {
- uint32 volumd_id = 1;
+message AllocateVolumeRequest {
+ uint32 volume_id = 1;
string collection = 2;
int64 preallocate = 3;
string replication = 4;
string ttl = 5;
+ uint32 memory_map_max_size_mb = 6;
}
-message AssignVolumeResponse {
+message AllocateVolumeResponse {
}
message VolumeSyncStatusRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VolumeSyncStatusResponse {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
string collection = 2;
string replication = 4;
string ttl = 5;
@@ -114,45 +166,180 @@ message VolumeSyncStatusResponse {
uint64 idx_file_size = 8;
}
-message VolumeSyncIndexRequest {
- uint32 volumd_id = 1;
+message VolumeIncrementalCopyRequest {
+ uint32 volume_id = 1;
+ uint64 since_ns = 2;
}
-message VolumeSyncIndexResponse {
- bytes index_file_content = 1;
-}
-
-message VolumeSyncDataRequest {
- uint32 volumd_id = 1;
- uint32 revision = 2;
- uint32 offset = 3;
- uint32 size = 4;
- string needle_id = 5;
-}
-message VolumeSyncDataResponse {
+message VolumeIncrementalCopyResponse {
bytes file_content = 1;
}
message VolumeMountRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VolumeMountResponse {
}
message VolumeUnmountRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VolumeUnmountResponse {
}
message VolumeDeleteRequest {
- uint32 volumd_id = 1;
+ uint32 volume_id = 1;
}
message VolumeDeleteResponse {
}
-message VolumeUiPageRequest {
+message VolumeMarkReadonlyRequest {
+ uint32 volume_id = 1;
+}
+message VolumeMarkReadonlyResponse {
+}
+
+message VolumeConfigureRequest {
+ uint32 volume_id = 1;
+ string replication = 2;
+}
+message VolumeConfigureResponse {
+ string error = 1;
+}
+
+message VolumeCopyRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ string replication = 3;
+ string ttl = 4;
+ string source_data_node = 5;
+}
+message VolumeCopyResponse {
+ uint64 last_append_at_ns = 1;
+}
+
+message CopyFileRequest {
+ uint32 volume_id = 1;
+ string ext = 2;
+ uint32 compaction_revision = 3;
+ uint64 stop_offset = 4;
+ string collection = 5;
+ bool is_ec_volume = 6;
+ bool ignore_source_file_not_found = 7;
+}
+message CopyFileResponse {
+ bytes file_content = 1;
+}
+
+message VolumeTailSenderRequest {
+ uint32 volume_id = 1;
+ uint64 since_ns = 2;
+ uint32 idle_timeout_seconds = 3;
+}
+message VolumeTailSenderResponse {
+ bytes needle_header = 1;
+ bytes needle_body = 2;
+ bool is_last_chunk = 3;
+}
+
+message VolumeTailReceiverRequest {
+ uint32 volume_id = 1;
+ uint64 since_ns = 2;
+ uint32 idle_timeout_seconds = 3;
+ string source_volume_server = 4;
+}
+message VolumeTailReceiverResponse {
+}
+
+message VolumeEcShardsGenerateRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+}
+message VolumeEcShardsGenerateResponse {
+}
+
+message VolumeEcShardsRebuildRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+}
+message VolumeEcShardsRebuildResponse {
+ repeated uint32 rebuilt_shard_ids = 1;
+}
+
+message VolumeEcShardsCopyRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ repeated uint32 shard_ids = 3;
+ bool copy_ecx_file = 4;
+ string source_data_node = 5;
+ bool copy_ecj_file = 6;
+ bool copy_vif_file = 7;
+}
+message VolumeEcShardsCopyResponse {
+}
+
+message VolumeEcShardsDeleteRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ repeated uint32 shard_ids = 3;
+}
+message VolumeEcShardsDeleteResponse {
+}
+
+message VolumeEcShardsMountRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ repeated uint32 shard_ids = 3;
+}
+message VolumeEcShardsMountResponse {
+}
+
+message VolumeEcShardsUnmountRequest {
+ uint32 volume_id = 1;
+ repeated uint32 shard_ids = 3;
+}
+message VolumeEcShardsUnmountResponse {
+}
+
+message VolumeEcShardReadRequest {
+ uint32 volume_id = 1;
+ uint32 shard_id = 2;
+ int64 offset = 3;
+ int64 size = 4;
+ uint64 file_key = 5;
+}
+message VolumeEcShardReadResponse {
+ bytes data = 1;
+ bool is_deleted = 2;
+}
+
+message VolumeEcBlobDeleteRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ uint64 file_key = 3;
+ uint32 version = 4;
+}
+message VolumeEcBlobDeleteResponse {
+}
+
+message VolumeEcShardsToVolumeRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
}
-message VolumeUiPageResponse {
+message VolumeEcShardsToVolumeResponse {
+}
+
+message ReadVolumeFileStatusRequest {
+ uint32 volume_id = 1;
+}
+message ReadVolumeFileStatusResponse {
+ uint32 volume_id = 1;
+ uint64 idx_file_timestamp_seconds = 2;
+ uint64 idx_file_size = 3;
+ uint64 dat_file_timestamp_seconds = 4;
+ uint64 dat_file_size = 5;
+ uint64 file_count = 6;
+ uint32 compaction_revision = 7;
+ string collection = 8;
}
message DiskStatus {
@@ -160,6 +347,8 @@ message DiskStatus {
uint64 all = 2;
uint64 used = 3;
uint64 free = 4;
+ float percent_free = 5;
+ float percent_used = 6;
}
message MemStatus {
@@ -171,3 +360,106 @@ message MemStatus {
uint64 heap = 6;
uint64 stack = 7;
}
+
+// tiered storage on volume servers
+message RemoteFile {
+ string backend_type = 1;
+ string backend_id = 2;
+ string key = 3;
+ uint64 offset = 4;
+ uint64 file_size = 5;
+ uint64 modified_time = 6;
+ string extension = 7;
+}
+message VolumeInfo {
+ repeated RemoteFile files = 1;
+ uint32 version = 2;
+ string replication = 3;
+}
+
+message VolumeTierMoveDatToRemoteRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ string destination_backend_name = 3;
+ bool keep_local_dat_file = 4;
+}
+message VolumeTierMoveDatToRemoteResponse {
+ int64 processed = 1;
+ float processedPercentage = 2;
+}
+
+message VolumeTierMoveDatFromRemoteRequest {
+ uint32 volume_id = 1;
+ string collection = 2;
+ bool keep_remote_dat_file = 3;
+}
+message VolumeTierMoveDatFromRemoteResponse {
+ int64 processed = 1;
+ float processedPercentage = 2;
+}
+
+message VolumeServerStatusRequest {
+
+}
+message VolumeServerStatusResponse {
+ repeated DiskStatus disk_statuses = 1;
+ MemStatus memory_status = 2;
+}
+
+// select on volume servers
+message QueryRequest {
+ repeated string selections = 1;
+ repeated string from_file_ids = 2;
+ message Filter {
+ string field = 1;
+ string operand = 2;
+ string value = 3;
+ }
+ Filter filter = 3;
+
+ message InputSerialization {
+ // NONE | GZIP | BZIP2
+ string compression_type = 1;
+ message CSVInput {
+ string file_header_info = 1; // Valid values: NONE | USE | IGNORE
+ string record_delimiter = 2; // Default: \n
+ string field_delimiter = 3; // Default: ,
+ string quote_charactoer = 4; // Default: "
+ string quote_escape_character = 5; // Default: "
+ string comments = 6; // Default: #
+ // If true, records might contain record delimiters within quote characters
+ bool allow_quoted_record_delimiter = 7; // Default: false
+ }
+ message JSONInput {
+ string type = 1; // Valid values: DOCUMENT | LINES
+ }
+ message ParquetInput {
+ }
+
+ CSVInput csv_input = 2;
+ JSONInput json_input = 3;
+ ParquetInput parquet_input = 4;
+ }
+ InputSerialization input_serialization = 4;
+
+ message OutputSerialization {
+ message CSVOutput {
+ string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED
+ string record_delimiter = 2; // Default: \n
+ string field_delimiter = 3; // Default: ,
+ string quote_charactoer = 4; // Default: "
+ string quote_escape_character = 5; // Default: "
+ }
+ message JSONOutput {
+ string record_delimiter = 1;
+ }
+
+ CSVOutput csv_output = 2;
+ JSONOutput json_output = 3;
+ }
+
+ OutputSerialization output_serialization = 5;
+}
+message QueriedStripe {
+ bytes records = 1;
+}
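
A sketch of building the new QueryRequest from Go. The nested Go type names (QueryRequest_Filter, QueryRequest_InputSerialization, and its CSVInput) follow protoc-gen-go's usual Parent_Child convention and are assumptions here, as are the selection and file-id values.

    req := &volume_server_pb.QueryRequest{
        Selections:  []string{"name", "size"},
        FromFileIds: []string{"3,01637037d6"}, // hypothetical file id
        Filter: &volume_server_pb.QueryRequest_Filter{
            Field:   "size",
            Operand: ">",
            Value:   "1024",
        },
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            CsvInput: &volume_server_pb.QueryRequest_InputSerialization_CSVInput{
                FileHeaderInfo: "USE",
            },
        },
    }

Each matching volume server then streams back QueriedStripe messages whose records field carries the selected bytes.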
diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go
index fa700e2e5..85d248258 100644
--- a/weed/pb/volume_server_pb/volume_server.pb.go
+++ b/weed/pb/volume_server_pb/volume_server.pb.go
@@ -1,1205 +1,7573 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.24.0
+// protoc v3.12.3
// source: volume_server.proto
-// DO NOT EDIT!
-
-/*
-Package volume_server_pb is a generated protocol buffer package.
-
-It is generated from these files:
- volume_server.proto
-
-It has these top-level messages:
- BatchDeleteRequest
- BatchDeleteResponse
- DeleteResult
- Empty
- VacuumVolumeCheckRequest
- VacuumVolumeCheckResponse
- VacuumVolumeCompactRequest
- VacuumVolumeCompactResponse
- VacuumVolumeCommitRequest
- VacuumVolumeCommitResponse
- VacuumVolumeCleanupRequest
- VacuumVolumeCleanupResponse
- DeleteCollectionRequest
- DeleteCollectionResponse
- AssignVolumeRequest
- AssignVolumeResponse
- VolumeSyncStatusRequest
- VolumeSyncStatusResponse
- VolumeSyncIndexRequest
- VolumeSyncIndexResponse
- VolumeSyncDataRequest
- VolumeSyncDataResponse
- VolumeMountRequest
- VolumeMountResponse
- VolumeUnmountRequest
- VolumeUnmountResponse
- VolumeDeleteRequest
- VolumeDeleteResponse
- VolumeUiPageRequest
- VolumeUiPageResponse
- DiskStatus
- MemStatus
-*/
-package volume_server_pb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package volume_server_pb
import (
- context "golang.org/x/net/context"
+ context "context"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
type BatchDeleteRequest struct {
- FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *BatchDeleteRequest) Reset() { *m = BatchDeleteRequest{} }
-func (m *BatchDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*BatchDeleteRequest) ProtoMessage() {}
-func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+ FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"`
+ SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"`
+}
-func (m *BatchDeleteRequest) GetFileIds() []string {
- if m != nil {
- return m.FileIds
+func (x *BatchDeleteRequest) Reset() {
+ *x = BatchDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type BatchDeleteResponse struct {
- Results []*DeleteResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
+func (x *BatchDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BatchDeleteResponse) Reset() { *m = BatchDeleteResponse{} }
-func (m *BatchDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*BatchDeleteResponse) ProtoMessage() {}
-func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*BatchDeleteRequest) ProtoMessage() {}
-func (m *BatchDeleteResponse) GetResults() []*DeleteResult {
- if m != nil {
- return m.Results
+func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type DeleteResult struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"`
- Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"`
+// Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead.
+func (*BatchDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{0}
}
-func (m *DeleteResult) Reset() { *m = DeleteResult{} }
-func (m *DeleteResult) String() string { return proto.CompactTextString(m) }
-func (*DeleteResult) ProtoMessage() {}
-func (*DeleteResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *DeleteResult) GetFileId() string {
- if m != nil {
- return m.FileId
+func (x *BatchDeleteRequest) GetFileIds() []string {
+ if x != nil {
+ return x.FileIds
}
- return ""
+ return nil
}
-func (m *DeleteResult) GetStatus() int32 {
- if m != nil {
- return m.Status
+func (x *BatchDeleteRequest) GetSkipCookieCheck() bool {
+ if x != nil {
+ return x.SkipCookieCheck
}
- return 0
+ return false
}
-func (m *DeleteResult) GetError() string {
- if m != nil {
- return m.Error
- }
- return ""
+type BatchDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
}
-func (m *DeleteResult) GetSize() uint32 {
- if m != nil {
- return m.Size
+func (x *BatchDeleteResponse) Reset() {
+ *x = BatchDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type Empty struct {
+func (x *BatchDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*BatchDeleteResponse) ProtoMessage() {}
-type VacuumVolumeCheckRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} }
-func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCheckRequest) ProtoMessage() {}
-func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+// Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead.
+func (*BatchDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{1}
+}
-func (m *VacuumVolumeCheckRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *BatchDeleteResponse) GetResults() []*DeleteResult {
+ if x != nil {
+ return x.Results
}
- return 0
+ return nil
}
-type VacuumVolumeCheckResponse struct {
- GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio" json:"garbage_ratio,omitempty"`
+type DeleteResult struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"`
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
+ Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
}
-func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} }
-func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCheckResponse) ProtoMessage() {}
-func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 {
- if m != nil {
- return m.GarbageRatio
+func (x *DeleteResult) Reset() {
+ *x = DeleteResult{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VacuumVolumeCompactRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
- Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"`
+func (x *DeleteResult) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} }
-func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCompactRequest) ProtoMessage() {}
-func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*DeleteResult) ProtoMessage() {}
-func (m *VacuumVolumeCompactRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *DeleteResult) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VacuumVolumeCompactRequest) GetPreallocate() int64 {
- if m != nil {
- return m.Preallocate
- }
- return 0
+// Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead.
+func (*DeleteResult) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{2}
}
-type VacuumVolumeCompactResponse struct {
+func (x *DeleteResult) GetFileId() string {
+ if x != nil {
+ return x.FileId
+ }
+ return ""
}
-func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} }
-func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCompactResponse) ProtoMessage() {}
-func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (x *DeleteResult) GetStatus() int32 {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
-type VacuumVolumeCommitRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *DeleteResult) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
}
-func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} }
-func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCommitRequest) ProtoMessage() {}
-func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (x *DeleteResult) GetSize() uint32 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
-func (m *VacuumVolumeCommitRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *DeleteResult) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
}
return 0
}
-type VacuumVolumeCommitResponse struct {
+type Empty struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} }
-func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCommitResponse) ProtoMessage() {}
-func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (x *Empty) Reset() {
+ *x = Empty{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VacuumVolumeCleanupRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *Empty) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} }
-func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
-func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*Empty) ProtoMessage() {}
-func (m *VacuumVolumeCleanupRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VacuumVolumeCleanupResponse struct {
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{3}
}
-func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} }
-func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
-func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+type VacuumVolumeCheckRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type DeleteCollectionRequest struct {
- Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
}
-func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
-func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *DeleteCollectionRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VacuumVolumeCheckRequest) Reset() {
+ *x = VacuumVolumeCheckRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
-}
-
-type DeleteCollectionResponse struct {
}
-func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
-func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-type AssignVolumeRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"`
- Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"`
+func (x *VacuumVolumeCheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
-func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*AssignVolumeRequest) ProtoMessage() {}
-func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*VacuumVolumeCheckRequest) ProtoMessage() {}
-func (m *AssignVolumeRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *AssignVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{4}
}
-func (m *AssignVolumeRequest) GetPreallocate() int64 {
- if m != nil {
- return m.Preallocate
+func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *AssignVolumeRequest) GetReplication() string {
- if m != nil {
- return m.Replication
- }
- return ""
+type VacuumVolumeCheckResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"`
}
-func (m *AssignVolumeRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *VacuumVolumeCheckResponse) Reset() {
+ *x = VacuumVolumeCheckResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type AssignVolumeResponse struct {
+func (x *VacuumVolumeCheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
-func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*AssignVolumeResponse) ProtoMessage() {}
-func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*VacuumVolumeCheckResponse) ProtoMessage() {}
-type VolumeSyncStatusRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} }
-func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncStatusRequest) ProtoMessage() {}
-func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+// Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{5}
+}
-func (m *VolumeSyncStatusRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 {
+ if x != nil {
+ return x.GarbageRatio
}
return 0
}
-type VolumeSyncStatusResponse struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"`
- TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset" json:"tail_offset,omitempty"`
- CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"`
- IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"`
-}
+type VacuumVolumeCompactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} }
-func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncStatusResponse) ProtoMessage() {}
-func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"`
+}
-func (m *VolumeSyncStatusResponse) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCompactRequest) Reset() {
+ *x = VacuumVolumeCompactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *VolumeSyncStatusResponse) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+func (x *VacuumVolumeCompactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeSyncStatusResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+func (*VacuumVolumeCompactRequest) ProtoMessage() {}
+
+func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *VolumeSyncStatusResponse) GetTtl() string {
- if m != nil {
- return m.Ttl
- }
- return ""
+// Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{6}
}
-func (m *VolumeSyncStatusResponse) GetTailOffset() uint64 {
- if m != nil {
- return m.TailOffset
+func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *VolumeSyncStatusResponse) GetCompactRevision() uint32 {
- if m != nil {
- return m.CompactRevision
+func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 {
+ if x != nil {
+ return x.Preallocate
}
return 0
}
-func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 {
- if m != nil {
- return m.IdxFileSize
+type VacuumVolumeCompactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VacuumVolumeCompactResponse) Reset() {
+ *x = VacuumVolumeCompactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VolumeSyncIndexRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *VacuumVolumeCompactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeSyncIndexRequest) Reset() { *m = VolumeSyncIndexRequest{} }
-func (m *VolumeSyncIndexRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncIndexRequest) ProtoMessage() {}
-func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*VacuumVolumeCompactResponse) ProtoMessage() {}
-func (m *VolumeSyncIndexRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VolumeSyncIndexResponse struct {
- IndexFileContent []byte `protobuf:"bytes,1,opt,name=index_file_content,json=indexFileContent,proto3" json:"index_file_content,omitempty"`
+// Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{7}
}
-func (m *VolumeSyncIndexResponse) Reset() { *m = VolumeSyncIndexResponse{} }
-func (m *VolumeSyncIndexResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncIndexResponse) ProtoMessage() {}
-func (*VolumeSyncIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+type VacuumVolumeCommitRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
-func (m *VolumeSyncIndexResponse) GetIndexFileContent() []byte {
- if m != nil {
- return m.IndexFileContent
+func (x *VacuumVolumeCommitRequest) Reset() {
+ *x = VacuumVolumeCommitRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type VolumeSyncDataRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
- Revision uint32 `protobuf:"varint,2,opt,name=revision" json:"revision,omitempty"`
- Offset uint32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
- Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"`
- NeedleId string `protobuf:"bytes,5,opt,name=needle_id,json=needleId" json:"needle_id,omitempty"`
+func (x *VacuumVolumeCommitRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeSyncDataRequest) Reset() { *m = VolumeSyncDataRequest{} }
-func (m *VolumeSyncDataRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncDataRequest) ProtoMessage() {}
-func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*VacuumVolumeCommitRequest) ProtoMessage() {}
-func (m *VolumeSyncDataRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VolumeSyncDataRequest) GetRevision() uint32 {
- if m != nil {
- return m.Revision
- }
- return 0
+// Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{8}
}
-func (m *VolumeSyncDataRequest) GetOffset() uint32 {
- if m != nil {
- return m.Offset
+func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *VolumeSyncDataRequest) GetSize() uint32 {
- if m != nil {
- return m.Size
- }
- return 0
+type VacuumVolumeCommitResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"`
}
-func (m *VolumeSyncDataRequest) GetNeedleId() string {
- if m != nil {
- return m.NeedleId
+func (x *VacuumVolumeCommitResponse) Reset() {
+ *x = VacuumVolumeCommitResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type VolumeSyncDataResponse struct {
- FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
+func (x *VacuumVolumeCommitResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeSyncDataResponse) Reset() { *m = VolumeSyncDataResponse{} }
-func (m *VolumeSyncDataResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncDataResponse) ProtoMessage() {}
-func (*VolumeSyncDataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (*VacuumVolumeCommitResponse) ProtoMessage() {}
-func (m *VolumeSyncDataResponse) GetFileContent() []byte {
- if m != nil {
- return m.FileContent
+func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type VolumeMountRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+// Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool {
+ if x != nil {
+ return x.IsReadOnly
+ }
+ return false
}
-func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} }
-func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeMountRequest) ProtoMessage() {}
-func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+type VacuumVolumeCleanupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
-func (m *VolumeMountRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCleanupRequest) Reset() {
+ *x = VacuumVolumeCleanupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VolumeMountResponse struct {
+func (x *VacuumVolumeCleanupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} }
-func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeMountResponse) ProtoMessage() {}
-func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
-type VolumeUnmountRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} }
-func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeUnmountRequest) ProtoMessage() {}
-func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+// Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{10}
+}
-func (m *VolumeUnmountRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-type VolumeUnmountResponse struct {
+type VacuumVolumeCleanupResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} }
-func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeUnmountResponse) ProtoMessage() {}
-func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (x *VacuumVolumeCleanupResponse) Reset() {
+ *x = VacuumVolumeCleanupResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VolumeDeleteRequest struct {
- VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"`
+func (x *VacuumVolumeCleanupResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} }
-func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeDeleteRequest) ProtoMessage() {}
-func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
-func (m *VolumeDeleteRequest) GetVolumdId() uint32 {
- if m != nil {
- return m.VolumdId
+func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VolumeDeleteResponse struct {
+// Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{11}
}
-func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} }
-func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeDeleteResponse) ProtoMessage() {}
-func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+type DeleteCollectionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type VolumeUiPageRequest struct {
+
+ Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"`
}
-func (m *VolumeUiPageRequest) Reset() { *m = VolumeUiPageRequest{} }
-func (m *VolumeUiPageRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeUiPageRequest) ProtoMessage() {}
-func (*VolumeUiPageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (x *DeleteCollectionRequest) Reset() {
+ *x = DeleteCollectionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VolumeUiPageResponse struct {
+func (x *DeleteCollectionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeUiPageResponse) Reset() { *m = VolumeUiPageResponse{} }
-func (m *VolumeUiPageResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeUiPageResponse) ProtoMessage() {}
-func (*VolumeUiPageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*DeleteCollectionRequest) ProtoMessage() {}
-type DiskStatus struct {
- Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
- All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
- Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"`
- Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"`
+func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *DiskStatus) Reset() { *m = DiskStatus{} }
-func (m *DiskStatus) String() string { return proto.CompactTextString(m) }
-func (*DiskStatus) ProtoMessage() {}
-func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{12}
+}
-func (m *DiskStatus) GetDir() string {
- if m != nil {
- return m.Dir
+func (x *DeleteCollectionRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *DiskStatus) GetAll() uint64 {
- if m != nil {
- return m.All
+type DeleteCollectionResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteCollectionResponse) Reset() {
+ *x = DeleteCollectionResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *DiskStatus) GetUsed() uint64 {
- if m != nil {
- return m.Used
+func (x *DeleteCollectionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionResponse) ProtoMessage() {}
+
+func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{13}
+}
+
+type AllocateVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"`
+ Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"`
}
-func (m *DiskStatus) GetFree() uint64 {
- if m != nil {
- return m.Free
+func (x *AllocateVolumeRequest) Reset() {
+ *x = AllocateVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type MemStatus struct {
- Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"`
- All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
- Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"`
- Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"`
- Self uint64 `protobuf:"varint,5,opt,name=self" json:"self,omitempty"`
- Heap uint64 `protobuf:"varint,6,opt,name=heap" json:"heap,omitempty"`
- Stack uint64 `protobuf:"varint,7,opt,name=stack" json:"stack,omitempty"`
+func (x *AllocateVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MemStatus) Reset() { *m = MemStatus{} }
-func (m *MemStatus) String() string { return proto.CompactTextString(m) }
-func (*MemStatus) ProtoMessage() {}
-func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (*AllocateVolumeRequest) ProtoMessage() {}
-func (m *MemStatus) GetGoroutines() int32 {
- if m != nil {
- return m.Goroutines
+func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead.
+func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{14}
}
-func (m *MemStatus) GetAll() uint64 {
- if m != nil {
- return m.All
+func (x *AllocateVolumeRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *MemStatus) GetUsed() uint64 {
- if m != nil {
- return m.Used
+func (x *AllocateVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
- return 0
+ return ""
}
-func (m *MemStatus) GetFree() uint64 {
- if m != nil {
- return m.Free
+func (x *AllocateVolumeRequest) GetPreallocate() int64 {
+ if x != nil {
+ return x.Preallocate
}
return 0
}
-func (m *MemStatus) GetSelf() uint64 {
- if m != nil {
- return m.Self
+func (x *AllocateVolumeRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
- return 0
+ return ""
}
-func (m *MemStatus) GetHeap() uint64 {
- if m != nil {
- return m.Heap
+func (x *AllocateVolumeRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
}
- return 0
+ return ""
}
-func (m *MemStatus) GetStack() uint64 {
- if m != nil {
- return m.Stack
+func (x *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 {
+ if x != nil {
+ return x.MemoryMapMaxSizeMb
}
return 0
}
-func init() {
- proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest")
- proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse")
- proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult")
- proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty")
- proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest")
- proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse")
- proto.RegisterType((*VacuumVolumeCompactRequest)(nil), "volume_server_pb.VacuumVolumeCompactRequest")
- proto.RegisterType((*VacuumVolumeCompactResponse)(nil), "volume_server_pb.VacuumVolumeCompactResponse")
- proto.RegisterType((*VacuumVolumeCommitRequest)(nil), "volume_server_pb.VacuumVolumeCommitRequest")
- proto.RegisterType((*VacuumVolumeCommitResponse)(nil), "volume_server_pb.VacuumVolumeCommitResponse")
- proto.RegisterType((*VacuumVolumeCleanupRequest)(nil), "volume_server_pb.VacuumVolumeCleanupRequest")
- proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse")
- proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest")
- proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse")
- proto.RegisterType((*AssignVolumeRequest)(nil), "volume_server_pb.AssignVolumeRequest")
- proto.RegisterType((*AssignVolumeResponse)(nil), "volume_server_pb.AssignVolumeResponse")
- proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest")
- proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse")
- proto.RegisterType((*VolumeSyncIndexRequest)(nil), "volume_server_pb.VolumeSyncIndexRequest")
- proto.RegisterType((*VolumeSyncIndexResponse)(nil), "volume_server_pb.VolumeSyncIndexResponse")
- proto.RegisterType((*VolumeSyncDataRequest)(nil), "volume_server_pb.VolumeSyncDataRequest")
- proto.RegisterType((*VolumeSyncDataResponse)(nil), "volume_server_pb.VolumeSyncDataResponse")
- proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest")
- proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse")
- proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest")
- proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse")
- proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest")
- proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse")
- proto.RegisterType((*VolumeUiPageRequest)(nil), "volume_server_pb.VolumeUiPageRequest")
- proto.RegisterType((*VolumeUiPageResponse)(nil), "volume_server_pb.VolumeUiPageResponse")
- proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus")
- proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus")
+type AllocateVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
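
The init() block deleted here registered every message with the global APIv1 registry via proto.RegisterType. APIv2 drops per-type registration; the generated file instead installs its types and compressed descriptor once through a file-level init hook, schematically as below (the exact body is generator output not shown in this hunk; the names follow protoc-gen-go conventions, consistent with the file_volume_server_proto_rawDescGZIP calls above):

    // Schematic APIv2 replacement for the deleted proto.RegisterType calls:
    //
    //	func init() { file_volume_server_proto_init() }
    //
    // file_volume_server_proto_init installs the message types defined above
    // and the compressed descriptor behind file_volume_server_proto_rawDescGZIP.
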
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
+func (x *AllocateVolumeResponse) Reset() {
+ *x = AllocateVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+func (x *AllocateVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
-// Client API for VolumeServer service
+func (*AllocateVolumeResponse) ProtoMessage() {}
-type VolumeServerClient interface {
- // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
- BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
- VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
- VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error)
- VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error)
- VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error)
- DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
- AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
- VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error)
- VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error)
- VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error)
- VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error)
- VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error)
- VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error)
+func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-type volumeServerClient struct {
- cc *grpc.ClientConn
+// Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead.
+func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{15}
}
-func NewVolumeServerClient(cc *grpc.ClientConn) VolumeServerClient {
- return &volumeServerClient{cc}
+type VolumeSyncStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
}
-func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) {
- out := new(BatchDeleteResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
+func (x *VolumeSyncStatusRequest) Reset() {
+ *x = VolumeSyncStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return out, nil
}
-func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) {
- out := new(VacuumVolumeCheckResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
+func (x *VolumeSyncStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) {
- out := new(VacuumVolumeCompactResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
+func (*VolumeSyncStatusRequest) ProtoMessage() {}
+
+func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return out, nil
+ return mi.MessageOf(x)
}
-func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) {
- out := new(VacuumVolumeCommitResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
+// Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{16}
}
-func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) {
- out := new(VacuumVolumeCleanupResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
+func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
- return out, nil
+ return 0
}
-func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
- out := new(DeleteCollectionResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
+type VolumeSyncStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"`
+ CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"`
}
-func (c *volumeServerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
- out := new(AssignVolumeResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AssignVolume", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
+func (x *VolumeSyncStatusResponse) Reset() {
+ *x = VolumeSyncStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return out, nil
}
-func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) {
- out := new(VolumeSyncStatusResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, c.cc, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
+func (x *VolumeSyncStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (c *volumeServerClient) VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncIndex", opts...)
- if err != nil {
- return nil, err
- }
- x := &volumeServerVolumeSyncIndexClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
+func (*VolumeSyncStatusResponse) ProtoMessage() {}
+
+func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return x, nil
+ return mi.MessageOf(x)
}
-type VolumeServer_VolumeSyncIndexClient interface {
- Recv() (*VolumeSyncIndexResponse, error)
- grpc.ClientStream
+// Deprecated: Use VolumeSyncStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{17}
}
-type volumeServerVolumeSyncIndexClient struct {
- grpc.ClientStream
+func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
}
-func (x *volumeServerVolumeSyncIndexClient) Recv() (*VolumeSyncIndexResponse, error) {
- m := new(VolumeSyncIndexResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
+func (x *VolumeSyncStatusResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
- return m, nil
+ return ""
}
-func (c *volumeServerClient) VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncData", opts...)
- if err != nil {
- return nil, err
- }
- x := &volumeServerVolumeSyncDataClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
+func (x *VolumeSyncStatusResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
- return x, nil
+ return ""
}
-type VolumeServer_VolumeSyncDataClient interface {
- Recv() (*VolumeSyncDataResponse, error)
- grpc.ClientStream
+func (x *VolumeSyncStatusResponse) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
}
-type volumeServerVolumeSyncDataClient struct {
- grpc.ClientStream
+func (x *VolumeSyncStatusResponse) GetTailOffset() uint64 {
+ if x != nil {
+ return x.TailOffset
+ }
+ return 0
}
-func (x *volumeServerVolumeSyncDataClient) Recv() (*VolumeSyncDataResponse, error) {
- m := new(VolumeSyncDataResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
+func (x *VolumeSyncStatusResponse) GetCompactRevision() uint32 {
+ if x != nil {
+ return x.CompactRevision
}
- return m, nil
+ return 0
+}
+
+func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 {
+ if x != nil {
+ return x.IdxFileSize
+ }
+ return 0
+}
+
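This hunk also removes the hand-rolled APIv1 client plumbing: the VolumeServerClient interface, the grpc.Invoke calls, and the stream wrapper types. Under APIv2 the service stubs are generated separately (typically by protoc-gen-go-grpc) and do not appear in this hunk, but the call shape is unchanged. A sketch, assuming the constructor keeps its NewVolumeServerClient name; the address is a placeholder:

    package main

    import (
        "context"
        "fmt"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    )

    func main() {
        conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := volume_server_pb.NewVolumeServerClient(conn)
        resp, err := client.VolumeSyncStatus(context.Background(),
            &volume_server_pb.VolumeSyncStatusRequest{VolumeId: 7})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.GetCollection(), resp.GetTailOffset(), resp.GetCompactRevision())
    }
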
+type VolumeIncrementalCopyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+}
+
+func (x *VolumeIncrementalCopyRequest) Reset() {
+ *x = VolumeIncrementalCopyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeIncrementalCopyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeIncrementalCopyRequest) ProtoMessage() {}
+
+func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+type VolumeIncrementalCopyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
+}
+
+func (x *VolumeIncrementalCopyResponse) Reset() {
+ *x = VolumeIncrementalCopyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeIncrementalCopyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeIncrementalCopyResponse) ProtoMessage() {}
+
+func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte {
+ if x != nil {
+ return x.FileContent
+ }
+ return nil
+}
+
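The old VolumeSyncIndex/VolumeSyncData RPC pair deleted above is superseded by VolumeIncrementalCopy which, judging by the field names, streams back volume content written since a given nanosecond timestamp. A sketch of draining such a stream (imports as in the previous example, plus io), assuming the RPC is server-streaming like the VolumeSyncData it replaces:

    // Sketch: drain an incremental-copy stream into dst, resuming from sinceNs
    // (e.g. the last_append_at_ns returned by an earlier VolumeCopy).
    func pullIncremental(ctx context.Context, client volume_server_pb.VolumeServerClient,
        volumeId uint32, sinceNs uint64, dst io.Writer) error {

        stream, err := client.VolumeIncrementalCopy(ctx,
            &volume_server_pb.VolumeIncrementalCopyRequest{
                VolumeId: volumeId,
                SinceNs:  sinceNs,
            })
        if err != nil {
            return err
        }
        for {
            resp, err := stream.Recv()
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
            if _, err := dst.Write(resp.GetFileContent()); err != nil {
                return err
            }
        }
    }
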
+type VolumeMountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMountRequest) Reset() {
+ *x = VolumeMountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMountRequest) ProtoMessage() {}
+
+func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *VolumeMountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeMountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMountResponse) Reset() {
+ *x = VolumeMountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMountResponse) ProtoMessage() {}
+
+func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{21}
+}
+
+type VolumeUnmountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeUnmountRequest) Reset() {
+ *x = VolumeUnmountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeUnmountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeUnmountRequest) ProtoMessage() {}
+
+func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *VolumeUnmountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeUnmountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeUnmountResponse) Reset() {
+ *x = VolumeUnmountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeUnmountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeUnmountResponse) ProtoMessage() {}
+
+func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{23}
+}
+
+type VolumeDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeDeleteRequest) Reset() {
+ *x = VolumeDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *VolumeDeleteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeDeleteResponse) Reset() {
+ *x = VolumeDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{25}
+}
+
+type VolumeMarkReadonlyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMarkReadonlyRequest) Reset() {
+ *x = VolumeMarkReadonlyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMarkReadonlyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkReadonlyRequest) ProtoMessage() {}
+
+func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeMarkReadonlyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMarkReadonlyResponse) Reset() {
+ *x = VolumeMarkReadonlyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMarkReadonlyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkReadonlyResponse) ProtoMessage() {}
+
+func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{27}
+}
+
+type VolumeConfigureRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+}
+
+func (x *VolumeConfigureRequest) Reset() {
+ *x = VolumeConfigureRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeConfigureRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeConfigureRequest) ProtoMessage() {}
+
+func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead.
+func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *VolumeConfigureRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeConfigureRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+type VolumeConfigureResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *VolumeConfigureResponse) Reset() {
+ *x = VolumeConfigureResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeConfigureResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeConfigureResponse) ProtoMessage() {}
+
+func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead.
+func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *VolumeConfigureResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
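Unlike most responses in this file, VolumeConfigureResponse carries an in-band Error field, so a caller has to check both the gRPC error and the response body. A sketch, assuming the service exposes a matching VolumeConfigure RPC (the service definition is not part of this hunk):

    // Sketch: VolumeConfigure reports application-level errors in-band.
    func configureReplication(ctx context.Context, client volume_server_pb.VolumeServerClient,
        volumeId uint32, replication string) error {

        resp, err := client.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
            VolumeId:    volumeId,
            Replication: replication, // e.g. "001"
        })
        if err != nil {
            return err // transport / RPC-status failure
        }
        if e := resp.GetError(); e != "" {
            return fmt.Errorf("volume configure: %s", e) // reported in the response body
        }
        return nil
    }
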
+type VolumeCopyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"`
+}
+
+func (x *VolumeCopyRequest) Reset() {
+ *x = VolumeCopyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeCopyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeCopyRequest) ProtoMessage() {}
+
+func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeCopyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *VolumeCopyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeCopyRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetSourceDataNode() string {
+ if x != nil {
+ return x.SourceDataNode
+ }
+ return ""
+}
+
+type VolumeCopyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"`
+}
+
+func (x *VolumeCopyResponse) Reset() {
+ *x = VolumeCopyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeCopyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeCopyResponse) ProtoMessage() {}
+
+func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeCopyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 {
+ if x != nil {
+ return x.LastAppendAtNs
+ }
+ return 0
+}
+
+type CopyFileRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"`
+ CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"`
+ StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"`
+ Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"`
+ IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"`
+ IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"`
+}
+
+func (x *CopyFileRequest) Reset() {
+ *x = CopyFileRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CopyFileRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CopyFileRequest) ProtoMessage() {}
+
+func (x *CopyFileRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead.
+func (*CopyFileRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *CopyFileRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *CopyFileRequest) GetExt() string {
+ if x != nil {
+ return x.Ext
+ }
+ return ""
+}
+
+func (x *CopyFileRequest) GetCompactionRevision() uint32 {
+ if x != nil {
+ return x.CompactionRevision
+ }
+ return 0
+}
+
+func (x *CopyFileRequest) GetStopOffset() uint64 {
+ if x != nil {
+ return x.StopOffset
+ }
+ return 0
+}
+
+func (x *CopyFileRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *CopyFileRequest) GetIsEcVolume() bool {
+ if x != nil {
+ return x.IsEcVolume
+ }
+ return false
+}
+
+func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool {
+ if x != nil {
+ return x.IgnoreSourceFileNotFound
+ }
+ return false
+}
+
+type CopyFileResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
+}
+
+func (x *CopyFileResponse) Reset() {
+ *x = CopyFileResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CopyFileResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CopyFileResponse) ProtoMessage() {}
+
+func (x *CopyFileResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead.
+func (*CopyFileResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *CopyFileResponse) GetFileContent() []byte {
+ if x != nil {
+ return x.FileContent
+ }
+ return nil
+}
+
+type VolumeTailSenderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+ IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"`
+}
+
+func (x *VolumeTailSenderRequest) Reset() {
+ *x = VolumeTailSenderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailSenderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailSenderRequest) ProtoMessage() {}
+
+func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *VolumeTailSenderRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeTailSenderRequest) GetSinceNs() uint64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 {
+ if x != nil {
+ return x.IdleTimeoutSeconds
+ }
+ return 0
+}
+
+type VolumeTailSenderResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"`
+ NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"`
+ IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"`
+}
+
+func (x *VolumeTailSenderResponse) Reset() {
+ *x = VolumeTailSenderResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailSenderResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailSenderResponse) ProtoMessage() {}
+
+func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte {
+ if x != nil {
+ return x.NeedleHeader
+ }
+ return nil
+}
+
+func (x *VolumeTailSenderResponse) GetNeedleBody() []byte {
+ if x != nil {
+ return x.NeedleBody
+ }
+ return nil
+}
+
+func (x *VolumeTailSenderResponse) GetIsLastChunk() bool {
+ if x != nil {
+ return x.IsLastChunk
+ }
+ return false
+}
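+
+// Illustrative sketch (assumed client API, not generated code): the volume
+// tail sender streams needle headers and bodies written after SinceNs, and
+// judging by the field name, IsLastChunk flags the final chunk of a needle
+// body split across messages. vs is an assumed VolumeServerClient and
+// handle is a hypothetical callback.
+//
+//	stream, err := vs.VolumeTailSender(ctx, &VolumeTailSenderRequest{
+//		VolumeId:           volumeId,
+//		SinceNs:            lastSeenNs, // only entries newer than this timestamp
+//		IdleTimeoutSeconds: 30,         // give up after 30s without new writes
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for resp, recvErr := stream.Recv(); recvErr == nil; resp, recvErr = stream.Recv() {
+//		handle(resp.GetNeedleHeader(), resp.GetNeedleBody(), resp.GetIsLastChunk())
+//	}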
+
+type VolumeTailReceiverRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+ IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"`
+ SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"`
+}
+
+func (x *VolumeTailReceiverRequest) Reset() {
+ *x = VolumeTailReceiverRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailReceiverRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailReceiverRequest) ProtoMessage() {}
+
+func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeTailReceiverRequest) GetSinceNs() uint64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+func (x *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 {
+ if x != nil {
+ return x.IdleTimeoutSeconds
+ }
+ return 0
+}
+
+func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string {
+ if x != nil {
+ return x.SourceVolumeServer
+ }
+ return ""
+}
+
+type VolumeTailReceiverResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeTailReceiverResponse) Reset() {
+ *x = VolumeTailReceiverResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailReceiverResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailReceiverResponse) ProtoMessage() {}
+
+func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{37}
+}
+
+type VolumeEcShardsGenerateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *VolumeEcShardsGenerateRequest) Reset() {
+ *x = VolumeEcShardsGenerateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsGenerateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsGenerateRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsGenerateRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type VolumeEcShardsGenerateResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsGenerateResponse) Reset() {
+ *x = VolumeEcShardsGenerateResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsGenerateResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsGenerateResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{39}
+}
+
+type VolumeEcShardsRebuildRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *VolumeEcShardsRebuildRequest) Reset() {
+ *x = VolumeEcShardsRebuildRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsRebuildRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsRebuildRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{40}
+}
+
+func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsRebuildRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type VolumeEcShardsRebuildResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsRebuildResponse) Reset() {
+ *x = VolumeEcShardsRebuildResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsRebuildResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsRebuildResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{41}
+}
+
+func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 {
+ if x != nil {
+ return x.RebuiltShardIds
+ }
+ return nil
+}
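+
+// Illustrative sketch (assumed client API): generating erasure-coding shards
+// for a volume and then rebuilding any missing ones. The rebuild response
+// reports which shard ids were reconstructed locally.
+//
+//	if _, err := vs.VolumeEcShardsGenerate(ctx, &VolumeEcShardsGenerateRequest{
+//		VolumeId:   volumeId,
+//		Collection: collection,
+//	}); err != nil {
+//		return err
+//	}
+//	resp, err := vs.VolumeEcShardsRebuild(ctx, &VolumeEcShardsRebuildRequest{
+//		VolumeId:   volumeId,
+//		Collection: collection,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	rebuilt := resp.GetRebuiltShardIds() // e.g. []uint32{4, 9}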
+
+type VolumeEcShardsCopyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+ CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"`
+ SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"`
+ CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"`
+ CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"`
+}
+
+func (x *VolumeEcShardsCopyRequest) Reset() {
+ *x = VolumeEcShardsCopyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsCopyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsCopyRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{42}
+}
+
+func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsCopyRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool {
+ if x != nil {
+ return x.CopyEcxFile
+ }
+ return false
+}
+
+func (x *VolumeEcShardsCopyRequest) GetSourceDataNode() string {
+ if x != nil {
+ return x.SourceDataNode
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool {
+ if x != nil {
+ return x.CopyEcjFile
+ }
+ return false
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool {
+ if x != nil {
+ return x.CopyVifFile
+ }
+ return false
+}
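+
+// Illustrative sketch (assumed client API): copying EC shards from another
+// volume server. Going by SeaweedFS file naming, the copy_ecx_file,
+// copy_ecj_file and copy_vif_file flags control whether the shared index
+// (.ecx), deletion journal (.ecj) and volume info (.vif) sidecar files are
+// fetched along with the shard data; sourceNode is a hypothetical host:port.
+//
+//	_, err := vs.VolumeEcShardsCopy(ctx, &VolumeEcShardsCopyRequest{
+//		VolumeId:       volumeId,
+//		Collection:     collection,
+//		ShardIds:       []uint32{0, 1, 2},
+//		CopyEcxFile:    true, // shared per volume, so only needed once
+//		CopyEcjFile:    true,
+//		CopyVifFile:    true,
+//		SourceDataNode: sourceNode,
+//	})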
+
+type VolumeEcShardsCopyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsCopyResponse) Reset() {
+ *x = VolumeEcShardsCopyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsCopyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsCopyResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{43}
+}
+
+type VolumeEcShardsDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsDeleteRequest) Reset() {
+ *x = VolumeEcShardsDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{44}
+}
+
+func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsDeleteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsDeleteResponse) Reset() {
+ *x = VolumeEcShardsDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[45]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{45}
+}
+
+type VolumeEcShardsMountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsMountRequest) Reset() {
+ *x = VolumeEcShardsMountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsMountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsMountRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[46]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{46}
+}
+
+func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsMountRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsMountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsMountResponse) Reset() {
+ *x = VolumeEcShardsMountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsMountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsMountResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[47]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{47}
+}
+
+type VolumeEcShardsUnmountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsUnmountRequest) Reset() {
+ *x = VolumeEcShardsUnmountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsUnmountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsUnmountRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[48]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{48}
+}
+
+func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsUnmountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsUnmountResponse) Reset() {
+ *x = VolumeEcShardsUnmountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsUnmountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsUnmountResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[49]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{49}
+}
+
+type VolumeEcShardReadRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
+ Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
+ Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"`
+}
+
+func (x *VolumeEcShardReadRequest) Reset() {
+ *x = VolumeEcShardReadRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardReadRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardReadRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[50]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{50}
+}
+
+func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetShardId() uint32 {
+ if x != nil {
+ return x.ShardId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetOffset() int64 {
+ if x != nil {
+ return x.Offset
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetFileKey() uint64 {
+ if x != nil {
+ return x.FileKey
+ }
+ return 0
+}
+
+type VolumeEcShardReadResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"`
+}
+
+func (x *VolumeEcShardReadResponse) Reset() {
+ *x = VolumeEcShardReadResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardReadResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardReadResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{51}
+}
+
+func (x *VolumeEcShardReadResponse) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *VolumeEcShardReadResponse) GetIsDeleted() bool {
+ if x != nil {
+ return x.IsDeleted
+ }
+ return false
+}
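+
+// Illustrative sketch (assumed client API): reading a byte range from one EC
+// shard. The response appears to be streamed, with Data carrying the bytes
+// and IsDeleted reporting whether the needle identified by FileKey has been
+// deleted.
+//
+//	stream, err := vs.VolumeEcShardRead(ctx, &VolumeEcShardReadRequest{
+//		VolumeId: volumeId,
+//		ShardId:  shardId,
+//		Offset:   offset,
+//		Size:     size,
+//		FileKey:  fileKey, // lets the server check deletion status
+//	})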
+
+type VolumeEcBlobDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"`
+ Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *VolumeEcBlobDeleteRequest) Reset() {
+ *x = VolumeEcBlobDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcBlobDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcBlobDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[52]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{52}
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetFileKey() uint64 {
+ if x != nil {
+ return x.FileKey
+ }
+ return 0
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+type VolumeEcBlobDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcBlobDeleteResponse) Reset() {
+ *x = VolumeEcBlobDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcBlobDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcBlobDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[53]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{53}
+}
+
+type VolumeEcShardsToVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *VolumeEcShardsToVolumeRequest) Reset() {
+ *x = VolumeEcShardsToVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsToVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[54]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{54}
+}
+
+func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsToVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type VolumeEcShardsToVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsToVolumeResponse) Reset() {
+ *x = VolumeEcShardsToVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsToVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[55]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{55}
+}
+
+type ReadVolumeFileStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *ReadVolumeFileStatusRequest) Reset() {
+ *x = ReadVolumeFileStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadVolumeFileStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadVolumeFileStatusRequest) ProtoMessage() {}
+
+func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[56]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead.
+func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{56}
+}
+
+func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type ReadVolumeFileStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"`
+ IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"`
+ DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"`
+ DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"`
+ FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+ CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"`
+ Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *ReadVolumeFileStatusResponse) Reset() {
+ *x = ReadVolumeFileStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadVolumeFileStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadVolumeFileStatusResponse) ProtoMessage() {}
+
+func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[57]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead.
+func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{57}
+}
+
+func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 {
+ if x != nil {
+ return x.IdxFileTimestampSeconds
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 {
+ if x != nil {
+ return x.IdxFileSize
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 {
+ if x != nil {
+ return x.DatFileTimestampSeconds
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 {
+ if x != nil {
+ return x.DatFileSize
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 {
+ if x != nil {
+ return x.CompactionRevision
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type DiskStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
+ All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
+ Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"`
+ Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"`
+ PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"`
+ PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"`
+}
+
+func (x *DiskStatus) Reset() {
+ *x = DiskStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DiskStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DiskStatus) ProtoMessage() {}
+
+func (x *DiskStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[58]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead.
+func (*DiskStatus) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{58}
+}
+
+func (x *DiskStatus) GetDir() string {
+ if x != nil {
+ return x.Dir
+ }
+ return ""
+}
+
+func (x *DiskStatus) GetAll() uint64 {
+ if x != nil {
+ return x.All
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetUsed() uint64 {
+ if x != nil {
+ return x.Used
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetFree() uint64 {
+ if x != nil {
+ return x.Free
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetPercentFree() float32 {
+ if x != nil {
+ return x.PercentFree
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetPercentUsed() float32 {
+ if x != nil {
+ return x.PercentUsed
+ }
+ return 0
+}
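+
+// Note (an assumption based on the field names): percent_free and
+// percent_used look precomputed server-side, so consumers need not derive
+// them, but they should agree with the raw counters:
+//
+//	approxUsed := float32(s.GetUsed()) / float32(s.GetAll()) * 100 // ≈ s.GetPercentUsed()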
+
+type MemStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"`
+ All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
+ Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"`
+ Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"`
+ Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"`
+ Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"`
+ Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"`
+}
+
+func (x *MemStatus) Reset() {
+ *x = MemStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemStatus) ProtoMessage() {}
+
+func (x *MemStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[59]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemStatus.ProtoReflect.Descriptor instead.
+func (*MemStatus) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{59}
+}
+
+func (x *MemStatus) GetGoroutines() int32 {
+ if x != nil {
+ return x.Goroutines
+ }
+ return 0
+}
+
+func (x *MemStatus) GetAll() uint64 {
+ if x != nil {
+ return x.All
+ }
+ return 0
+}
+
+func (x *MemStatus) GetUsed() uint64 {
+ if x != nil {
+ return x.Used
+ }
+ return 0
+}
+
+func (x *MemStatus) GetFree() uint64 {
+ if x != nil {
+ return x.Free
+ }
+ return 0
+}
+
+func (x *MemStatus) GetSelf() uint64 {
+ if x != nil {
+ return x.Self
+ }
+ return 0
+}
+
+func (x *MemStatus) GetHeap() uint64 {
+ if x != nil {
+ return x.Heap
+ }
+ return 0
+}
+
+func (x *MemStatus) GetStack() uint64 {
+ if x != nil {
+ return x.Stack
+ }
+ return 0
+}
+
+// tiered storage on volume servers
+type RemoteFile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"`
+ BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"`
+ Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+ FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+ ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"`
+ Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"`
+}
+
+func (x *RemoteFile) Reset() {
+ *x = RemoteFile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[60]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoteFile) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoteFile) ProtoMessage() {}
+
+func (x *RemoteFile) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[60]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead.
+func (*RemoteFile) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{60}
+}
+
+func (x *RemoteFile) GetBackendType() string {
+ if x != nil {
+ return x.BackendType
+ }
+ return ""
+}
+
+func (x *RemoteFile) GetBackendId() string {
+ if x != nil {
+ return x.BackendId
+ }
+ return ""
+}
+
+func (x *RemoteFile) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *RemoteFile) GetOffset() uint64 {
+ if x != nil {
+ return x.Offset
+ }
+ return 0
+}
+
+func (x *RemoteFile) GetFileSize() uint64 {
+ if x != nil {
+ return x.FileSize
+ }
+ return 0
+}
+
+func (x *RemoteFile) GetModifiedTime() uint64 {
+ if x != nil {
+ return x.ModifiedTime
+ }
+ return 0
+}
+
+func (x *RemoteFile) GetExtension() string {
+ if x != nil {
+ return x.Extension
+ }
+ return ""
+}
+
+type VolumeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+}
+
+func (x *VolumeInfo) Reset() {
+ *x = VolumeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[61]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeInfo) ProtoMessage() {}
+
+func (x *VolumeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[61]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead.
+func (*VolumeInfo) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{61}
+}
+
+func (x *VolumeInfo) GetFiles() []*RemoteFile {
+ if x != nil {
+ return x.Files
+ }
+ return nil
+}
+
+func (x *VolumeInfo) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+func (x *VolumeInfo) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+type VolumeTierMoveDatToRemoteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"`
+ KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"`
+}
+
+func (x *VolumeTierMoveDatToRemoteRequest) Reset() {
+ *x = VolumeTierMoveDatToRemoteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTierMoveDatToRemoteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {}
+
+func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[62]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{62}
+}
+
+func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeTierMoveDatToRemoteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string {
+ if x != nil {
+ return x.DestinationBackendName
+ }
+ return ""
+}
+
+func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool {
+ if x != nil {
+ return x.KeepLocalDatFile
+ }
+ return false
+}
+
+type VolumeTierMoveDatToRemoteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"`
+ ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"`
+}
+
+func (x *VolumeTierMoveDatToRemoteResponse) Reset() {
+ *x = VolumeTierMoveDatToRemoteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[63]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTierMoveDatToRemoteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {}
+
+func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[63]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{63}
+}
+
+func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
+ if x != nil {
+ return x.Processed
+ }
+ return 0
+}
+
+func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 {
+ if x != nil {
+ return x.ProcessedPercentage
+ }
+ return 0
+}
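+
+// Illustrative sketch (assumed client API): tiering a volume's .dat file to
+// a configured remote backend. The RPC streams progress updates via the
+// Processed and ProcessedPercentage fields above; "s3.default" is a
+// hypothetical backend name from the volume server configuration.
+//
+//	stream, err := vs.VolumeTierMoveDatToRemote(ctx, &VolumeTierMoveDatToRemoteRequest{
+//		VolumeId:               volumeId,
+//		Collection:             collection,
+//		DestinationBackendName: "s3.default",
+//		KeepLocalDatFile:       false, // drop the local copy once uploaded
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for resp, recvErr := stream.Recv(); recvErr == nil; resp, recvErr = stream.Recv() {
+//		log.Printf("moved %d bytes (%.1f%%)", resp.GetProcessed(), resp.GetProcessedPercentage())
+//	}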
+
+type VolumeTierMoveDatFromRemoteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"`
+}
+
+func (x *VolumeTierMoveDatFromRemoteRequest) Reset() {
+ *x = VolumeTierMoveDatFromRemoteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTierMoveDatFromRemoteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {}
+
+func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[64]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{64}
+}
+
+func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeTierMoveDatFromRemoteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool {
+ if x != nil {
+ return x.KeepRemoteDatFile
+ }
+ return false
+}
+
+type VolumeTierMoveDatFromRemoteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"`
+ ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"`
+}
+
+func (x *VolumeTierMoveDatFromRemoteResponse) Reset() {
+ *x = VolumeTierMoveDatFromRemoteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[65]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTierMoveDatFromRemoteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {}
+
+func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[65]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{65}
+}
+
+func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 {
+ if x != nil {
+ return x.Processed
+ }
+ return 0
+}
+
+func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 {
+ if x != nil {
+ return x.ProcessedPercentage
+ }
+ return 0
+}
+
+type VolumeServerStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeServerStatusRequest) Reset() {
+ *x = VolumeServerStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[66]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeServerStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeServerStatusRequest) ProtoMessage() {}
+
+func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[66]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{66}
+}
+
+type VolumeServerStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"`
+ MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"`
+}
+
+func (x *VolumeServerStatusResponse) Reset() {
+ *x = VolumeServerStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[67]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeServerStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeServerStatusResponse) ProtoMessage() {}
+
+func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[67]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{67}
+}
+
+func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus {
+ if x != nil {
+ return x.DiskStatuses
+ }
+ return nil
+}
+
+func (x *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus {
+ if x != nil {
+ return x.MemoryStatus
+ }
+ return nil
+}
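+
+// Illustrative only, not part of the generated code: a minimal sketch of
+// consuming a VolumeServerStatusResponse, assuming resp was returned by the
+// corresponding status RPC and that DiskStatus (defined earlier in this file)
+// exposes the usual generated getters for its dir/all/used/percent_free fields:
+//
+//	for _, ds := range resp.GetDiskStatuses() {
+//	    log.Printf("dir=%s used=%d/%d free=%.1f%%",
+//	        ds.GetDir(), ds.GetUsed(), ds.GetAll(), ds.GetPercentFree())
+//	}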
+
+// QueryRequest describes a select-style query run on volume servers; its
+// filter and input/output serialization options mirror Amazon S3 Select.
+type QueryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"`
+ FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"`
+ Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
+ InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"`
+ OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"`
+}
+
+func (x *QueryRequest) Reset() {
+ *x = QueryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[68]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest) ProtoMessage() {}
+
+func (x *QueryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[68]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead.
+func (*QueryRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68}
+}
+
+func (x *QueryRequest) GetSelections() []string {
+ if x != nil {
+ return x.Selections
+ }
+ return nil
+}
+
+func (x *QueryRequest) GetFromFileIds() []string {
+ if x != nil {
+ return x.FromFileIds
+ }
+ return nil
+}
+
+func (x *QueryRequest) GetFilter() *QueryRequest_Filter {
+ if x != nil {
+ return x.Filter
+ }
+ return nil
+}
+
+func (x *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization {
+ if x != nil {
+ return x.InputSerialization
+ }
+ return nil
+}
+
+func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization {
+ if x != nil {
+ return x.OutputSerialization
+ }
+ return nil
+}
+
+type QueriedStripe struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"`
+}
+
+func (x *QueriedStripe) Reset() {
+ *x = QueriedStripe{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[69]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueriedStripe) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueriedStripe) ProtoMessage() {}
+
+func (x *QueriedStripe) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[69]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead.
+func (*QueriedStripe) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{69}
+}
+
+func (x *QueriedStripe) GetRecords() []byte {
+ if x != nil {
+ return x.Records
+ }
+ return nil
+}
+
+type QueryRequest_Filter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+ Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"`
+ Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *QueryRequest_Filter) Reset() {
+ *x = QueryRequest_Filter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_Filter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_Filter) ProtoMessage() {}
+
+func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[70]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead.
+func (*QueryRequest_Filter) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 0}
+}
+
+func (x *QueryRequest_Filter) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *QueryRequest_Filter) GetOperand() string {
+ if x != nil {
+ return x.Operand
+ }
+ return ""
+}
+
+func (x *QueryRequest_Filter) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+type QueryRequest_InputSerialization struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // NONE | GZIP | BZIP2
+ CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"`
+ CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"`
+ JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" json:"json_input,omitempty"`
+ ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"`
+}
+
+func (x *QueryRequest_InputSerialization) Reset() {
+ *x = QueryRequest_InputSerialization{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_InputSerialization) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_InputSerialization) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[71]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 1}
+}
+
+func (x *QueryRequest_InputSerialization) GetCompressionType() string {
+ if x != nil {
+ return x.CompressionType
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput {
+ if x != nil {
+ return x.CsvInput
+ }
+ return nil
+}
+
+func (x *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput {
+ if x != nil {
+ return x.JsonInput
+ }
+ return nil
+}
+
+func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput {
+ if x != nil {
+ return x.ParquetInput
+ }
+ return nil
+}
+
+type QueryRequest_OutputSerialization struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"`
+ JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"`
+}
+
+func (x *QueryRequest_OutputSerialization) Reset() {
+ *x = QueryRequest_OutputSerialization{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[72]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_OutputSerialization) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_OutputSerialization) ProtoMessage() {}
+
+func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[72]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead.
+func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 2}
+}
+
+func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
+ if x != nil {
+ return x.CsvOutput
+ }
+ return nil
+}
+
+func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput {
+ if x != nil {
+ return x.JsonOutput
+ }
+ return nil
+}
+
+type QueryRequest_InputSerialization_CSVInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE
+ RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n
+ FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: ,
+ QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: "
+ QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: "
+ Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: #
+ // If true, records may contain record delimiters inside quoted values.
+ AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // Default: false
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) Reset() {
+ *x = QueryRequest_InputSerialization_CSVInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[73]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 1, 0}
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
+ if x != nil {
+ return x.FileHeaderInfo
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string {
+ if x != nil {
+ return x.RecordDelimiter
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string {
+ if x != nil {
+ return x.FieldDelimiter
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string {
+ if x != nil {
+ return x.QuoteCharactoer
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string {
+ if x != nil {
+ return x.QuoteEscapeCharacter
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetComments() string {
+ if x != nil {
+ return x.Comments
+ }
+ return ""
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool {
+ if x != nil {
+ return x.AllowQuotedRecordDelimiter
+ }
+ return false
+}
+
+type QueryRequest_InputSerialization_JSONInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES
+}
+
+func (x *QueryRequest_InputSerialization_JSONInput) Reset() {
+ *x = QueryRequest_InputSerialization_JSONInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[74]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_InputSerialization_JSONInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[74]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 1, 1}
+}
+
+func (x *QueryRequest_InputSerialization_JSONInput) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+type QueryRequest_InputSerialization_ParquetInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *QueryRequest_InputSerialization_ParquetInput) Reset() {
+ *x = QueryRequest_InputSerialization_ParquetInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[75]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_InputSerialization_ParquetInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[75]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 1, 2}
+}
+
+type QueryRequest_OutputSerialization_CSVOutput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED
+ RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n
+ FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: ,
+ QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: "
+ QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: "
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() {
+ *x = QueryRequest_OutputSerialization_CSVOutput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[76]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[76]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 2, 0}
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
+ if x != nil {
+ return x.QuoteFields
+ }
+ return ""
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string {
+ if x != nil {
+ return x.RecordDelimiter
+ }
+ return ""
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string {
+ if x != nil {
+ return x.FieldDelimiter
+ }
+ return ""
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string {
+ if x != nil {
+ return x.QuoteCharactoer
+ }
+ return ""
+}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string {
+ if x != nil {
+ return x.QuoteEscapeCharacter
+ }
+ return ""
+}
+
+type QueryRequest_OutputSerialization_JSONOutput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"`
+}
+
+func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() {
+ *x = QueryRequest_OutputSerialization_JSONOutput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[77]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_OutputSerialization_JSONOutput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {}
+
+func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[77]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68, 2, 1}
+}
+
+func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
+ if x != nil {
+ return x.RecordDelimiter
+ }
+ return ""
+}
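+
+// Illustrative only, not part of the generated code: a caller might assemble
+// a QueryRequest for CSV input and JSON output roughly as below. Only the
+// field names come from the messages above; every literal value here is a
+// hypothetical placeholder.
+//
+//	req := &QueryRequest{
+//	    Selections:  []string{"name", "size"},
+//	    FromFileIds: []string{"3,01637037d6"},
+//	    InputSerialization: &QueryRequest_InputSerialization{
+//	        CompressionType: "NONE", // NONE | GZIP | BZIP2
+//	        CsvInput: &QueryRequest_InputSerialization_CSVInput{
+//	            FileHeaderInfo: "USE", // NONE | USE | IGNORE
+//	        },
+//	    },
+//	    OutputSerialization: &QueryRequest_OutputSerialization{
+//	        JsonOutput: &QueryRequest_OutputSerialization_JSONOutput{
+//	            RecordDelimiter: "\n",
+//	        },
+//	    },
+//	}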
+
+var File_volume_server_proto protoreflect.FileDescriptor
+
+var file_volume_server_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a,
+ 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x07, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70,
+ 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a,
+ 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a,
+ 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a,
+ 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61,
+ 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22,
+ 0x5b, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a,
+ 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72,
+ 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56,
+ 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xde, 0x01, 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a,
+ 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12,
+ 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61,
+ 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61,
+ 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x6c, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x36, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x18, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6c, 0x5f,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x61,
+ 0x69, 0x6c, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70,
+ 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46,
+ 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22,
+ 0x42, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75,
+ 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a,
+ 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x49, 0x64, 0x22, 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f,
+ 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x13, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22,
+ 0x16, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49,
+ 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52,
+ 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x57, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xae, 0x01, 0x0a, 0x11, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10,
+ 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c,
+ 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f,
+ 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f,
+ 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73,
+ 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f,
+ 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03,
+ 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f,
+ 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d,
+ 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75,
+ 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69,
+ 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a,
+ 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65,
+ 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22,
+ 0x84, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65,
+ 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d,
+ 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68,
+ 0x75, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73,
+ 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49,
+ 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14,
+ 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65,
+ 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30,
+ 0x0a, 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65,
+ 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c,
+ 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b,
+ 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62,
+ 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11,
+ 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73,
+ 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78,
+ 0x46, 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22,
+ 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69,
+ 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66,
+ 0x69, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56,
+ 0x69, 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a,
+ 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a,
+ 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d,
+ 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45,
+ 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49,
+ 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f,
+ 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x99, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65,
+ 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b,
+ 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65,
+ 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61,
+ 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xed, 0x02, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e,
+ 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64,
+ 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c,
+ 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x9e, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04,
+ 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65,
+ 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46,
+ 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75,
+ 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72,
+ 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12,
+ 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65,
+ 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a,
+ 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62,
+ 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64,
+ 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69,
+ 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
+ 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+ 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65,
+ 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f,
+ 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
+ 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73,
+ 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64,
+ 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02,
+ 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65,
+ 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46,
+ 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12,
+ 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63,
+ 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72,
+ 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67,
+ 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1,
+ 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a,
+ 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73,
+ 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x6c, 0x65,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d,
+ 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06,
+ 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f,
+ 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72,
+ 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, 0x6f, 0x75,
+ 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53,
+ 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x6f, 0x75,
+ 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70,
+ 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70,
+ 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x0a,
+ 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x09, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x71,
+ 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
+ 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52,
+ 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xc8, 0x02,
+ 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69,
+ 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64,
+ 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12,
+ 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44,
+ 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74,
+ 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74,
+ 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63,
+ 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65,
+ 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x71,
+ 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44,
+ 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e,
+ 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72,
+ 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, 0x4f, 0x75,
+ 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74,
+ 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x5e,
+ 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70,
+ 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x1a, 0xe3,
+ 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c,
+ 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12,
+ 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61,
+ 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71,
+ 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34,
+ 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63,
+ 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
+ 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61,
+ 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70,
+ 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, 0x29, 0x0a,
+ 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x32, 0xa1, 0x1c, 0x0a, 0x0c, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74,
+ 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63,
+ 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75,
+ 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75,
+ 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75,
+ 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f,
+ 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a,
+ 0x12, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d,
+ 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75,
+ 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79,
+ 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f,
+ 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e,
+ 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64,
+ 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c,
+ 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6f, 0x70, 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x77, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69,
+ 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70,
+ 0x79, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79,
+ 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01,
+ 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65,
+ 0x6e, 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61,
+ 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e,
+ 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12,
+ 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63,
+ 0x65, 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54,
+ 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c,
+ 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a,
+ 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43,
+ 0x6f, 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64,
+ 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a,
+ 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42,
+ 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x88, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f,
+ 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44,
+ 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d,
+ 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74,
+ 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46,
+ 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f,
+ 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c,
+ 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69,
+ 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x39, 0x5a, 0x37,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73,
+ 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77,
+ 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
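+// rawDescOnce guards the one-time compression of the descriptor above;
+// rawDescData starts out aliasing the raw bytes and is swapped for the
+// gzip-compressed form the first time it is requested.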
+var (
+ file_volume_server_proto_rawDescOnce sync.Once
+ file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc
+)
+
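+// file_volume_server_proto_rawDescGZIP returns the gzip-compressed file
+// descriptor, compressing it exactly once and caching the result for all
+// subsequent callers.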
+func file_volume_server_proto_rawDescGZIP() []byte {
+ file_volume_server_proto_rawDescOnce.Do(func() {
+ file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData)
+ })
+ return file_volume_server_proto_rawDescData
+}
+
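+// Runtime type tables: one MessageInfo slot per message type declared in
+// volume_server.proto (78, counting nested types), plus the Go type that
+// corresponds to each descriptor index.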
+var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 78)
+var file_volume_server_proto_goTypes = []interface{}{
+ (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest
+ (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse
+ (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult
+ (*Empty)(nil), // 3: volume_server_pb.Empty
+ (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest
+ (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse
+ (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest
+ (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse
+ (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest
+ (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse
+ (*VacuumVolumeCleanupRequest)(nil), // 10: volume_server_pb.VacuumVolumeCleanupRequest
+ (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse
+ (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest
+ (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse
+ (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest
+ (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse
+ (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest
+ (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse
+ (*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest
+ (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse
+ (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest
+ (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse
+ (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest
+ (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse
+ (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest
+ (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse
+ (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest
+ (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse
+ (*VolumeConfigureRequest)(nil), // 28: volume_server_pb.VolumeConfigureRequest
+ (*VolumeConfigureResponse)(nil), // 29: volume_server_pb.VolumeConfigureResponse
+ (*VolumeCopyRequest)(nil), // 30: volume_server_pb.VolumeCopyRequest
+ (*VolumeCopyResponse)(nil), // 31: volume_server_pb.VolumeCopyResponse
+ (*CopyFileRequest)(nil), // 32: volume_server_pb.CopyFileRequest
+ (*CopyFileResponse)(nil), // 33: volume_server_pb.CopyFileResponse
+ (*VolumeTailSenderRequest)(nil), // 34: volume_server_pb.VolumeTailSenderRequest
+ (*VolumeTailSenderResponse)(nil), // 35: volume_server_pb.VolumeTailSenderResponse
+ (*VolumeTailReceiverRequest)(nil), // 36: volume_server_pb.VolumeTailReceiverRequest
+ (*VolumeTailReceiverResponse)(nil), // 37: volume_server_pb.VolumeTailReceiverResponse
+ (*VolumeEcShardsGenerateRequest)(nil), // 38: volume_server_pb.VolumeEcShardsGenerateRequest
+ (*VolumeEcShardsGenerateResponse)(nil), // 39: volume_server_pb.VolumeEcShardsGenerateResponse
+ (*VolumeEcShardsRebuildRequest)(nil), // 40: volume_server_pb.VolumeEcShardsRebuildRequest
+ (*VolumeEcShardsRebuildResponse)(nil), // 41: volume_server_pb.VolumeEcShardsRebuildResponse
+ (*VolumeEcShardsCopyRequest)(nil), // 42: volume_server_pb.VolumeEcShardsCopyRequest
+ (*VolumeEcShardsCopyResponse)(nil), // 43: volume_server_pb.VolumeEcShardsCopyResponse
+ (*VolumeEcShardsDeleteRequest)(nil), // 44: volume_server_pb.VolumeEcShardsDeleteRequest
+ (*VolumeEcShardsDeleteResponse)(nil), // 45: volume_server_pb.VolumeEcShardsDeleteResponse
+ (*VolumeEcShardsMountRequest)(nil), // 46: volume_server_pb.VolumeEcShardsMountRequest
+ (*VolumeEcShardsMountResponse)(nil), // 47: volume_server_pb.VolumeEcShardsMountResponse
+ (*VolumeEcShardsUnmountRequest)(nil), // 48: volume_server_pb.VolumeEcShardsUnmountRequest
+ (*VolumeEcShardsUnmountResponse)(nil), // 49: volume_server_pb.VolumeEcShardsUnmountResponse
+ (*VolumeEcShardReadRequest)(nil), // 50: volume_server_pb.VolumeEcShardReadRequest
+ (*VolumeEcShardReadResponse)(nil), // 51: volume_server_pb.VolumeEcShardReadResponse
+ (*VolumeEcBlobDeleteRequest)(nil), // 52: volume_server_pb.VolumeEcBlobDeleteRequest
+ (*VolumeEcBlobDeleteResponse)(nil), // 53: volume_server_pb.VolumeEcBlobDeleteResponse
+ (*VolumeEcShardsToVolumeRequest)(nil), // 54: volume_server_pb.VolumeEcShardsToVolumeRequest
+ (*VolumeEcShardsToVolumeResponse)(nil), // 55: volume_server_pb.VolumeEcShardsToVolumeResponse
+ (*ReadVolumeFileStatusRequest)(nil), // 56: volume_server_pb.ReadVolumeFileStatusRequest
+ (*ReadVolumeFileStatusResponse)(nil), // 57: volume_server_pb.ReadVolumeFileStatusResponse
+ (*DiskStatus)(nil), // 58: volume_server_pb.DiskStatus
+ (*MemStatus)(nil), // 59: volume_server_pb.MemStatus
+ (*RemoteFile)(nil), // 60: volume_server_pb.RemoteFile
+ (*VolumeInfo)(nil), // 61: volume_server_pb.VolumeInfo
+ (*VolumeTierMoveDatToRemoteRequest)(nil), // 62: volume_server_pb.VolumeTierMoveDatToRemoteRequest
+ (*VolumeTierMoveDatToRemoteResponse)(nil), // 63: volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ (*VolumeTierMoveDatFromRemoteRequest)(nil), // 64: volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+ (*VolumeTierMoveDatFromRemoteResponse)(nil), // 65: volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ (*VolumeServerStatusRequest)(nil), // 66: volume_server_pb.VolumeServerStatusRequest
+ (*VolumeServerStatusResponse)(nil), // 67: volume_server_pb.VolumeServerStatusResponse
+ (*QueryRequest)(nil), // 68: volume_server_pb.QueryRequest
+ (*QueriedStripe)(nil), // 69: volume_server_pb.QueriedStripe
+ (*QueryRequest_Filter)(nil), // 70: volume_server_pb.QueryRequest.Filter
+ (*QueryRequest_InputSerialization)(nil), // 71: volume_server_pb.QueryRequest.InputSerialization
+ (*QueryRequest_OutputSerialization)(nil), // 72: volume_server_pb.QueryRequest.OutputSerialization
+ (*QueryRequest_InputSerialization_CSVInput)(nil), // 73: volume_server_pb.QueryRequest.InputSerialization.CSVInput
+ (*QueryRequest_InputSerialization_JSONInput)(nil), // 74: volume_server_pb.QueryRequest.InputSerialization.JSONInput
+ (*QueryRequest_InputSerialization_ParquetInput)(nil), // 75: volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+ (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 76: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+ (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 77: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+}
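+// Each depIdxs entry is an index into file_volume_server_proto_goTypes; the
+// bracketed markers at the tail of the slice split it into the field
+// type_name, method input_type, and method output_type sub-lists.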
+var file_volume_server_proto_depIdxs = []int32{
+ 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult
+ 60, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile
+ 58, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus
+ 59, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus
+ 70, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter
+ 71, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization
+ 72, // 6: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization
+ 73, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput
+ 74, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput
+ 75, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+ 76, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+ 77, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+ 0, // 12: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest
+ 4, // 13: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest
+ 6, // 14: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest
+ 8, // 15: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest
+ 10, // 16: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest
+ 12, // 17: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest
+ 14, // 18: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest
+ 16, // 19: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest
+ 18, // 20: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest
+ 20, // 21: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest
+ 22, // 22: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest
+ 24, // 23: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest
+ 26, // 24: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest
+ 28, // 25: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest
+ 30, // 26: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest
+ 56, // 27: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest
+ 32, // 28: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest
+ 34, // 29: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest
+ 36, // 30: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest
+ 38, // 31: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest
+ 40, // 32: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest
+ 42, // 33: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest
+ 44, // 34: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest
+ 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest
+ 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest
+ 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest
+ 52, // 38: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest
+ 54, // 39: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest
+ 62, // 40: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest
+ 64, // 41: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+ 66, // 42: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest
+ 68, // 43: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest
+ 1, // 44: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse
+ 5, // 45: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse
+ 7, // 46: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse
+ 9, // 47: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse
+ 11, // 48: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse
+ 13, // 49: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse
+ 15, // 50: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse
+ 17, // 51: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse
+ 19, // 52: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse
+ 21, // 53: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse
+ 23, // 54: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse
+ 25, // 55: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse
+ 27, // 56: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse
+ 29, // 57: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse
+ 31, // 58: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse
+ 57, // 59: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse
+ 33, // 60: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse
+ 35, // 61: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse
+ 37, // 62: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse
+ 39, // 63: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse
+ 41, // 64: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse
+ 43, // 65: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse
+ 45, // 66: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse
+ 47, // 67: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse
+ 49, // 68: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse
+ 51, // 69: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse
+ 53, // 70: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse
+ 55, // 71: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse
+ 63, // 72: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ 65, // 73: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ 67, // 74: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse
+ 69, // 75: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe
+ 44, // [44:76] is the sub-list for method output_type
+ 12, // [12:44] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
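+// file_volume_server_proto_init wires the raw descriptor, type tables, and
+// dependency indexes into the protobuf runtime; the nil check on
+// File_volume_server_proto makes the call idempotent.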
+func init() { file_volume_server_proto_init() }
+func file_volume_server_proto_init() {
+ if File_volume_server_proto != nil {
+ return
+ }
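+ // When the unsafe fast path is disabled, the reflection-based runtime
+ // needs exporter functions to reach each message's unexported state,
+ // sizeCache, and unknownFields fields.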
+ if !protoimpl.UnsafeEnabled {
+ file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BatchDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BatchDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteResult); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCheckRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCheckResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCompactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCompactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCommitRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCommitResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCleanupRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCleanupResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllocateVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllocateVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeSyncStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeSyncStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeIncrementalCopyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeIncrementalCopyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeUnmountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeUnmountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMarkReadonlyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMarkReadonlyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeConfigureRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeConfigureResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeCopyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeCopyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CopyFileRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CopyFileResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailSenderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailSenderResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailReceiverRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailReceiverResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsGenerateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsGenerateResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsRebuildRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsRebuildResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsCopyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsCopyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsMountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsMountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsUnmountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsUnmountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardReadRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardReadResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcBlobDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcBlobDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsToVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsToVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadVolumeFileStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadVolumeFileStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DiskStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoteFile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatToRemoteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatToRemoteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeServerStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeServerStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueriedStripe); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_Filter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_OutputSerialization); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization_CSVInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization_JSONInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_volume_server_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 78,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_volume_server_proto_goTypes,
+ DependencyIndexes: file_volume_server_proto_depIdxs,
+ MessageInfos: file_volume_server_proto_msgTypes,
+ }.Build()
+ File_volume_server_proto = out.File
+ file_volume_server_proto_rawDesc = nil
+ file_volume_server_proto_goTypes = nil
+ file_volume_server_proto_depIdxs = nil
+}
+
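Editor's aside: the init wiring above hands the raw descriptor and all 78 message types to the protobuf runtime via protoimpl.TypeBuilder, which registers them globally. A minimal sketch of what that registration buys a consumer, assuming the generated package's import path (an assumption, not shown in this diff); the blank import is what triggers init():

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/reflect/protoregistry"

	// Blank import runs the generated init(), registering volume_server.proto.
	_ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// The descriptor registered by init() is discoverable in the global registry.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("volume_server.proto")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fd.Services().Get(0).FullName()) // volume_server_pb.VolumeServer
}
```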
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// VolumeServerClient is the client API for VolumeServer service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type VolumeServerClient interface {
+	// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+ BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
+ VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
+ VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error)
+ VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error)
+ VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error)
+ DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
+ AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error)
+ VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error)
+ VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error)
+ VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error)
+ VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error)
+ VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error)
+ VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error)
+ VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error)
+	// copy the .idx and .dat files, then mount this volume
+ VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error)
+ ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error)
+ CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error)
+ VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error)
+ VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error)
+ // erasure coding
+ VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error)
+ VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error)
+ VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error)
+ VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error)
+ VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error)
+ VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error)
+ VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error)
+ VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error)
+ VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error)
+ // tiered storage
+ VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error)
+ VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error)
+ VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error)
+ // query
+ Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error)
+}
+
+type volumeServerClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient {
+ return &volumeServerClient{cc}
+}
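For orientation, a minimal sketch of calling this generated client from application code. The import path, the endpoint localhost:18080 (by SeaweedFS convention the gRPC port is the HTTP port plus 10000), and the FileIds field name are all assumptions taken on faith from the .proto, not shown in this diff:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" // assumed import path
)

func main() {
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure()) // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)

	// "Experts only": BatchDelete does not propagate deletes to replicas.
	resp, err := client.BatchDelete(context.Background(), &volume_server_pb.BatchDeleteRequest{
		FileIds: []string{"3,01637037d6"}, // field name assumed from the .proto
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("batch delete response: %v", resp)
}
```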
+
+func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) {
+ out := new(BatchDeleteResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) {
+ out := new(VacuumVolumeCheckResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) {
+ out := new(VacuumVolumeCompactResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) {
+ out := new(VacuumVolumeCommitResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) {
+ out := new(VacuumVolumeCleanupResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
+ out := new(DeleteCollectionResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) {
+ out := new(AllocateVolumeResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) {
+ out := new(VolumeSyncStatusResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerVolumeIncrementalCopyClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_VolumeIncrementalCopyClient interface {
+ Recv() (*VolumeIncrementalCopyResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerVolumeIncrementalCopyClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopyResponse, error) {
+ m := new(VolumeIncrementalCopyResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
}
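Every server-streaming method in this file follows the same wrapper shape seen above: SendMsg the request, CloseSend, then typed Recv calls on the returned stream until io.EOF. A sketch of draining one such stream, assuming a client built as in the earlier sketch (imports: context, io, and the generated package):

```go
// drainIncrementalCopy reads the stream until the server closes it with io.EOF.
func drainIncrementalCopy(ctx context.Context, client volume_server_pb.VolumeServerClient,
	req *volume_server_pb.VolumeIncrementalCopyRequest) error {
	stream, err := client.VolumeIncrementalCopy(ctx, req)
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // normal end of stream
		}
		if err != nil {
			return err
		}
		_ = resp // process one incremental-copy chunk
	}
}
```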
func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) {
out := new(VolumeMountResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) {
+ out := new(VolumeUnmountResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) {
+ out := new(VolumeDeleteResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) {
+ out := new(VolumeMarkReadonlyResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) {
+ out := new(VolumeConfigureResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) {
+ out := new(VolumeCopyResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) {
+ out := new(ReadVolumeFileStatusResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/CopyFile", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerCopyFileClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_CopyFileClient interface {
+ Recv() (*CopyFileResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerCopyFileClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) {
+ m := new(CopyFileResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerVolumeTailSenderClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_VolumeTailSenderClient interface {
+ Recv() (*VolumeTailSenderResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerVolumeTailSenderClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, error) {
+ m := new(VolumeTailSenderResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) {
+ out := new(VolumeTailReceiverResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) {
+ out := new(VolumeEcShardsGenerateResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) {
+ out := new(VolumeEcShardsRebuildResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) {
+ out := new(VolumeEcShardsCopyResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) {
+ out := new(VolumeEcShardsDeleteResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) {
+ out := new(VolumeEcShardsMountResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) {
+ out := new(VolumeEcShardsUnmountResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerVolumeEcShardReadClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_VolumeEcShardReadClient interface {
+ Recv() (*VolumeEcShardReadResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerVolumeEcShardReadClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse, error) {
+ m := new(VolumeEcShardReadResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) {
+ out := new(VolumeEcBlobDeleteResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) {
+ out := new(VolumeEcShardsToVolumeResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerVolumeTierMoveDatToRemoteClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_VolumeTierMoveDatToRemoteClient interface {
+ Recv() (*VolumeTierMoveDatToRemoteResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerVolumeTierMoveDatToRemoteClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) {
+ m := new(VolumeTierMoveDatToRemoteResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_VolumeTierMoveDatFromRemoteClient interface {
+ Recv() (*VolumeTierMoveDatFromRemoteResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerVolumeTierMoveDatFromRemoteClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) {
+ m := new(VolumeTierMoveDatFromRemoteResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) {
+ out := new(VolumeServerStatusResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/Query", opts...)
if err != nil {
return nil, err
}
- return out, nil
+ x := &volumeServerQueryClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_QueryClient interface {
+ Recv() (*QueriedStripe, error)
+ grpc.ClientStream
+}
+
+type volumeServerQueryClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) {
+ m := new(QueriedStripe)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// VolumeServerServer is the server API for VolumeServer service.
+type VolumeServerServer interface {
+	// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+ BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
+ VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
+ VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error)
+ VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error)
+ VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error)
+ DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
+ AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error)
+ VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error)
+ VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error
+ VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error)
+ VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error)
+ VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error)
+ VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error)
+ VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error)
+	// copy the .idx and .dat files, then mount this volume
+ VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error)
+ ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error)
+ CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error
+ VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error
+ VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error)
+ // erasure coding
+ VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error)
+ VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error)
+ VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error)
+ VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error)
+ VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error)
+ VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error)
+ VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error
+ VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error)
+ VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error)
+ // tiered storage
+ VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error
+ VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error
+ VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error)
+ // query
+ Query(*QueryRequest, VolumeServer_QueryServer) error
+}
+
+// UnimplementedVolumeServerServer can be embedded to have forward-compatible implementations.
+type UnimplementedVolumeServerServer struct {
+}
+
+func (*UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCommit not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCleanup not implemented")
+}
+func (*UnimplementedVolumeServerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented")
+}
+func (*UnimplementedVolumeServerServer) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AllocateVolume not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeMount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeUnmount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeConfigure not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error {
+ return status.Errorf(codes.Unimplemented, "method CopyFile not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeTailReceiver not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsGenerate not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsRebuild not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsMount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcBlobDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeServerStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error {
+ return status.Errorf(codes.Unimplemented, "method Query not implemented")
+}
+
+func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) {
+ s.RegisterService(&_VolumeServer_serviceDesc, srv)
+}
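A minimal sketch of a partial implementation wired into a gRPC server. Embedding UnimplementedVolumeServerServer keeps the build green as the service grows: any method not overridden answers with codes.Unimplemented via the stubs above. Written as if inside the generated package for brevity; the port is hypothetical (imports: context, net, google.golang.org/grpc):

```go
type demoVolumeServer struct {
	UnimplementedVolumeServerServer // un-overridden methods return codes.Unimplemented
}

// VolumeMount overrides one RPC; everything else falls through to the embedded stubs.
func (s *demoVolumeServer) VolumeMount(ctx context.Context, req *VolumeMountRequest) (*VolumeMountResponse, error) {
	// ... mount the volume named by req here ...
	return &VolumeMountResponse{}, nil
}

func serve() error {
	lis, err := net.Listen("tcp", ":18080") // hypothetical port
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	RegisterVolumeServerServer(s, &demoVolumeServer{})
	return s.Serve(lis)
}
```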
+
+func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(BatchDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).BatchDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/BatchDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
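Each generated unary handler branches on the interceptor: nil means a direct dispatch to the service, otherwise the call is funneled through the interceptor with the UnaryServerInfo shown above. A sketch of a logging interceptor that would ride this path (imports: context, log, google.golang.org/grpc):

```go
// loggingInterceptor is invoked by handlers like _VolumeServer_BatchDelete_Handler
// whenever the server was constructed with grpc.UnaryInterceptor(...).
func loggingInterceptor(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("rpc start: %s", info.FullMethod) // e.g. /volume_server_pb.VolumeServer/BatchDelete
	resp, err := handler(ctx, req)
	if err != nil {
		log.Printf("rpc error: %s: %v", info.FullMethod, err)
	}
	return resp, err
}

// Wired in at server construction time:
//   s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
```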
+
+func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VacuumVolumeCheckRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VacuumVolumeCompactRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCompact",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, req.(*VacuumVolumeCompactRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VacuumVolumeCommitRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VacuumVolumeCleanupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCleanup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteCollectionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).DeleteCollection(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AllocateVolumeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).AllocateVolume(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/AllocateVolume",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeSyncStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeSyncStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VolumeIncrementalCopy_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(VolumeIncrementalCopyRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &volumeServerVolumeIncrementalCopyServer{stream})
+}
+
+type VolumeServer_VolumeIncrementalCopyServer interface {
+ Send(*VolumeIncrementalCopyResponse) error
+ grpc.ServerStream
+}
+
+type volumeServerVolumeIncrementalCopyServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerVolumeIncrementalCopyServer) Send(m *VolumeIncrementalCopyResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
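The server-side streaming wrappers mirror the client ones: the handler receives the decoded request plus a typed Send-only stream. A sketch of implementing VolumeIncrementalCopy against that interface; readNextChunk is a hypothetical helper, and the FileContent field name is assumed from the .proto:

```go
func (s *demoVolumeServer) VolumeIncrementalCopy(req *VolumeIncrementalCopyRequest,
	stream VolumeServer_VolumeIncrementalCopyServer) error {
	buf := make([]byte, 64*1024)
	for {
		n, err := readNextChunk(buf) // hypothetical: reads the volume's tail data
		if n > 0 {
			// FileContent is assumed to be the bytes payload declared in the .proto.
			if sendErr := stream.Send(&VolumeIncrementalCopyResponse{FileContent: buf[:n]}); sendErr != nil {
				return sendErr
			}
		}
		if err == io.EOF {
			return nil // no more data to stream
		}
		if err != nil {
			return err
		}
	}
}
```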
+
+func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeMountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeMount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeMount",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeUnmountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeUnmount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
}
-func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) {
- out := new(VolumeUnmountResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, c.cc, opts...)
- if err != nil {
+func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeDeleteRequest)
+ if err := dec(in); err != nil {
return nil, err
}
- return out, nil
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
}
-func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) {
- out := new(VolumeDeleteResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, c.cc, opts...)
- if err != nil {
+func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeMarkReadonlyRequest)
+ if err := dec(in); err != nil {
return nil, err
}
- return out, nil
-}
-
-// Server API for VolumeServer service
-
-type VolumeServerServer interface {
- // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
- BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
- VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
- VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error)
- VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error)
- VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error)
- DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
- AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
- VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error)
- VolumeSyncIndex(*VolumeSyncIndexRequest, VolumeServer_VolumeSyncIndexServer) error
- VolumeSyncData(*VolumeSyncDataRequest, VolumeServer_VolumeSyncDataServer) error
- VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error)
- VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error)
- VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error)
-}
-
-func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) {
- s.RegisterService(&_VolumeServer_serviceDesc, srv)
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkReadonly",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(BatchDeleteRequest)
+func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeConfigureRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).BatchDelete(ctx, in)
+ return srv.(VolumeServerServer).VolumeConfigure(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/BatchDelete",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest))
+ return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VacuumVolumeCheckRequest)
+func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeCopyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, in)
+ return srv.(VolumeServerServer).VolumeCopy(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeCopy",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest))
+ return srv.(VolumeServerServer).VolumeCopy(ctx, req.(*VolumeCopyRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VacuumVolumeCompactRequest)
+func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ReadVolumeFileStatusRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, in)
+ return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCompact",
+ FullMethod: "/volume_server_pb.VolumeServer/ReadVolumeFileStatus",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, req.(*VacuumVolumeCompactRequest))
+ return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VacuumVolumeCommitRequest)
+func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(CopyFileRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream})
+}
+
+type VolumeServer_CopyFileServer interface {
+ Send(*CopyFileResponse) error
+ grpc.ServerStream
+}
+
+type volumeServerCopyFileServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(VolumeTailSenderRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).VolumeTailSender(m, &volumeServerVolumeTailSenderServer{stream})
+}
+
+type VolumeServer_VolumeTailSenderServer interface {
+ Send(*VolumeTailSenderResponse) error
+ grpc.ServerStream
+}
+
+type volumeServerVolumeTailSenderServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerVolumeTailSenderServer) Send(m *VolumeTailSenderResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeTailReceiverRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, in)
+ return srv.(VolumeServerServer).VolumeTailReceiver(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeTailReceiver",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest))
+ return srv.(VolumeServerServer).VolumeTailReceiver(ctx, req.(*VolumeTailReceiverRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VacuumVolumeCleanupRequest)
+func _VolumeServer_VolumeEcShardsGenerate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsGenerateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, in)
+ return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCleanup",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest))
+ return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, req.(*VolumeEcShardsGenerateRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteCollectionRequest)
+func _VolumeServer_VolumeEcShardsRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsRebuildRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).DeleteCollection(ctx, in)
+ return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest))
+ return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, req.(*VolumeEcShardsRebuildRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AssignVolumeRequest)
+func _VolumeServer_VolumeEcShardsCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsCopyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).AssignVolume(ctx, in)
+ return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/AssignVolume",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsCopy",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).AssignVolume(ctx, req.(*AssignVolumeRequest))
+ return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, req.(*VolumeEcShardsCopyRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VolumeSyncStatusRequest)
+func _VolumeServer_VolumeEcShardsDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsDeleteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VolumeSyncStatus(ctx, in)
+ return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsDelete",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest))
+ return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, req.(*VolumeEcShardsDeleteRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VolumeSyncIndex_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(VolumeSyncIndexRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
+func _VolumeServer_VolumeEcShardsMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsMountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
}
- return srv.(VolumeServerServer).VolumeSyncIndex(m, &volumeServerVolumeSyncIndexServer{stream})
-}
-
-type VolumeServer_VolumeSyncIndexServer interface {
- Send(*VolumeSyncIndexResponse) error
- grpc.ServerStream
-}
-
-type volumeServerVolumeSyncIndexServer struct {
- grpc.ServerStream
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsMount",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, req.(*VolumeEcShardsMountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
}
-func (x *volumeServerVolumeSyncIndexServer) Send(m *VolumeSyncIndexResponse) error {
- return x.ServerStream.SendMsg(m)
+func _VolumeServer_VolumeEcShardsUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsUnmountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, req.(*VolumeEcShardsUnmountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VolumeSyncData_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(VolumeSyncDataRequest)
+func _VolumeServer_VolumeEcShardRead_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(VolumeEcShardReadRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(VolumeServerServer).VolumeSyncData(m, &volumeServerVolumeSyncDataServer{stream})
+ return srv.(VolumeServerServer).VolumeEcShardRead(m, &volumeServerVolumeEcShardReadServer{stream})
}
-type VolumeServer_VolumeSyncDataServer interface {
- Send(*VolumeSyncDataResponse) error
+type VolumeServer_VolumeEcShardReadServer interface {
+ Send(*VolumeEcShardReadResponse) error
grpc.ServerStream
}
-type volumeServerVolumeSyncDataServer struct {
+type volumeServerVolumeEcShardReadServer struct {
grpc.ServerStream
}
-func (x *volumeServerVolumeSyncDataServer) Send(m *VolumeSyncDataResponse) error {
+func (x *volumeServerVolumeEcShardReadServer) Send(m *VolumeEcShardReadResponse) error {
return x.ServerStream.SendMsg(m)
}
-func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VolumeMountRequest)
+func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcBlobDeleteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VolumeMount(ctx, in)
+ return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VolumeMount",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcBlobDelete",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest))
+ return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, req.(*VolumeEcBlobDeleteRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VolumeUnmountRequest)
+func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeEcShardsToVolumeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VolumeUnmount(ctx, in)
+ return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest))
+ return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest))
}
return interceptor(ctx, in, info, handler)
}
-func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(VolumeDeleteRequest)
+func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(VolumeTierMoveDatToRemoteRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream})
+}
+
+type VolumeServer_VolumeTierMoveDatToRemoteServer interface {
+ Send(*VolumeTierMoveDatToRemoteResponse) error
+ grpc.ServerStream
+}
+
+type volumeServerVolumeTierMoveDatToRemoteServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m *VolumeTierMoveDatToRemoteResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(VolumeTierMoveDatFromRemoteRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream})
+}
+
+type VolumeServer_VolumeTierMoveDatFromRemoteServer interface {
+ Send(*VolumeTierMoveDatFromRemoteResponse) error
+ grpc.ServerStream
+}
+
+type volumeServerVolumeTierMoveDatFromRemoteServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeServerStatusRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(VolumeServerServer).VolumeDelete(ctx, in)
+ return srv.(VolumeServerServer).VolumeServerStatus(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete",
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest))
+ return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest))
}
return interceptor(ctx, in, info, handler)
}
+func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(QueryRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).Query(m, &volumeServerQueryServer{stream})
+}
+
+type VolumeServer_QueryServer interface {
+ Send(*QueriedStripe) error
+ grpc.ServerStream
+}
+
+type volumeServerQueryServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerQueryServer) Send(m *QueriedStripe) error {
+ return x.ServerStream.SendMsg(m)
+}
+
var _VolumeServer_serviceDesc = grpc.ServiceDesc{
ServiceName: "volume_server_pb.VolumeServer",
HandlerType: (*VolumeServerServer)(nil),
@@ -1229,8 +7597,8 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
Handler: _VolumeServer_DeleteCollection_Handler,
},
{
- MethodName: "AssignVolume",
- Handler: _VolumeServer_AssignVolume_Handler,
+ MethodName: "AllocateVolume",
+ Handler: _VolumeServer_AllocateVolume_Handler,
},
{
MethodName: "VolumeSyncStatus",
@@ -1248,90 +7616,99 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
MethodName: "VolumeDelete",
Handler: _VolumeServer_VolumeDelete_Handler,
},
+ {
+ MethodName: "VolumeMarkReadonly",
+ Handler: _VolumeServer_VolumeMarkReadonly_Handler,
+ },
+ {
+ MethodName: "VolumeConfigure",
+ Handler: _VolumeServer_VolumeConfigure_Handler,
+ },
+ {
+ MethodName: "VolumeCopy",
+ Handler: _VolumeServer_VolumeCopy_Handler,
+ },
+ {
+ MethodName: "ReadVolumeFileStatus",
+ Handler: _VolumeServer_ReadVolumeFileStatus_Handler,
+ },
+ {
+ MethodName: "VolumeTailReceiver",
+ Handler: _VolumeServer_VolumeTailReceiver_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsGenerate",
+ Handler: _VolumeServer_VolumeEcShardsGenerate_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsRebuild",
+ Handler: _VolumeServer_VolumeEcShardsRebuild_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsCopy",
+ Handler: _VolumeServer_VolumeEcShardsCopy_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsDelete",
+ Handler: _VolumeServer_VolumeEcShardsDelete_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsMount",
+ Handler: _VolumeServer_VolumeEcShardsMount_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsUnmount",
+ Handler: _VolumeServer_VolumeEcShardsUnmount_Handler,
+ },
+ {
+ MethodName: "VolumeEcBlobDelete",
+ Handler: _VolumeServer_VolumeEcBlobDelete_Handler,
+ },
+ {
+ MethodName: "VolumeEcShardsToVolume",
+ Handler: _VolumeServer_VolumeEcShardsToVolume_Handler,
+ },
+ {
+ MethodName: "VolumeServerStatus",
+ Handler: _VolumeServer_VolumeServerStatus_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
- StreamName: "VolumeSyncIndex",
- Handler: _VolumeServer_VolumeSyncIndex_Handler,
+ StreamName: "VolumeIncrementalCopy",
+ Handler: _VolumeServer_VolumeIncrementalCopy_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "CopyFile",
+ Handler: _VolumeServer_CopyFile_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "VolumeTailSender",
+ Handler: _VolumeServer_VolumeTailSender_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "VolumeEcShardRead",
+ Handler: _VolumeServer_VolumeEcShardRead_Handler,
ServerStreams: true,
},
{
- StreamName: "VolumeSyncData",
- Handler: _VolumeServer_VolumeSyncData_Handler,
+ StreamName: "VolumeTierMoveDatToRemote",
+ Handler: _VolumeServer_VolumeTierMoveDatToRemote_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "VolumeTierMoveDatFromRemote",
+ Handler: _VolumeServer_VolumeTierMoveDatFromRemote_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "Query",
+ Handler: _VolumeServer_Query_Handler,
ServerStreams: true,
},
},
Metadata: "volume_server.proto",
}
-
-func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 1044 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x57, 0xdd, 0x72, 0xdb, 0x44,
- 0x14, 0x8e, 0x6a, 0x3b, 0x76, 0x8e, 0x6d, 0x6a, 0xd6, 0x69, 0xa2, 0xaa, 0x10, 0x8c, 0x80, 0xd4,
- 0x69, 0x43, 0x80, 0x74, 0x80, 0x32, 0xdc, 0x00, 0x09, 0x30, 0xb9, 0xe8, 0x94, 0xd9, 0x4c, 0x3b,
- 0xcc, 0xd0, 0x19, 0x8f, 0x22, 0xad, 0x9d, 0x25, 0xb2, 0xe4, 0x6a, 0x57, 0x99, 0x94, 0x37, 0xe1,
- 0x9a, 0x1b, 0x9e, 0x8e, 0x17, 0xe0, 0x86, 0xd9, 0x1f, 0xd9, 0xfa, 0x73, 0x24, 0xe0, 0x6e, 0xf7,
- 0xec, 0x39, 0xdf, 0xf9, 0xd9, 0xa3, 0xf3, 0xad, 0x60, 0x78, 0x1d, 0xfa, 0xf1, 0x9c, 0x4c, 0x18,
- 0x89, 0xae, 0x49, 0x74, 0xb4, 0x88, 0x42, 0x1e, 0xa2, 0x41, 0x46, 0x38, 0x59, 0x5c, 0xd8, 0x9f,
- 0x00, 0xfa, 0xce, 0xe1, 0xee, 0xe5, 0x29, 0xf1, 0x09, 0x27, 0x98, 0xbc, 0x8e, 0x09, 0xe3, 0xe8,
- 0x3e, 0x74, 0xa6, 0xd4, 0x27, 0x13, 0xea, 0x31, 0xd3, 0x18, 0x35, 0xc6, 0x5b, 0xb8, 0x2d, 0xf6,
- 0x67, 0x1e, 0xb3, 0x9f, 0xc3, 0x30, 0x63, 0xc0, 0x16, 0x61, 0xc0, 0x08, 0x7a, 0x0a, 0xed, 0x88,
- 0xb0, 0xd8, 0xe7, 0xca, 0xa0, 0x7b, 0xbc, 0x77, 0x94, 0xf7, 0x75, 0xb4, 0x34, 0x89, 0x7d, 0x8e,
- 0x13, 0x75, 0x9b, 0x42, 0x2f, 0x7d, 0x80, 0x76, 0xa1, 0xad, 0x7d, 0x9b, 0xc6, 0xc8, 0x18, 0x6f,
- 0xe1, 0x4d, 0xe5, 0x1a, 0xed, 0xc0, 0x26, 0xe3, 0x0e, 0x8f, 0x99, 0x79, 0x67, 0x64, 0x8c, 0x5b,
- 0x58, 0xef, 0xd0, 0x36, 0xb4, 0x48, 0x14, 0x85, 0x91, 0xd9, 0x90, 0xea, 0x6a, 0x83, 0x10, 0x34,
- 0x19, 0xfd, 0x8d, 0x98, 0xcd, 0x91, 0x31, 0xee, 0x63, 0xb9, 0xb6, 0xdb, 0xd0, 0xfa, 0x7e, 0xbe,
- 0xe0, 0x6f, 0xec, 0x2f, 0xc1, 0x7c, 0xe9, 0xb8, 0x71, 0x3c, 0x7f, 0x29, 0x63, 0x3c, 0xb9, 0x24,
- 0xee, 0x55, 0x92, 0xfb, 0x03, 0xd8, 0x92, 0x91, 0x7b, 0x49, 0x04, 0x7d, 0xdc, 0x51, 0x82, 0x33,
- 0xcf, 0xfe, 0x06, 0xee, 0x97, 0x18, 0xea, 0x1a, 0x7c, 0x00, 0xfd, 0x99, 0x13, 0x5d, 0x38, 0x33,
- 0x32, 0x89, 0x1c, 0x4e, 0x43, 0x69, 0x6d, 0xe0, 0x9e, 0x16, 0x62, 0x21, 0xb3, 0x7f, 0x01, 0x2b,
- 0x83, 0x10, 0xce, 0x17, 0x8e, 0xcb, 0xeb, 0x38, 0x47, 0x23, 0xe8, 0x2e, 0x22, 0xe2, 0xf8, 0x7e,
- 0xe8, 0x3a, 0x9c, 0xc8, 0x2a, 0x34, 0x70, 0x5a, 0x64, 0xbf, 0x0b, 0x0f, 0x4a, 0xc1, 0x55, 0x80,
- 0xf6, 0xd3, 0x5c, 0xf4, 0xe1, 0x7c, 0x4e, 0x6b, 0xb9, 0xb6, 0xdf, 0x29, 0x44, 0x2d, 0x2d, 0x35,
- 0xee, 0x57, 0xb9, 0x53, 0x9f, 0x38, 0x41, 0xbc, 0xa8, 0x05, 0x9c, 0x8f, 0x38, 0x31, 0x5d, 0x22,
- 0xef, 0xaa, 0xe6, 0x38, 0x09, 0x7d, 0x9f, 0xb8, 0x9c, 0x86, 0x41, 0x02, 0xbb, 0x07, 0xe0, 0x2e,
- 0x85, 0xba, 0x55, 0x52, 0x12, 0xdb, 0x02, 0xb3, 0x68, 0xaa, 0x61, 0xff, 0x34, 0x60, 0xf8, 0x2d,
- 0x63, 0x74, 0x16, 0x28, 0xb7, 0xb5, 0xca, 0x9f, 0x75, 0x78, 0x27, 0xef, 0x30, 0x7f, 0x3d, 0x8d,
- 0xc2, 0xf5, 0x08, 0x8d, 0x88, 0x2c, 0x7c, 0xea, 0x3a, 0x12, 0xa2, 0x29, 0x21, 0xd2, 0x22, 0x34,
- 0x80, 0x06, 0xe7, 0xbe, 0xd9, 0x92, 0x27, 0x62, 0x69, 0xef, 0xc0, 0x76, 0x36, 0x52, 0x9d, 0xc2,
- 0x17, 0xb0, 0xab, 0x24, 0xe7, 0x6f, 0x02, 0xf7, 0x5c, 0x7e, 0x09, 0xb5, 0x0a, 0xfe, 0xb7, 0x01,
- 0x66, 0xd1, 0x50, 0x77, 0xf0, 0xff, 0xcd, 0xff, 0xdf, 0x66, 0x87, 0xde, 0x83, 0x2e, 0x77, 0xa8,
- 0x3f, 0x09, 0xa7, 0x53, 0x46, 0xb8, 0xb9, 0x39, 0x32, 0xc6, 0x4d, 0x0c, 0x42, 0xf4, 0x5c, 0x4a,
- 0xd0, 0x01, 0x0c, 0x5c, 0xd5, 0xc5, 0x93, 0x88, 0x5c, 0x53, 0x26, 0x90, 0xdb, 0x32, 0xb0, 0xbb,
- 0x6e, 0xd2, 0xdd, 0x4a, 0x8c, 0x6c, 0xe8, 0x53, 0xef, 0x66, 0x22, 0x87, 0x87, 0xfc, 0xf4, 0x3b,
- 0x12, 0xad, 0x4b, 0xbd, 0x9b, 0x1f, 0xa8, 0x4f, 0xce, 0xc5, 0x04, 0xf8, 0x1c, 0x76, 0x56, 0xc9,
- 0x9f, 0x05, 0x1e, 0xb9, 0xa9, 0x55, 0xb4, 0x1f, 0xd3, 0xc5, 0xd6, 0x66, 0xba, 0x64, 0x87, 0x80,
- 0xa8, 0x10, 0x28, 0xbf, 0x6e, 0x18, 0x70, 0x12, 0x70, 0x09, 0xd0, 0xc3, 0x03, 0x79, 0x22, 0x9c,
- 0x9f, 0x28, 0xb9, 0xfd, 0xbb, 0x01, 0xf7, 0x56, 0x48, 0xa7, 0x0e, 0x77, 0x6a, 0xb5, 0x9e, 0x05,
- 0x9d, 0x65, 0xf6, 0x77, 0xd4, 0x59, 0xb2, 0x17, 0x63, 0x51, 0x57, 0xaf, 0x21, 0x4f, 0xf4, 0xae,
- 0x6c, 0x00, 0x0a, 0x27, 0x01, 0x21, 0x9e, 0x9a, 0xae, 0xea, 0x1a, 0x3a, 0x4a, 0x70, 0xe6, 0xd9,
- 0x5f, 0xa7, 0x6b, 0xa3, 0x42, 0xd3, 0x39, 0xbe, 0x0f, 0xbd, 0x92, 0xec, 0xba, 0xd3, 0x54, 0x62,
- 0x9f, 0x01, 0x52, 0xc6, 0xcf, 0xc2, 0x38, 0xa8, 0x37, 0x53, 0xee, 0xc1, 0x30, 0x63, 0xa2, 0x1b,
- 0xfb, 0x09, 0x6c, 0x2b, 0xf1, 0x8b, 0x60, 0x5e, 0x1b, 0x6b, 0x37, 0x29, 0xeb, 0xd2, 0x48, 0xa3,
- 0x1d, 0x27, 0x4e, 0xb2, 0x04, 0x77, 0x2b, 0xd8, 0x4e, 0x12, 0x41, 0x96, 0xe3, 0x56, 0x01, 0xbf,
- 0xa0, 0x3f, 0x89, 0x79, 0xae, 0xb0, 0x56, 0xea, 0x89, 0x58, 0xab, 0xff, 0x0c, 0x70, 0x4a, 0xd9,
- 0x95, 0xfa, 0xc4, 0x44, 0xef, 0x7b, 0x34, 0xd2, 0x73, 0x4a, 0x2c, 0x85, 0xc4, 0xf1, 0x7d, 0x79,
- 0x9f, 0x4d, 0x2c, 0x96, 0xe2, 0xca, 0x62, 0x46, 0x3c, 0x79, 0x91, 0x4d, 0x2c, 0xd7, 0x42, 0x36,
- 0x8d, 0x88, 0xba, 0xc6, 0x26, 0x96, 0x6b, 0xfb, 0x0f, 0x03, 0xb6, 0x9e, 0x91, 0xb9, 0x46, 0xde,
- 0x03, 0x98, 0x85, 0x51, 0x18, 0x73, 0x1a, 0x10, 0x26, 0x1d, 0xb4, 0x70, 0x4a, 0xf2, 0xdf, 0xfd,
- 0xc8, 0x16, 0x22, 0xfe, 0x54, 0x76, 0x4a, 0x13, 0xcb, 0xb5, 0x90, 0x5d, 0x12, 0x67, 0xa1, 0x3f,
- 0x55, 0xb9, 0x16, 0x0c, 0xcc, 0xb8, 0xe3, 0x5e, 0xc9, 0x2f, 0xb3, 0x89, 0xd5, 0xe6, 0xf8, 0x2f,
- 0x80, 0x9e, 0x6e, 0x28, 0xf9, 0x04, 0x40, 0xaf, 0xa0, 0x9b, 0x7a, 0x3a, 0xa0, 0x0f, 0x8b, 0x2f,
- 0x84, 0xe2, 0x53, 0xc4, 0xfa, 0xa8, 0x42, 0x4b, 0x17, 0x7b, 0x03, 0x05, 0xf0, 0x76, 0x81, 0x9a,
- 0xd1, 0xa3, 0xa2, 0xf5, 0x3a, 0xe2, 0xb7, 0x1e, 0xd7, 0xd2, 0x5d, 0xfa, 0xe3, 0x30, 0x2c, 0xe1,
- 0x5a, 0x74, 0x58, 0x81, 0x92, 0xe1, 0x7b, 0xeb, 0xe3, 0x9a, 0xda, 0x4b, 0xaf, 0xaf, 0x01, 0x15,
- 0x89, 0x18, 0x3d, 0xae, 0x84, 0x59, 0x11, 0xbd, 0x75, 0x58, 0x4f, 0x79, 0x6d, 0xa2, 0x8a, 0xa2,
- 0x2b, 0x13, 0xcd, 0x3c, 0x02, 0x2a, 0x13, 0xcd, 0xf1, 0xfe, 0x06, 0xba, 0x82, 0x41, 0x9e, 0xbe,
- 0xd1, 0xc1, 0xba, 0x37, 0x65, 0xe1, 0x75, 0x60, 0x3d, 0xaa, 0xa3, 0xba, 0x74, 0x36, 0x81, 0x5e,
- 0x9a, 0x64, 0x51, 0x49, 0xd3, 0x95, 0x3c, 0x17, 0xac, 0xfd, 0x2a, 0xb5, 0x74, 0x36, 0x79, 0xd2,
- 0x2d, 0xcb, 0x66, 0x0d, 0xa3, 0x97, 0x65, 0xb3, 0x8e, 0xc3, 0xed, 0x0d, 0xf4, 0x2b, 0xdc, 0xcd,
- 0xb1, 0x15, 0x1a, 0xdf, 0x06, 0x90, 0xe6, 0x41, 0xeb, 0xa0, 0x86, 0x66, 0xe2, 0xe9, 0x53, 0x03,
- 0xcd, 0xe0, 0xad, 0x2c, 0x69, 0xa0, 0x87, 0xb7, 0x01, 0xa4, 0x18, 0xcf, 0x1a, 0x57, 0x2b, 0xa6,
- 0x1c, 0xbd, 0x82, 0x6e, 0x8a, 0x2d, 0xca, 0x86, 0x47, 0x91, 0x7f, 0xca, 0x86, 0x47, 0x19, 0xe5,
- 0x6c, 0xa0, 0x0b, 0xe8, 0x67, 0xf8, 0x03, 0xed, 0xaf, 0xb3, 0xcc, 0xb2, 0x92, 0xf5, 0xb0, 0x52,
- 0x2f, 0xdd, 0x64, 0x69, 0x5a, 0x41, 0x6b, 0x83, 0xcb, 0x0e, 0xc0, 0xfd, 0x2a, 0xb5, 0xc4, 0xc1,
- 0xc5, 0xa6, 0xfc, 0xc9, 0x7b, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x3c, 0x6d, 0xd7,
- 0xfb, 0x0d, 0x00, 0x00,
-}
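
Each regenerated unary handler above follows one pattern: decode the request, then either call the service method directly or route it through the server's grpc.UnaryServerInterceptor with a grpc.UnaryServerInfo naming the method. A minimal sketch of such an interceptor, assuming a logging use case (illustrative only, not part of this change):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logInterceptor times each unary call. info.FullMethod carries values like
// "/volume_server_pb.VolumeServer/VolumeCopy", as set by the handlers above.
func logInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// With an interceptor installed, every generated handler takes the
	// interceptor != nil branch; service registration would follow here.
	srv := grpc.NewServer(grpc.UnaryInterceptor(logInterceptor))
	_ = srv
}
```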
diff --git a/weed/pb/volume_server_pb/volume_server_helper.go b/weed/pb/volume_server_pb/volume_server_helper.go
new file mode 100644
index 000000000..356be27ff
--- /dev/null
+++ b/weed/pb/volume_server_pb/volume_server_helper.go
@@ -0,0 +1,5 @@
+package volume_server_pb
+
+func (m *RemoteFile) BackendName() string {
+ return m.BackendType + "." + m.BackendId
+}
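
The new helper just joins the two identifying fields with a dot. A quick illustrative use, with made-up field values:

```go
package main

import "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"

func main() {
	// Hypothetical backend; RemoteFile normally arrives in tier-move responses.
	rf := &volume_server_pb.RemoteFile{BackendType: "s3", BackendId: "default"}
	println(rf.BackendName()) // prints "s3.default"
}
```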
diff --git a/weed/query/json/query_json.go b/weed/query/json/query_json.go
new file mode 100644
index 000000000..46f3b1b56
--- /dev/null
+++ b/weed/query/json/query_json.go
@@ -0,0 +1,107 @@
+package json
+
+import (
+ "strconv"
+
+ "github.com/chrislusf/seaweedfs/weed/query/sqltypes"
+ "github.com/tidwall/gjson"
+ "github.com/tidwall/match"
+)
+
+type Query struct {
+ Field string
+ Op string
+ Value string
+}
+
+func QueryJson(jsonLine string, projections []string, query Query) (passedFilter bool, values []sqltypes.Value) {
+ if filterJson(jsonLine, query) {
+ passedFilter = true
+ fields := gjson.GetMany(jsonLine, projections...)
+ for _, f := range fields {
+ values = append(values, sqltypes.MakeTrusted(sqltypes.Type(f.Type), sqltypes.StringToBytes(f.Raw)))
+ }
+ return
+ }
+ return false, nil
+}
+
+func filterJson(jsonLine string, query Query) bool {
+
+ value := gjson.Get(jsonLine, query.Field)
+
+ // copied from gjson.go queryMatches() function
+ rpv := query.Value
+
+ if !value.Exists() {
+ return false
+ }
+ if query.Op == "" {
+ // the query is only looking for existence, such as:
+ // friends.#(name)
+ // which makes sure that the array "friends" has an element of
+ // "name" that exists
+ return true
+ }
+ switch value.Type {
+ case gjson.String:
+ switch query.Op {
+ case "=":
+ return value.Str == rpv
+ case "!=":
+ return value.Str != rpv
+ case "<":
+ return value.Str < rpv
+ case "<=":
+ return value.Str <= rpv
+ case ">":
+ return value.Str > rpv
+ case ">=":
+ return value.Str >= rpv
+ case "%":
+ return match.Match(value.Str, rpv)
+ case "!%":
+ return !match.Match(value.Str, rpv)
+ }
+ case gjson.Number:
+ rpvn, _ := strconv.ParseFloat(rpv, 64)
+ switch query.Op {
+ case "=":
+ return value.Num == rpvn
+ case "!=":
+ return value.Num != rpvn
+ case "<":
+ return value.Num < rpvn
+ case "<=":
+ return value.Num <= rpvn
+ case ">":
+ return value.Num > rpvn
+ case ">=":
+ return value.Num >= rpvn
+ }
+ case gjson.True:
+ switch query.Op {
+ case "=":
+ return rpv == "true"
+ case "!=":
+ return rpv != "true"
+ case ">":
+ return rpv == "false"
+ case ">=":
+ return true
+ }
+ case gjson.False:
+ switch query.Op {
+ case "=":
+ return rpv == "false"
+ case "!=":
+ return rpv != "false"
+ case "<":
+ return rpv == "true"
+ case "<=":
+ return true
+ }
+ }
+ return false
+
+}
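
QueryJson applies the filter first and only then evaluates the projections, returning the raw gjson tokens as sqltypes values. A small usage sketch with a made-up record:

```go
package main

import "github.com/chrislusf/seaweedfs/weed/query/json"

func main() {
	line := `{"fruit":"Apple","size":6,"quiz":"green"}`
	passed, values := json.QueryJson(line, []string{"fruit", "size"}, json.Query{
		Field: "size",
		Op:    ">=",
		Value: "5",
	})
	println(passed)      // true: 6 >= 5
	println(len(values)) // 2: the projected "fruit" and "size" tokens
}
```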
diff --git a/weed/query/json/query_json_test.go b/weed/query/json/query_json_test.go
new file mode 100644
index 000000000..1794bb333
--- /dev/null
+++ b/weed/query/json/query_json_test.go
@@ -0,0 +1,133 @@
+package json
+
+import (
+ "testing"
+
+ "github.com/tidwall/gjson"
+)
+
+func TestGjson(t *testing.T) {
+ data := `
+ {
+ "quiz": {
+ "sport": {
+ "q1": {
+ "question": "Which one is correct team name in NBA?",
+ "options": [
+ "New York Bulls",
+ "Los Angeles Kings",
+ "Golden State Warriros",
+ "Huston Rocket"
+ ],
+ "answer": "Huston Rocket"
+ }
+ },
+ "maths": {
+ "q1": {
+ "question": "5 + 7 = ?",
+ "options": [
+ "10",
+ "11",
+ "12",
+ "13"
+ ],
+ "answer": "12"
+ },
+ "q2": {
+ "question": "12 - 8 = ?",
+ "options": [
+ "1",
+ "2",
+ "3",
+ "4"
+ ],
+ "answer": "4"
+ }
+ }
+ }
+ }
+
+ {
+ "fruit": "Apple",
+ "size": "Large",
+ "quiz": "Red"
+ }
+
+`
+
+ projections := []string{"quiz", "fruit"}
+
+ gjson.ForEachLine(data, func(line gjson.Result) bool {
+ println(line.Raw)
+ println("+++++++++++")
+ results := gjson.GetMany(line.Raw, projections...)
+ for _, result := range results {
+ println(result.Index, result.Type, result.String())
+ }
+ println("-----------")
+ return true
+ })
+
+}
+
+func TestJsonQueryRow(t *testing.T) {
+
+ data := `
+ {
+ "fruit": "Bl\"ue",
+ "size": 6,
+ "quiz": "green"
+ }
+
+`
+ selections := []string{"fruit", "size"}
+
+ isFiltered, values := QueryJson(data, selections, Query{
+ Field: "quiz",
+ Op: "=",
+ Value: "green",
+ })
+
+ if !isFiltered {
+ t.Errorf("should have been filtered")
+ }
+
+ if values == nil {
+ t.Errorf("values should have been returned")
+ }
+
+ buf := ToJson(nil, selections, values)
+ println(string(buf))
+
+}
+
+func TestJsonQueryNumber(t *testing.T) {
+
+ data := `
+ {
+ "fruit": "Bl\"ue",
+ "size": 6,
+ "quiz": "green"
+ }
+
+`
+ selections := []string{"fruit", "quiz"}
+
+ isFiltered, values := QueryJson(data, selections, Query{
+ Field: "size",
+ Op: ">=",
+ Value: "6",
+ })
+
+ if !isFiltered {
+ t.Errorf("should have been filtered")
+ }
+
+ if values == nil {
+ t.Errorf("values should have been returned")
+ }
+
+ buf := ToJson(nil, selections, values)
+ println(string(buf))
+
+}
diff --git a/weed/query/json/seralize.go b/weed/query/json/seralize.go
new file mode 100644
index 000000000..9bbddc2ff
--- /dev/null
+++ b/weed/query/json/seralize.go
@@ -0,0 +1,17 @@
+package json
+
+import "github.com/chrislusf/seaweedfs/weed/query/sqltypes"
+
+// ToJson appends a JSON-like object to buf. Note that selection names are
+// written verbatim, without added quotes, so callers that need strict JSON
+// must pass pre-quoted selection strings.
+func ToJson(buf []byte, selections []string, values []sqltypes.Value) []byte {
+ buf = append(buf, '{')
+ for i, value := range values {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, selections[i]...)
+ buf = append(buf, ':')
+ buf = append(buf, value.Raw()...)
+ }
+ buf = append(buf, '}')
+ return buf
+}
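
Since ToJson writes selection names and raw values verbatim, passing pre-quoted names keeps the output parseable as JSON. A sketch using values shaped the way QueryJson produces them:

```go
package main

import (
	"github.com/chrislusf/seaweedfs/weed/query/json"
	"github.com/chrislusf/seaweedfs/weed/query/sqltypes"
)

func main() {
	// Raw JSON tokens, quotes included, as gjson returns them.
	vals := []sqltypes.Value{
		sqltypes.MakeTrusted(sqltypes.VarChar, []byte(`"Apple"`)),
		sqltypes.MakeTrusted(sqltypes.Int64, []byte(`6`)),
	}
	buf := json.ToJson(nil, []string{`"fruit"`, `"size"`}, vals)
	println(string(buf)) // {"fruit":"Apple","size":6}
}
```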
diff --git a/weed/query/sqltypes/const.go b/weed/query/sqltypes/const.go
new file mode 100644
index 000000000..f1d0f540e
--- /dev/null
+++ b/weed/query/sqltypes/const.go
@@ -0,0 +1,124 @@
+package sqltypes
+
+// copied from vitess
+
+// Flag allows us to qualify types by their common properties.
+type Flag int32
+
+const (
+ Flag_NONE Flag = 0
+ Flag_ISINTEGRAL Flag = 256
+ Flag_ISUNSIGNED Flag = 512
+ Flag_ISFLOAT Flag = 1024
+ Flag_ISQUOTED Flag = 2048
+ Flag_ISTEXT Flag = 4096
+ Flag_ISBINARY Flag = 8192
+)
+
+var Flag_name = map[int32]string{
+ 0: "NONE",
+ 256: "ISINTEGRAL",
+ 512: "ISUNSIGNED",
+ 1024: "ISFLOAT",
+ 2048: "ISQUOTED",
+ 4096: "ISTEXT",
+ 8192: "ISBINARY",
+}
+var Flag_value = map[string]int32{
+ "NONE": 0,
+ "ISINTEGRAL": 256,
+ "ISUNSIGNED": 512,
+ "ISFLOAT": 1024,
+ "ISQUOTED": 2048,
+ "ISTEXT": 4096,
+ "ISBINARY": 8192,
+}
+
+// Type defines the various supported data types in bind vars
+// and query results.
+type Type int32
+
+const (
+ // NULL_TYPE specifies a NULL type.
+ Type_NULL_TYPE Type = 0
+ // INT8 specifies a TINYINT type.
+ // Properties: 1, IsNumber.
+ Type_INT8 Type = 257
+ // UINT8 specifies a TINYINT UNSIGNED type.
+ // Properties: 2, IsNumber, IsUnsigned.
+ Type_UINT8 Type = 770
+ // INT16 specifies a SMALLINT type.
+ // Properties: 3, IsNumber.
+ Type_INT16 Type = 259
+ // UINT16 specifies a SMALLINT UNSIGNED type.
+ // Properties: 4, IsNumber, IsUnsigned.
+ Type_UINT16 Type = 772
+ // INT32 specifies an INTEGER type.
+ // Properties: 7, IsNumber.
+ Type_INT32 Type = 263
+ // UINT32 specifies an INTEGER UNSIGNED type.
+ // Properties: 8, IsNumber, IsUnsigned.
+ Type_UINT32 Type = 776
+ // INT64 specifies a BIGINT type.
+ // Properties: 9, IsNumber.
+ Type_INT64 Type = 265
+ // UINT64 specifies a BIGINT UNSIGNED type.
+ // Properties: 10, IsNumber, IsUnsigned.
+ Type_UINT64 Type = 778
+ // FLOAT32 specifies a FLOAT type.
+ // Properties: 11, IsFloat.
+ Type_FLOAT32 Type = 1035
+ // FLOAT64 specifies a DOUBLE or REAL type.
+ // Properties: 12, IsFloat.
+ Type_FLOAT64 Type = 1036
+ // TIMESTAMP specifies a TIMESTAMP type.
+ // Properties: 13, IsQuoted.
+ Type_TIMESTAMP Type = 2061
+ // DATE specifies a DATE type.
+ // Properties: 14, IsQuoted.
+ Type_DATE Type = 2062
+ // TIME specifies a TIME type.
+ // Properties: 15, IsQuoted.
+ Type_TIME Type = 2063
+ // DATETIME specifies a DATETIME type.
+ // Properties: 16, IsQuoted.
+ Type_DATETIME Type = 2064
+ // YEAR specifies a YEAR type.
+ // Properties: 17, IsNumber, IsUnsigned.
+ Type_YEAR Type = 785
+ // DECIMAL specifies a DECIMAL or NUMERIC type.
+ // Properties: 18, None.
+ Type_DECIMAL Type = 18
+ // TEXT specifies a TEXT type.
+ // Properties: 19, IsQuoted, IsText.
+ Type_TEXT Type = 6163
+ // BLOB specifies a BLOB type.
+ // Properties: 20, IsQuoted, IsBinary.
+ Type_BLOB Type = 10260
+ // VARCHAR specifies a VARCHAR type.
+ // Properties: 21, IsQuoted, IsText.
+ Type_VARCHAR Type = 6165
+ // VARBINARY specifies a VARBINARY type.
+ // Properties: 22, IsQuoted, IsBinary.
+ Type_VARBINARY Type = 10262
+ // CHAR specifies a CHAR type.
+ // Properties: 23, IsQuoted, IsText.
+ Type_CHAR Type = 6167
+ // BINARY specifies a BINARY type.
+ // Properties: 24, IsQuoted, IsBinary.
+ Type_BINARY Type = 10264
+ // BIT specifies a BIT type.
+ // Properties: 25, IsQuoted.
+ Type_BIT Type = 2073
+ // JSON specifies a JSON type.
+ // Properties: 30, IsQuoted.
+ Type_JSON Type = 2078
+)
+
+// BindVariable represents a single bind variable in a Query.
+type BindVariable struct {
+ Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // values are set if type is TUPLE.
+ Values []*Value `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"`
+}
diff --git a/weed/query/sqltypes/type.go b/weed/query/sqltypes/type.go
new file mode 100644
index 000000000..f4f3dd471
--- /dev/null
+++ b/weed/query/sqltypes/type.go
@@ -0,0 +1,101 @@
+package sqltypes
+
+// These bit flags can be used to query on the
+// common properties of types.
+const (
+ flagIsIntegral = int(Flag_ISINTEGRAL)
+ flagIsUnsigned = int(Flag_ISUNSIGNED)
+ flagIsFloat = int(Flag_ISFLOAT)
+ flagIsQuoted = int(Flag_ISQUOTED)
+ flagIsText = int(Flag_ISTEXT)
+ flagIsBinary = int(Flag_ISBINARY)
+)
+
+// IsIntegral returns true if Type is an integral
+// (signed/unsigned) that can be represented using
+// up to 64 binary bits.
+// If you have a Value object, use its member function.
+func IsIntegral(t Type) bool {
+ return int(t)&flagIsIntegral == flagIsIntegral
+}
+
+// IsSigned returns true if Type is a signed integral.
+// If you have a Value object, use its member function.
+func IsSigned(t Type) bool {
+ return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral
+}
+
+// IsUnsigned returns true if Type is an unsigned integral.
+// Caution: this is not the same as !IsSigned.
+// If you have a Value object, use its member function.
+func IsUnsigned(t Type) bool {
+ return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned
+}
+
+// IsFloat returns true if Type is a floating point type.
+// If you have a Value object, use its member function.
+func IsFloat(t Type) bool {
+ return int(t)&flagIsFloat == flagIsFloat
+}
+
+// IsQuoted returns true if Type is a quoted text or binary.
+// If you have a Value object, use its member function.
+func IsQuoted(t Type) bool {
+ return (int(t)&flagIsQuoted == flagIsQuoted) && t != Bit
+}
+
+// IsText returns true if Type is a text.
+// If you have a Value object, use its member function.
+func IsText(t Type) bool {
+ return int(t)&flagIsText == flagIsText
+}
+
+// IsBinary returns true if Type is a binary.
+// If you have a Value object, use its member function.
+func IsBinary(t Type) bool {
+ return int(t)&flagIsBinary == flagIsBinary
+}
+
+// isNumber returns true if the type is any type of number.
+func isNumber(t Type) bool {
+ return IsIntegral(t) || IsFloat(t) || t == Decimal
+}
+
+// IsTemporal returns true if Value is time type.
+func IsTemporal(t Type) bool {
+ switch t {
+ case Timestamp, Date, Time, Datetime:
+ return true
+ }
+ return false
+}
+
+// Vitess data types. These are idiomatically
+// named synonyms for the Type values.
+const (
+ Null = Type_NULL_TYPE
+ Int8 = Type_INT8
+ Uint8 = Type_UINT8
+ Int16 = Type_INT16
+ Uint16 = Type_UINT16
+ Int32 = Type_INT32
+ Uint32 = Type_UINT32
+ Int64 = Type_INT64
+ Uint64 = Type_UINT64
+ Float32 = Type_FLOAT32
+ Float64 = Type_FLOAT64
+ Timestamp = Type_TIMESTAMP
+ Date = Type_DATE
+ Time = Type_TIME
+ Datetime = Type_DATETIME
+ Year = Type_YEAR
+ Decimal = Type_DECIMAL
+ Text = Type_TEXT
+ Blob = Type_BLOB
+ VarChar = Type_VARCHAR
+ VarBinary = Type_VARBINARY
+ Char = Type_CHAR
+ Binary = Type_BINARY
+ Bit = Type_BIT
+ TypeJSON = Type_JSON
+)
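
With the flag bits in place, the predicates above reduce to mask checks. A few illustrative evaluations:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/query/sqltypes"
)

func main() {
	fmt.Println(sqltypes.IsSigned(sqltypes.Int64))    // true
	fmt.Println(sqltypes.IsUnsigned(sqltypes.Uint64)) // true
	fmt.Println(sqltypes.IsSigned(sqltypes.Uint64))   // false: the unsigned bit is set
	fmt.Println(sqltypes.IsQuoted(sqltypes.VarChar))  // true
	fmt.Println(sqltypes.IsQuoted(sqltypes.Bit))      // false: Bit is special-cased
}
```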
diff --git a/weed/query/sqltypes/unsafe.go b/weed/query/sqltypes/unsafe.go
new file mode 100644
index 000000000..e322c92ce
--- /dev/null
+++ b/weed/query/sqltypes/unsafe.go
@@ -0,0 +1,30 @@
+package sqltypes
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// BytesToString casts a byte slice to a string without copying.
+// The caller must not modify b while the returned string is in use.
+func BytesToString(b []byte) (s string) {
+ if len(b) == 0 {
+ return ""
+ }
+
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ sh := reflect.StringHeader{Data: bh.Data, Len: bh.Len}
+
+ return *(*string)(unsafe.Pointer(&sh))
+}
+
+// StringToBytes casts a string to a byte slice without copying.
+// The returned slice must never be written to, since Go string data
+// may be backed by read-only memory.
+func StringToBytes(s string) []byte {
+ if len(s) == 0 {
+ return []byte{}
+ }
+
+ sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ bh := reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Len}
+
+ return *(*[]byte)(unsafe.Pointer(&bh))
+}
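
Both casts alias the original memory rather than copying, which is the entire point and also the hazard. A sketch of the aliasing behavior:

```go
package main

import "github.com/chrislusf/seaweedfs/weed/query/sqltypes"

func main() {
	b := []byte("hello")
	s := sqltypes.BytesToString(b)
	b[0] = 'H'
	println(s) // "Hello": the string shares b's backing array

	// The other direction must stay read-only: writing into the slice
	// returned by StringToBytes can fault, since Go string data may live
	// in read-only memory.
	ro := sqltypes.StringToBytes("constant")
	_ = ro
}
```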
diff --git a/weed/query/sqltypes/value.go b/weed/query/sqltypes/value.go
new file mode 100644
index 000000000..012de2b45
--- /dev/null
+++ b/weed/query/sqltypes/value.go
@@ -0,0 +1,355 @@
+package sqltypes
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+)
+
+var (
+ // NULL represents the NULL value.
+ NULL = Value{}
+ // DontEscape tells you if a character should not be escaped.
+ DontEscape = byte(255)
+ nullstr = []byte("null")
+)
+
+type Value struct {
+ typ Type
+ val []byte
+}
+
+// NewValue builds a Value using typ and val. If the value and typ
+// don't match, it returns an error.
+func NewValue(typ Type, val []byte) (v Value, err error) {
+ switch {
+ case IsSigned(typ):
+ if _, err := strconv.ParseInt(string(val), 0, 64); err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(typ, val), nil
+ case IsUnsigned(typ):
+ if _, err := strconv.ParseUint(string(val), 0, 64); err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(typ, val), nil
+ case IsFloat(typ) || typ == Decimal:
+ if _, err := strconv.ParseFloat(string(val), 64); err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(typ, val), nil
+ case IsQuoted(typ) || typ == Bit || typ == Null:
+ return MakeTrusted(typ, val), nil
+ }
+ // All other types are unsafe or invalid.
+ return NULL, fmt.Errorf("invalid type specified for MakeValue: %v", typ)
+}
+
+// MakeTrusted makes a new Value based on the type.
+// This function should only be used if you know the value
+// and type conform to the rules. Every place this function is
+// called, a comment is needed that explains why it's justified.
+// Exceptions: The current package and mysql package do not need
+// comments. Other packages can also use the function to create
+// VarBinary or VarChar values.
+func MakeTrusted(typ Type, val []byte) Value {
+
+ if typ == Null {
+ return NULL
+ }
+
+ return Value{typ: typ, val: val}
+}
+
+// NewInt64 builds an Int64 Value.
+func NewInt64(v int64) Value {
+ return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10))
+}
+
+// NewInt32 builds an Int32 Value.
+func NewInt32(v int32) Value {
+ return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10))
+}
+
+// NewUint64 builds an Uint64 Value.
+func NewUint64(v uint64) Value {
+ return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10))
+}
+
+// NewFloat32 builds a Float32 Value.
+func NewFloat32(v float32) Value {
+ return MakeTrusted(Float32, strconv.AppendFloat(nil, float64(v), 'f', -1, 32))
+}
+
+// NewFloat64 builds a Float64 Value.
+func NewFloat64(v float64) Value {
+ return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64))
+}
+
+// NewVarChar builds a VarChar Value.
+func NewVarChar(v string) Value {
+ return MakeTrusted(VarChar, []byte(v))
+}
+
+// NewVarBinary builds a VarBinary Value.
+// The input is a string because it's the most common use case.
+func NewVarBinary(v string) Value {
+ return MakeTrusted(VarBinary, []byte(v))
+}
+
+// NewIntegral builds an integral type from a string representation.
+// The type will be Int64 or Uint64. Int64 will be preferred where possible.
+func NewIntegral(val string) (n Value, err error) {
+ signed, err := strconv.ParseInt(val, 0, 64)
+ if err == nil {
+ return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
+ }
+ unsigned, err := strconv.ParseUint(val, 0, 64)
+ if err != nil {
+ return Value{}, err
+ }
+ return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
+}
+
+// MakeString makes a VarBinary Value.
+func MakeString(val []byte) Value {
+ return MakeTrusted(VarBinary, val)
+}
+
+// BuildValue builds a value from any Go type. sqltypes.Value is
+// also allowed.
+func BuildValue(goval interface{}) (v Value, err error) {
+ // Look for the most common types first.
+ switch goval := goval.(type) {
+ case nil:
+ // no op
+ case []byte:
+ v = MakeTrusted(VarBinary, goval)
+ case int64:
+ v = MakeTrusted(Int64, strconv.AppendInt(nil, int64(goval), 10))
+ case uint64:
+ v = MakeTrusted(Uint64, strconv.AppendUint(nil, uint64(goval), 10))
+ case float64:
+ v = MakeTrusted(Float64, strconv.AppendFloat(nil, goval, 'f', -1, 64))
+ case int:
+ v = MakeTrusted(Int64, strconv.AppendInt(nil, int64(goval), 10))
+ case int8:
+ v = MakeTrusted(Int8, strconv.AppendInt(nil, int64(goval), 10))
+ case int16:
+ v = MakeTrusted(Int16, strconv.AppendInt(nil, int64(goval), 10))
+ case int32:
+ v = MakeTrusted(Int32, strconv.AppendInt(nil, int64(goval), 10))
+ case uint:
+ v = MakeTrusted(Uint64, strconv.AppendUint(nil, uint64(goval), 10))
+ case uint8:
+ v = MakeTrusted(Uint8, strconv.AppendUint(nil, uint64(goval), 10))
+ case uint16:
+ v = MakeTrusted(Uint16, strconv.AppendUint(nil, uint64(goval), 10))
+ case uint32:
+ v = MakeTrusted(Uint32, strconv.AppendUint(nil, uint64(goval), 10))
+ case float32:
+ v = MakeTrusted(Float32, strconv.AppendFloat(nil, float64(goval), 'f', -1, 64))
+ case string:
+ v = MakeTrusted(VarBinary, []byte(goval))
+ case time.Time:
+ v = MakeTrusted(Datetime, []byte(goval.Format("2006-01-02 15:04:05")))
+ case Value:
+ v = goval
+ case *BindVariable:
+ return ValueFromBytes(goval.Type, goval.Value)
+ default:
+ return v, fmt.Errorf("unexpected type %T: %v", goval, goval)
+ }
+ return v, nil
+}
+
+// BuildConverted is like BuildValue except that it tries to
+// convert a string or []byte to an integral if the target type
+// is an integral. We don't perform other implicit conversions
+// because they're unsafe.
+func BuildConverted(typ Type, goval interface{}) (v Value, err error) {
+ if IsIntegral(typ) {
+ switch goval := goval.(type) {
+ case []byte:
+ return ValueFromBytes(typ, goval)
+ case string:
+ return ValueFromBytes(typ, []byte(goval))
+ case Value:
+ if goval.IsQuoted() {
+ return ValueFromBytes(typ, goval.Raw())
+ }
+ }
+ }
+ return BuildValue(goval)
+}
+
+// ValueFromBytes builds a Value using typ and val. It ensures that val
+// matches the requested type. If type is an integral it's converted to
+// a canonical form. Otherwise, the original representation is preserved.
+func ValueFromBytes(typ Type, val []byte) (v Value, err error) {
+ switch {
+ case IsSigned(typ):
+ signed, err := strconv.ParseInt(string(val), 0, 64)
+ if err != nil {
+ return NULL, err
+ }
+ v = MakeTrusted(typ, strconv.AppendInt(nil, signed, 10))
+ case IsUnsigned(typ):
+ unsigned, err := strconv.ParseUint(string(val), 0, 64)
+ if err != nil {
+ return NULL, err
+ }
+ v = MakeTrusted(typ, strconv.AppendUint(nil, unsigned, 10))
+ case IsFloat(typ) || typ == Decimal:
+ _, err := strconv.ParseFloat(string(val), 64)
+ if err != nil {
+ return NULL, err
+ }
+ // After verification, we preserve the original representation.
+ fallthrough
+ default:
+ v = MakeTrusted(typ, val)
+ }
+ return v, nil
+}
+
+// BuildIntegral builds an integral type from a string representation.
+// The type will be Int64 or Uint64. Int64 will be preferred where possible.
+func BuildIntegral(val string) (n Value, err error) {
+ signed, err := strconv.ParseInt(val, 0, 64)
+ if err == nil {
+ return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
+ }
+ unsigned, err := strconv.ParseUint(val, 0, 64)
+ if err != nil {
+ return Value{}, err
+ }
+ return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
+}
+
+// Type returns the type of Value.
+func (v Value) Type() Type {
+ return v.typ
+}
+
+// Raw returns the raw bytes. All types are currently implemented as []byte.
+// You should avoid using this function. If you do, you should treat the
+// bytes as read-only.
+func (v Value) Raw() []byte {
+ return v.val
+}
+
+// Len returns the length.
+func (v Value) Len() int {
+ return len(v.val)
+}
+
+// Values represents the array of Value.
+type Values []Value
+
+// Len returns the total byte length across all values.
+func (vs Values) Len() int {
+ total := 0
+ for _, v := range vs {
+ total += v.Len()
+ }
+ return total
+}
+
+// String returns the raw value as a string.
+func (v Value) String() string {
+ return BytesToString(v.val)
+}
+
+// ToNative converts Value to a native go type.
+// This does not work for sqltypes.Tuple. The function
+// panics if there are inconsistencies.
+func (v Value) ToNative() interface{} {
+ var out interface{}
+ var err error
+ switch {
+ case v.typ == Null:
+ // no-op
+ case IsSigned(v.typ):
+ out, err = v.ParseInt64()
+ case IsUnsigned(v.typ):
+ out, err = v.ParseUint64()
+ case IsFloat(v.typ):
+ out, err = v.ParseFloat64()
+ default:
+ out = v.val
+ }
+ if err != nil {
+ panic(err)
+ }
+ return out
+}
+
+// ParseInt64 will parse a Value into an int64. It does
+// not check the type.
+func (v Value) ParseInt64() (val int64, err error) {
+ return strconv.ParseInt(v.String(), 10, 64)
+}
+
+// ParseUint64 will parse a Value into a uint64. It does
+// not check the type.
+func (v Value) ParseUint64() (val uint64, err error) {
+ return strconv.ParseUint(v.String(), 10, 64)
+}
+
+// ParseFloat64 will parse a Value into a float64. It does
+// not check the type.
+func (v Value) ParseFloat64() (val float64, err error) {
+ return strconv.ParseFloat(v.String(), 64)
+}
+
+// IsNull returns true if Value is null.
+func (v Value) IsNull() bool {
+ return v.typ == Null
+}
+
+// IsIntegral returns true if Value is an integral.
+func (v Value) IsIntegral() bool {
+ return IsIntegral(v.typ)
+}
+
+// IsSigned returns true if Value is a signed integral.
+func (v Value) IsSigned() bool {
+ return IsSigned(v.typ)
+}
+
+// IsUnsigned returns true if Value is an unsigned integral.
+func (v Value) IsUnsigned() bool {
+ return IsUnsigned(v.typ)
+}
+
+// IsFloat returns true if Value is a float.
+func (v Value) IsFloat() bool {
+ return IsFloat(v.typ)
+}
+
+// IsQuoted returns true if Value must be SQL-quoted.
+func (v Value) IsQuoted() bool {
+ return IsQuoted(v.typ)
+}
+
+// IsText returns true if Value is a collatable text.
+func (v Value) IsText() bool {
+ return IsText(v.typ)
+}
+
+// IsBinary returns true if Value is binary.
+func (v Value) IsBinary() bool {
+ return IsBinary(v.typ)
+}
+
+// IsTemporal returns true if Value is time type.
+func (v Value) IsTemporal() bool {
+ return IsTemporal(v.typ)
+}
+
+// ToString returns the raw value as a string, the way MySQL would
+// render it. All types here are byte-backed, so no conversion can fail.
+func (v Value) ToString() string {
+ return BytesToString(v.val)
+}
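
A short sketch of the Value API end to end, using only the constructors defined above:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/query/sqltypes"
)

func main() {
	v := sqltypes.NewInt64(42)
	fmt.Println(v.IsIntegral()) // true
	fmt.Println(v.String())     // "42"
	fmt.Println(v.ToNative())   // int64(42), via ParseInt64

	// BuildValue infers a type from the Go value; strings become VarBinary.
	if bv, err := sqltypes.BuildValue("hello"); err == nil {
		fmt.Println(bv.Type() == sqltypes.VarBinary) // true
	}
}
```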
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index ac8235fd5..051199adb 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -1,7 +1,8 @@
package replication
import (
- "path/filepath"
+ "context"
+ "fmt"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -16,10 +17,10 @@ type Replicator struct {
source *source.FilerSource
}
-func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator {
+func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator {
source := &source.FilerSource{}
- source.Initialize(sourceConfig)
+ source.Initialize(sourceConfig, configPrefix)
dataSink.SetSourceFiler(source)
@@ -29,12 +30,15 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin
}
}
-func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) error {
+func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error {
+ if message.IsFromOtherCluster && r.sink.GetName() == "filer" {
+ return nil
+ }
if !strings.HasPrefix(key, r.source.Dir) {
glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
return nil
}
- newKey := filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
+ newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
glog.V(3).Infof("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
@@ -50,12 +54,17 @@ func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification)
return nil
}
- foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewEntry, message.DeleteChunks)
+ foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
if foundExisting {
glog.V(4).Infof("updated %v", key)
return err
}
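+ // no existing target entry was found: remove any stale leftover first,
+ // then recreate the entry from the source below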
+ err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false)
+ if err != nil {
+ return fmt.Errorf("delete old entry %v: %v", key, err)
+ }
+
glog.V(4).Infof("creating missing %v", key)
return r.sink.CreateEntry(key, message.NewEntry)
}
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 7acf37fa5..aef97c06e 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -35,12 +35,12 @@ func (g *AzureSink) GetSinkToDirectory() string {
return g.dir
}
-func (g *AzureSink) Initialize(configuration util.Configuration) error {
+func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error {
return g.initialize(
- configuration.GetString("account_name"),
- configuration.GetString("account_key"),
- configuration.GetString("container"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"account_name"),
+ configuration.GetString(prefix+"account_key"),
+ configuration.GetString(prefix+"container"),
+ configuration.GetString(prefix+"directory"),
)
}
@@ -78,9 +78,7 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo
key = key + "/"
}
- ctx := context.Background()
-
- if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
+ if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(),
azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
}
@@ -98,15 +96,13 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
-
- ctx := context.Background()
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
// Create a URL that references a to-be-created blob in your
// Azure Storage account's container.
appendBlobURL := g.containerURL.NewAppendBlobURL(key)
- _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+ _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
if err != nil {
return err
}
@@ -119,8 +115,8 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
var writeErr error
- _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
- _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
+ readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
+ _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
})
if readErr != nil {
@@ -136,7 +132,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
-func (g *AzureSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 17f5e39b2..1e7d82ed4 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -31,12 +31,12 @@ func (g *B2Sink) GetSinkToDirectory() string {
return g.dir
}
-func (g *B2Sink) Initialize(configuration util.Configuration) error {
+func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error {
return g.initialize(
- configuration.GetString("b2_account_id"),
- configuration.GetString("b2_master_application_key"),
- configuration.GetString("bucket"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"b2_account_id"),
+ configuration.GetString(prefix+"b2_master_application_key"),
+ configuration.GetString(prefix+"bucket"),
+ configuration.GetString(prefix+"directory"),
)
}
@@ -45,8 +45,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) {
}
func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
- ctx := context.Background()
- client, err := b2.NewClient(ctx, accountId, accountKey)
+ client, err := b2.NewClient(context.Background(), accountId, accountKey)
if err != nil {
return err
}
@@ -66,16 +65,14 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
key = key + "/"
}
- ctx := context.Background()
-
- bucket, err := g.client.Bucket(ctx, g.bucket)
+ bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
- return targetObject.Delete(ctx)
+ return targetObject.Delete(context.Background())
}
@@ -88,17 +85,15 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
-
- ctx := context.Background()
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
- bucket, err := g.client.Bucket(ctx, g.bucket)
+ bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
- writer := targetObject.NewWriter(ctx)
+ writer := targetObject.NewWriter(context.Background())
for _, chunk := range chunkViews {
@@ -108,7 +103,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
var writeErr error
- _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
+ readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
_, err := writer.Write(data)
if err != nil {
writeErr = err
@@ -128,7 +123,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
-func (g *B2Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
diff --git a/weed/replication/sink/filersink/README.txt b/weed/replication/sink/filersink/README.txt
new file mode 100644
index 000000000..4ba0fc752
--- /dev/null
+++ b/weed/replication/sink/filersink/README.txt
@@ -0,0 +1,12 @@
+How replication works
+======
+
+All metadata changes within the current cluster are published to a message queue.
+
+If a metadata change came from another cluster, it is not published to the message queue again.
+
+This avoids replication loops, so active<=>active replication is possible.
+
+
+All metadata changes are still published as metadata change events,
+so all mounts listening for metadata changes will get updated.
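+
+A minimal sketch of the cross-cluster guard (names follow the Replicate
+changes in this patch; treat it as illustrative):
+
+ if message.IsFromOtherCluster && r.sink.GetName() == "filer" {
+ // already replicated once; re-applying on a filer sink would loop
+ return nil
+ }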
\ No newline at end of file
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index c14566723..bde29176c 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -3,41 +3,46 @@ package filersink
import (
"context"
"fmt"
- "strings"
"sync"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/security"
)
-func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
if len(sourceChunks) == 0 {
return
}
+
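+ // pre-size the result and assign by index below, so concurrent goroutines
+ // keep the replicated chunks in source order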
+ replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))
+
var wg sync.WaitGroup
- for _, sourceChunk := range sourceChunks {
+ for chunkIndex, sourceChunk := range sourceChunks {
wg.Add(1)
- go func(chunk *filer_pb.FileChunk) {
+ go func(chunk *filer_pb.FileChunk, index int) {
defer wg.Done()
- replicatedChunk, e := fs.replicateOneChunk(chunk)
+ replicatedChunk, e := fs.replicateOneChunk(chunk, dir)
if e != nil {
err = e
}
- replicatedChunks = append(replicatedChunks, replicatedChunk)
- }(sourceChunk)
+ replicatedChunks[index] = replicatedChunk
+ }(sourceChunk, chunkIndex)
}
wg.Wait()
return
}
-func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
- fileId, err := fs.fetchAndWrite(sourceChunk)
+ fileId, err := fs.fetchAndWrite(sourceChunk, dir)
if err != nil {
- return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err)
+ return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
}
return &filer_pb.FileChunk{
@@ -46,21 +51,24 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_
Size: sourceChunk.Size,
Mtime: sourceChunk.Mtime,
ETag: sourceChunk.ETag,
- SourceFileId: sourceChunk.FileId,
+ SourceFileId: sourceChunk.GetFileIdString(),
+ CipherKey: sourceChunk.CipherKey,
+ IsCompressed: sourceChunk.IsCompressed,
}, nil
}
-func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
- filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.FileId)
+ filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
if err != nil {
- return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err)
+ return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
}
defer readCloser.Close()
var host string
+ var auth security.EncodedJwt
- if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -68,6 +76,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
+ ParentPath: dir,
}
resp, err := client.AssignVolume(context.Background(), request)
@@ -75,8 +84,11 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
- fileId, host = resp.FileId, resp.Url
+ fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
return nil
}); err != nil {
@@ -87,8 +99,8 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
- uploadResult, err := operation.Upload(fileUrl, filename, readCloser,
- "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, "")
+ // fetch data as is, regardless of whether it is encrypted or not
+ uploadResult, err, _ := operation.Upload(fileUrl, filename, false, readCloser, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
@@ -101,23 +113,16 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
return
}
-func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSink{})
- grpcConnection, err := util.GrpcDial(fs.grpcAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
- }
- defer grpcConnection.Close()
+func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, fs.grpcAddress, fs.grpcDialOption)
- return fn(client)
}
-
-func volumeId(fileId string) string {
- lastCommaIndex := strings.LastIndex(fileId, ",")
- if lastCommaIndex > 0 {
- return fileId[:lastCommaIndex]
- }
- return fileId
+func (fs *FilerSink) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
}
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index 2e9cc86d1..50721a8f3 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -4,6 +4,10 @@ import (
"context"
"fmt"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/security"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -13,13 +17,14 @@ import (
)
type FilerSink struct {
- filerSource *source.FilerSource
- grpcAddress string
- dir string
- replication string
- collection string
- ttlSec int32
- dataCenter string
+ filerSource *source.FilerSource
+ grpcAddress string
+ dir string
+ replication string
+ collection string
+ ttlSec int32
+ dataCenter string
+ grpcDialOption grpc.DialOption
}
func init() {
@@ -34,13 +39,13 @@ func (fs *FilerSink) GetSinkToDirectory() string {
return fs.dir
}
-func (fs *FilerSink) Initialize(configuration util.Configuration) error {
+func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
return fs.initialize(
- configuration.GetString("grpcAddress"),
- configuration.GetString("directory"),
- configuration.GetString("replication"),
- configuration.GetString("collection"),
- configuration.GetInt("ttlSec"),
+ configuration.GetString(prefix+"grpcAddress"),
+ configuration.GetString(prefix+"directory"),
+ configuration.GetString(prefix+"replication"),
+ configuration.GetString(prefix+"collection"),
+ configuration.GetInt(prefix+"ttlSec"),
)
}
@@ -55,37 +60,28 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
fs.replication = replication
fs.collection = collection
fs.ttlSec = int32(ttlSec)
+ fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
return nil
}
func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
- return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- dir, name := filer2.FullPath(key).DirAndName()
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir,
- Name: name,
- IsDeleteData: deleteIncludeChunks,
- }
+ dir, name := util.FullPath(key).DirAndName()
- glog.V(1).Infof("delete entry: %v", request)
- _, err := client.DeleteEntry(context.Background(), request)
- if err != nil {
- glog.V(0).Infof("delete entry %s: %v", key, err)
- return fmt.Errorf("delete entry %s: %v", key, err)
- }
-
- return nil
- })
+ glog.V(1).Infof("delete entry: %v", key)
+ err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, false, false, true)
+ if err != nil {
+ glog.V(0).Infof("delete entry %s: %v", key, err)
+ return fmt.Errorf("delete entry %s: %v", key, err)
+ }
+ return nil
}
func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
- return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- dir, name := filer2.FullPath(key).DirAndName()
- ctx := context.Background()
+ dir, name := util.FullPath(key).DirAndName()
// look up existing entry
lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
@@ -93,14 +89,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
Name: name,
}
glog.V(1).Infof("lookup: %v", lookupRequest)
- if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil {
- if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
+ if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
+ if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
glog.V(0).Infof("already replicated %s", key)
return nil
}
}
- replicatedChunks, err := fs.replicateChunks(entry.Chunks)
+ replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir)
if err != nil {
glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
@@ -117,10 +113,11 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
Attributes: entry.Attributes,
Chunks: replicatedChunks,
},
+ IsFromOtherCluster: true,
}
glog.V(1).Infof("create: %v", request)
- if _, err := client.CreateEntry(ctx, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("create entry %s: %v", key, err)
return fmt.Errorf("create entry %s: %v", key, err)
}
@@ -129,15 +126,13 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
})
}
-func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
-
- ctx := context.Background()
+func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
- dir, name := filer2.FullPath(key).DirAndName()
+ dir, name := util.FullPath(key).DirAndName()
// read existing entry
var existingEntry *filer_pb.Entry
- err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
@@ -145,7 +140,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
}
glog.V(4).Infof("lookup entry: %v", request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
+ resp, err := filer_pb.LookupEntry(client, request)
if err != nil {
glog.V(0).Infof("lookup %s: %v", key, err)
return err
@@ -166,7 +161,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
// skip if already changed
// this usually happens when the messages are not ordered
glog.V(0).Infof("late updates %s", key)
- } else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) {
+ } else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
// skip if no change
// this usually happens when retrying the replication
glog.V(0).Infof("already replicated %s", key)
@@ -177,11 +172,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
// delete the chunks that are deleted from the source
if deleteIncludeChunks {
// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
- existingEntry.Chunks = minusChunks(existingEntry.Chunks, deletedChunks)
+ existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks)
}
// replicate the chunks that are new in the source
- replicatedChunks, err := fs.replicateChunks(newChunks)
+ replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath)
if err != nil {
return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
}
@@ -189,14 +184,15 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
}
// save updated meta data
- return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
- Directory: dir,
- Entry: existingEntry,
+ Directory: newParentPath,
+ Entry: existingEntry,
+ IsFromOtherCluster: true,
}
- if _, err := client.UpdateEntry(ctx, request); err != nil {
+ if _, err := client.UpdateEntry(context.Background(), request); err != nil {
return fmt.Errorf("update existingEntry %s: %v", key, err)
}
@@ -205,23 +201,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
}
func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) {
- deletedChunks = minusChunks(oldEntry.Chunks, newEntry.Chunks)
- newChunks = minusChunks(newEntry.Chunks, oldEntry.Chunks)
- return
-}
-
-func minusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
- for _, a := range as {
- found := false
- for _, b := range bs {
- if a.FileId == b.FileId {
- found = true
- break
- }
- }
- if !found {
- delta = append(delta, a)
- }
- }
+ deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks)
+ newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks)
return
}
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index c1beefc33..bb5a54272 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -6,13 +6,14 @@ import (
"os"
"cloud.google.com/go/storage"
+ "google.golang.org/api/option"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/api/option"
)
type GcsSink struct {
@@ -34,11 +35,11 @@ func (g *GcsSink) GetSinkToDirectory() string {
return g.dir
}
-func (g *GcsSink) Initialize(configuration util.Configuration) error {
+func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error {
return g.initialize(
- configuration.GetString("google_application_credentials"),
- configuration.GetString("bucket"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"google_application_credentials"),
+ configuration.GetString(prefix+"bucket"),
+ configuration.GetString(prefix+"directory"),
)
}
@@ -50,7 +51,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
g.bucket = bucketName
g.dir = dir
- ctx := context.Background()
// Creates a client.
if google_application_credentials == "" {
var found bool
@@ -59,7 +59,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
}
}
- client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials))
+ client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
@@ -90,11 +90,9 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
-
- ctx := context.Background()
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
- wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
+ wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
for _, chunk := range chunkViews {
@@ -103,7 +101,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
return err
}
- _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
+ err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
wc.Write(data)
})
@@ -121,7 +119,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
-func (g *GcsSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
// TODO improve efficiency
return false, nil
}
diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go
index 0a86139d3..6d85f660a 100644
--- a/weed/replication/sink/replication_sink.go
+++ b/weed/replication/sink/replication_sink.go
@@ -8,10 +8,10 @@ import (
type ReplicationSink interface {
GetName() string
- Initialize(configuration util.Configuration) error
+ Initialize(configuration util.Configuration, prefix string) error
DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error
CreateEntry(key string, entry *filer_pb.Entry) error
- UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
+ UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
GetSinkToDirectory() string
SetSourceFiler(s *source.FilerSource)
}
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 0a4e78318..d7af105b8 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -1,6 +1,7 @@
package S3Sink
import (
+ "context"
"fmt"
"strings"
"sync"
@@ -10,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -23,6 +25,7 @@ type S3Sink struct {
region string
bucket string
dir string
+ endpoint string
filerSource *source.FilerSource
}
@@ -38,16 +41,18 @@ func (s3sink *S3Sink) GetSinkToDirectory() string {
return s3sink.dir
}
-func (s3sink *S3Sink) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region"))
- glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket"))
- glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory"))
+func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
+ glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
+ glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
+ glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
return s3sink.initialize(
- configuration.GetString("aws_access_key_id"),
- configuration.GetString("aws_secret_access_key"),
- configuration.GetString("region"),
- configuration.GetString("bucket"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"aws_access_key_id"),
+ configuration.GetString(prefix+"aws_secret_access_key"),
+ configuration.GetString(prefix+"region"),
+ configuration.GetString(prefix+"bucket"),
+ configuration.GetString(prefix+"directory"),
+ configuration.GetString(prefix+"endpoint"),
)
}
@@ -55,16 +60,18 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) {
s3sink.filerSource = s
}
-func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, bucket, dir string) error {
+func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error {
s3sink.region = region
s3sink.bucket = bucket
s3sink.dir = dir
+ s3sink.endpoint = endpoint
config := &aws.Config{
- Region: aws.String(s3sink.region),
+ Region: aws.String(s3sink.region),
+ Endpoint: aws.String(s3sink.endpoint),
}
- if awsAccessKeyId != "" && aswSecretAccessKey != "" {
- config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "")
+ if awsAccessKeyId != "" && awsSecretAccessKey != "" {
+ config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
}
sess, err := session.NewSession(config)
@@ -89,7 +96,6 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b
}
func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
-
key = cleanKey(key)
if entry.IsDirectory {
@@ -102,21 +108,22 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
+
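+ // one slot per chunk: indexed writes keep the completed parts in ascending
+ // part-number order for CompleteMultipartUpload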
+ parts := make([]*s3.CompletedPart, len(chunkViews))
- var parts []*s3.CompletedPart
var wg sync.WaitGroup
for chunkIndex, chunk := range chunkViews {
partId := chunkIndex + 1
wg.Add(1)
- go func(chunk *filer2.ChunkView) {
+ go func(chunk *filer2.ChunkView, index int) {
defer wg.Done()
if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
err = uploadErr
} else {
- parts = append(parts, part)
+ parts[index] = part
}
- }(chunk)
+ }(chunk, chunkIndex)
}
wg.Wait()
@@ -125,11 +132,11 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
return err
}
- return s3sink.completeMultipartUpload(key, uploadId, parts)
+ return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts)
}
-func (s3sink *S3Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index 5c4be7aee..c5c65ed5c 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -2,6 +2,7 @@ package S3Sink
import (
"bytes"
+ "context"
"fmt"
"io"
@@ -81,7 +82,7 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error {
}
// To complete multipart upload
-func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.CompletedPart) error {
+func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error {
input := &s3.CompleteMultipartUploadInput{
Bucket: aws.String(s3sink.bucket),
Key: aws.String(key),
@@ -161,6 +162,6 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, e
return nil, err
}
buf := make([]byte, chunk.Size)
- util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true)
+ util.ReadUrl(fileUrl, nil, false, false, chunk.Offset, int(chunk.Size), buf)
return bytes.NewReader(buf), nil
}
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index efe71e706..69c23fe82 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -7,6 +7,11 @@ import (
"net/http"
"strings"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -17,20 +22,22 @@ type ReplicationSource interface {
}
type FilerSource struct {
- grpcAddress string
- Dir string
+ grpcAddress string
+ grpcDialOption grpc.DialOption
+ Dir string
}
-func (fs *FilerSource) Initialize(configuration util.Configuration) error {
+func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error {
return fs.initialize(
- configuration.GetString("grpcAddress"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"grpcAddress"),
+ configuration.GetString(prefix+"directory"),
)
}
func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
fs.grpcAddress = grpcAddress
fs.Dir = dir
+ fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
return nil
}
@@ -40,7 +47,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
vid := volumeId(part)
- err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read lookup volume id locations: %v", vid)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
@@ -84,17 +91,19 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
return filename, header, readCloser, err
}
-func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSource{})
- grpcConnection, err := util.GrpcDial(fs.grpcAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
- }
- defer grpcConnection.Close()
+func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, fs.grpcAddress, fs.grpcDialOption)
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+}
- return fn(client)
+func (fs *FilerSource) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
}
func volumeId(fileId string) string {
diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go
index f0100f4de..1dd386ba7 100644
--- a/weed/replication/sub/notification_aws_sqs.go
+++ b/weed/replication/sub/notification_aws_sqs.go
@@ -27,24 +27,24 @@ func (k *AwsSqsInput) GetName() string {
return "aws_sqs"
}
-func (k *AwsSqsInput) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region"))
- glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name"))
+func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+ glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
- configuration.GetString("aws_access_key_id"),
- configuration.GetString("aws_secret_access_key"),
- configuration.GetString("region"),
- configuration.GetString("sqs_queue_name"),
+ configuration.GetString(prefix+"aws_access_key_id"),
+ configuration.GetString(prefix+"aws_secret_access_key"),
+ configuration.GetString(prefix+"region"),
+ configuration.GetString(prefix+"sqs_queue_name"),
)
}
-func (k *AwsSqsInput) initialize(awsAccessKeyId, aswSecretAccessKey, region, queueName string) (err error) {
+func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) {
config := &aws.Config{
Region: aws.String(region),
}
- if awsAccessKeyId != "" && aswSecretAccessKey != "" {
- config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "")
+ if awsAccessKeyId != "" && awsSecretAccessKey != "" {
+ config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
}
sess, err := session.NewSession(config)
@@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
}
// process the message
- key = *result.Messages[0].Attributes["key"]
+ // fmt.Printf("messages: %+v\n", result.Messages[0])
+ keyValue := result.Messages[0].MessageAttributes["key"]
+ key = *keyValue.StringValue
text := *result.Messages[0].Body
message = &filer_pb.EventNotification{}
err = proto.UnmarshalText(text, message)
diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
new file mode 100644
index 000000000..9726096e5
--- /dev/null
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -0,0 +1,50 @@
+package sub
+
+import (
+ "context"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/golang/protobuf/proto"
+ "gocloud.dev/pubsub"
+ _ "gocloud.dev/pubsub/awssnssqs"
+ // _ "gocloud.dev/pubsub/azuresb"
+ _ "gocloud.dev/pubsub/gcppubsub"
+ _ "gocloud.dev/pubsub/natspubsub"
+ _ "gocloud.dev/pubsub/rabbitpubsub"
+)
+
+func init() {
+ NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{})
+}
+
+type GoCDKPubSubInput struct {
+ sub *pubsub.Subscription
+}
+
+func (k *GoCDKPubSubInput) GetName() string {
+ return "gocdk_pub_sub"
+}
+
+func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
+ subURL := configuration.GetString(prefix + "sub_url")
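+ // sub_url is a Go CDK subscription URL; illustrative examples:
+ // "rabbit://my_queue" or "gcppubsub://projects/my-project/subscriptions/my-sub"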
+ glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
+ sub, err := pubsub.OpenSubscription(context.Background(), subURL)
+ if err != nil {
+ return err
+ }
+ k.sub = sub
+ return nil
+}
+
+func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+ msg, err := k.sub.Receive(context.Background())
+ if err != nil {
+ return "", nil, err
+ }
+ key = msg.Metadata["key"]
+ message = &filer_pb.EventNotification{}
+ err = proto.Unmarshal(msg.Body, message)
+ if err != nil {
+ return "", nil, err
+ }
+ // ack so the broker does not redeliver a message we have fully consumed
+ msg.Ack()
+ return key, message, nil
+}
diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go
index ad6b42a2e..a950bb42b 100644
--- a/weed/replication/sub/notification_google_pub_sub.go
+++ b/weed/replication/sub/notification_google_pub_sub.go
@@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string {
return "google_pub_sub"
}
-func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id"))
- glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic"))
+func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+ glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetString("google_application_credentials"),
- configuration.GetString("project_id"),
- configuration.GetString("topic"),
+ configuration.GetString(prefix+"google_application_credentials"),
+ configuration.GetString(prefix+"project_id"),
+ configuration.GetString(prefix+"topic"),
)
}
diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go
index 1a86a8307..fa9cfad9b 100644
--- a/weed/replication/sub/notification_kafka.go
+++ b/weed/replication/sub/notification_kafka.go
@@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string {
return "kafka"
}
-func (k *KafkaInput) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts"))
- glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic"))
+func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+ glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetStringSlice("hosts"),
- configuration.GetString("topic"),
- configuration.GetString("offsetFile"),
- configuration.GetInt("offsetSaveIntervalSeconds"),
+ configuration.GetStringSlice(prefix+"hosts"),
+ configuration.GetString(prefix+"topic"),
+ configuration.GetString(prefix+"offsetFile"),
+ configuration.GetInt(prefix+"offsetSaveIntervalSeconds"),
)
}
diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go
index 66fbef824..8a2668f98 100644
--- a/weed/replication/sub/notifications.go
+++ b/weed/replication/sub/notifications.go
@@ -9,7 +9,7 @@ type NotificationInput interface {
// GetName gets the name to locate the configuration in sync.toml file
GetName() string
// Initialize initializes the file store
- Initialize(configuration util.Configuration) error
+ Initialize(configuration util.Configuration, prefix string) error
ReceiveMessage() (key string, message *filer_pb.EventNotification, err error)
}
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
new file mode 100644
index 000000000..c1e8dff1e
--- /dev/null
+++ b/weed/s3api/auth_credentials.go
@@ -0,0 +1,188 @@
+package s3api
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/gorilla/mux"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+type Action string
+
+const (
+ ACTION_READ = "Read"
+ ACTION_WRITE = "Write"
+ ACTION_ADMIN = "Admin"
+)
+
+type Iam interface {
+ Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc
+}
+
+type IdentityAccessManagement struct {
+ identities []*Identity
+ domain string
+}
+
+type Identity struct {
+ Name string
+ Credentials []*Credential
+ Actions []Action
+}
+
+type Credential struct {
+ AccessKey string
+ SecretKey string
+}
+
+func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccessManagement {
+ iam := &IdentityAccessManagement{
+ domain: domain,
+ }
+ if fileName == "" {
+ return iam
+ }
+ if err := iam.loadS3ApiConfiguration(fileName); err != nil {
+ glog.Fatalf("fail to load config file %s: %v", fileName, err)
+ }
+ return iam
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) error {
+
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+
+ rawData, readErr := ioutil.ReadFile(fileName)
+ if readErr != nil {
+ glog.Warningf("fail to read %s : %v", fileName, readErr)
+ return fmt.Errorf("fail to read %s : %v", fileName, readErr)
+ }
+
+ glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+ if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil {
+ glog.Warningf("unmarshal error: %v", err)
+ return fmt.Errorf("unmarshal %s error: %v", fileName, err)
+ }
+
+ for _, ident := range s3ApiConfiguration.Identities {
+ t := &Identity{
+ Name: ident.Name,
+ Credentials: nil,
+ Actions: nil,
+ }
+ for _, action := range ident.Actions {
+ t.Actions = append(t.Actions, Action(action))
+ }
+ for _, cred := range ident.Credentials {
+ t.Credentials = append(t.Credentials, &Credential{
+ AccessKey: cred.AccessKey,
+ SecretKey: cred.SecretKey,
+ })
+ }
+ iam.identities = append(iam.identities, t)
+ }
+
+ return nil
+}
+
+func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {
+ for _, ident := range iam.identities {
+ for _, cred := range ident.Credentials {
+ if cred.AccessKey == accessKey {
+ return ident, cred, true
+ }
+ }
+ }
+ return nil, nil, false
+}
+
+func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
+
+ if len(iam.identities) == 0 {
+ return f
+ }
+
+ return func(w http.ResponseWriter, r *http.Request) {
+ errCode := iam.authRequest(r, action)
+ if errCode == ErrNone {
+ f(w, r)
+ return
+ }
+ writeErrorResponse(w, errCode, r.URL)
+ }
+}
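+
+// Illustrative wiring (handler and route names are hypothetical):
+//
+// router.Methods("GET").Path("/{bucket}/{object}").
+// HandlerFunc(iam.Auth(s3a.GetObjectHandler, ACTION_READ))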
+
+// check whether the request has valid access keys
+func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) ErrorCode {
+ var identity *Identity
+ var s3Err ErrorCode
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return ErrNotImplemented
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return ErrNotImplemented
+ case authTypeAnonymous:
+ return ErrAccessDenied
+ default:
+ return ErrNotImplemented
+ }
+
+ glog.V(3).Infof("auth error: %v", s3Err)
+ if s3Err != ErrNone {
+ return s3Err
+ }
+
+ glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ if !identity.canDo(action, bucket) {
+ return ErrAccessDenied
+ }
+
+ return ErrNone
+
+}
+
+func (identity *Identity) canDo(action Action, bucket string) bool {
+ for _, a := range identity.Actions {
+ if a == "Admin" {
+ return true
+ }
+ }
+ for _, a := range identity.Actions {
+ if a == action {
+ return true
+ }
+ }
+ if bucket == "" {
+ return false
+ }
+ limitedByBucket := string(action) + ":" + bucket
+ for _, a := range identity.Actions {
+ if string(a) == limitedByBucket {
+ return true
+ }
+ }
+ return false
+}
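+
+// Illustrative example: an identity with Actions ["Read", "Write:images"]
+// may read any bucket, but may write only to the "images" bucket:
+//
+// ident := &Identity{Actions: []Action{"Read", "Write:images"}}
+// ident.canDo(ACTION_READ, "logs") // true
+// ident.canDo(ACTION_WRITE, "logs") // false
+// ident.canDo(ACTION_WRITE, "images") // true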
diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go
new file mode 100644
index 000000000..c6f76560c
--- /dev/null
+++ b/weed/s3api/auth_credentials_test.go
@@ -0,0 +1,68 @@
+package s3api
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/jsonpb"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+func TestIdentityListFileFormat(t *testing.T) {
+
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+
+ identity1 := &iam_pb.Identity{
+ Name: "some_name",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_ADMIN,
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+ identity2 := &iam_pb.Identity{
+ Name: "some_read_only_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key1",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ },
+ }
+ identity3 := &iam_pb.Identity{
+ Name: "some_normal_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key2",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3)
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+ text, _ := m.MarshalToString(s3ApiConfiguration)
+
+ println(text)
+
+}
diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go
new file mode 100644
index 000000000..151a9ec26
--- /dev/null
+++ b/weed/s3api/auth_signature_v2.go
@@ -0,0 +1,412 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Whitelisted query-string resources used in signature-V2 calculation.
+// The list must remain alphabetically sorted.
+var resourceList = []string{
+ "acl",
+ "delete",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// Verify if request has valid AWS Signature Version '2'.
+func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, ErrorCode) {
+ if isRequestSignatureV2(r) {
+ return iam.doesSignV2Match(r)
+ }
+ return iam.doesPresignV2SignatureMatch(r)
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// doesSignV2Match - Verify authorization header with calculated header in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html
+// returns true if matches, false otherwise. if error is not nil then it is always false
+
+func validateV2AuthHeader(v2Auth string) (accessKey string, errCode ErrorCode) {
+ if v2Auth == "" {
+ return "", ErrAuthHeaderEmpty
+ }
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v2Auth, signV2Algorithm) {
+ return "", ErrSignatureVersionNotSupported
+ }
+
+ // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string).
+ // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
+ authFields := strings.Split(v2Auth, " ")
+ if len(authFields) != 2 {
+ return "", ErrMissingFields
+ }
+
+ // Then split on ":" to separate the AWSAccessKeyId and the Signature strings.
+ keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
+ if len(keySignFields) != 2 {
+ return "", ErrMissingFields
+ }
+
+ return keySignFields[0], ErrNone
+}
+
+func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, ErrorCode) {
+ v2Auth := r.Header.Get("Authorization")
+
+ accessKey, apiError := validateV2AuthHeader(v2Auth)
+ if apiError != ErrNone {
+ return nil, apiError
+ }
+
+ // Access credentials.
+ // Validate if access key id same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // r.RequestURI will have raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ unescapedQueries, err := unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, ErrInvalidQueryParams
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, ErrInvalidRequest
+ }
+
+ prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey)
+ if !strings.HasPrefix(v2Auth, prefix) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ v2Auth = v2Auth[len(prefix):]
+ expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header)
+ if !compareSignatureV2(v2Auth, expectedAuth) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ return ident, ErrNone
+}
+
+// doesPresignV2SignatureMatch - Verify query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+// returns ErrNone if matches. S3 errors otherwise.
+func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, ErrorCode) {
+
+ // r.RequestURI will have raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ var (
+ filteredQueries []string
+ gotSignature string
+ expires string
+ accessKey string
+ err error
+ )
+
+ var unescapedQueries []string
+ unescapedQueries, err = unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, ErrInvalidQueryParams
+ }
+
+ // Extract the necessary values from presigned query, construct a list of new filtered queries.
+ for _, query := range unescapedQueries {
+ keyval := strings.SplitN(query, "=", 2)
+ if len(keyval) != 2 {
+ return nil, ErrInvalidQueryParams
+ }
+ switch keyval[0] {
+ case "AWSAccessKeyId":
+ accessKey = keyval[1]
+ case "Signature":
+ gotSignature = keyval[1]
+ case "Expires":
+ expires = keyval[1]
+ default:
+ filteredQueries = append(filteredQueries, query)
+ }
+ }
+
+ // Invalid values returns error.
+ if accessKey == "" || gotSignature == "" || expires == "" {
+ return nil, ErrInvalidQueryParams
+ }
+
+ // Validate if access key id same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // Make sure the request has not expired.
+ expiresInt, err := strconv.ParseInt(expires, 10, 64)
+ if err != nil {
+ return nil, ErrMalformedExpires
+ }
+
+ // Check if the presigned URL has expired.
+ if expiresInt < time.Now().UTC().Unix() {
+ return nil, ErrExpiredPresignRequest
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, ErrInvalidRequest
+ }
+
+ expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
+ if !compareSignatureV2(gotSignature, expectedSignature) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+
+ return ident, ErrNone
+}
+
+// unescapeQueries splits the encodedQuery string into an unescaped list of
+// query params, returning an error if any value fails to unescape.
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) {
+ for _, query := range strings.Split(encodedQuery, "&") {
+ var unescapedQuery string
+ unescapedQuery, err = url.QueryUnescape(query)
+ if err != nil {
+ return nil, err
+ }
+ unescapedQueries = append(unescapedQueries, unescapedQuery)
+ }
+ return unescapedQueries, nil
+}
+
+// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
+func getResource(path string, host string, domain string) (string, error) {
+ if domain == "" {
+ return path, nil
+ }
+ // If virtual-host-style is enabled construct the "resource" properly.
+ if strings.Contains(host, ":") {
+ // In bucket.mydomain.com:9000, strip out :9000
+ var err error
+ if host, _, err = net.SplitHostPort(host); err != nil {
+ return "", err
+ }
+ }
+ if !strings.HasSuffix(host, "."+domain) {
+ return path, nil
+ }
+ bucket := strings.TrimSuffix(host, "."+domain)
+ return "/" + pathJoin(bucket, path), nil
+}
+
+// pathJoin - like path.Join() but retains trailing "/" of the last element
+func pathJoin(elem ...string) string {
+ trailingSlash := ""
+ if len(elem) > 0 {
+ if strings.HasSuffix(elem[len(elem)-1], "/") {
+ trailingSlash = "/"
+ }
+ }
+ return path.Join(elem...) + trailingSlash
+}
+
+// Return the signature v2 of a given request.
+func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "")
+ signature := calculateSignatureV2(stringToSign, cred.SecretKey)
+ return signature
+}
+
+// Return the string to sign under two different conditions:
+// - if expires is set, the string to sign includes the expires date instead of the Date header.
+// - if expires is empty, the string to sign includes the Date header instead.
+func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string {
+ canonicalHeaders := canonicalizedAmzHeadersV2(headers)
+ if len(canonicalHeaders) > 0 {
+ canonicalHeaders += "\n"
+ }
+
+ date := expires // Date is set to expires date for presign operations.
+ if date == "" {
+ // If expires date is empty then request header Date is used.
+ date = headers.Get("Date")
+ }
+
+ // From the Amazon docs:
+ //
+ // StringToSign = HTTP-Verb + "\n" +
+ // Content-Md5 + "\n" +
+ // Content-Type + "\n" +
+ // Date/Expires + "\n" +
+ // CanonicalizedProtocolHeaders +
+ // CanonicalizedResource;
+ stringToSign := strings.Join([]string{
+ method,
+ headers.Get("Content-MD5"),
+ headers.Get("Content-Type"),
+ date,
+ canonicalHeaders,
+ }, "\n")
+
+ return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery)
+}
+
+// Return canonical resource string.
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string {
+ queries := strings.Split(encodedQuery, "&")
+ keyval := make(map[string]string)
+ for _, query := range queries {
+ key := query
+ val := ""
+ index := strings.Index(query, "=")
+ if index != -1 {
+ key = query[:index]
+ val = query[index+1:]
+ }
+ keyval[key] = val
+ }
+
+ var canonicalQueries []string
+ for _, key := range resourceList {
+ val, ok := keyval[key]
+ if !ok {
+ continue
+ }
+ if val == "" {
+ canonicalQueries = append(canonicalQueries, key)
+ continue
+ }
+ canonicalQueries = append(canonicalQueries, key+"="+val)
+ }
+
+ // The queries are already sorted because resourceList is sorted; if
+ // canonicalQueries is empty, strings.Join returns an empty string.
+ canonicalQuery := strings.Join(canonicalQueries, "&")
+ if canonicalQuery != "" {
+ return encodedResource + "?" + canonicalQuery
+ }
+ return encodedResource
+}
+
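+// Illustrative example, assuming resourceList contains the standard
+// sub-resources such as "acl" and "uploads": encodedResource "/bucket/object"
+// with encodedQuery "uploads=&foo=bar" canonicalizes to "/bucket/object?uploads",
+// because "foo" is not a signed sub-resource.
+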
+// Return canonical headers.
+func canonicalizedAmzHeadersV2(headers http.Header) string {
+ var keys []string
+ keyval := make(map[string]string)
+ for key := range headers {
+ lkey := strings.ToLower(key)
+ if !strings.HasPrefix(lkey, "x-amz-") {
+ continue
+ }
+ keys = append(keys, lkey)
+ keyval[lkey] = strings.Join(headers[key], ",")
+ }
+ sort.Strings(keys)
+ var canonicalHeaders []string
+ for _, key := range keys {
+ canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key])
+ }
+ return strings.Join(canonicalHeaders, "\n")
+}
+
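+// e.g. headers {"X-Amz-Acl": ["public-read"], "X-Amz-Meta-Tag": ["a", "b"]}
+// canonicalize to "x-amz-acl:public-read\nx-amz-meta-tag:a,b".
+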
+func calculateSignatureV2(stringToSign string, secret string) string {
+ hm := hmac.New(sha1.New, []byte(secret))
+ hm.Write([]byte(stringToSign))
+ return base64.StdEncoding.EncodeToString(hm.Sum(nil))
+}
+
+// compareSignatureV2 returns true if and only if both signatures
+// are equal. The signatures are expected to be base64 encoded strings
+// according to the AWS S3 signature V2 spec.
+func compareSignatureV2(sig1, sig2 string) bool {
+ // Decoding the signature strings into their binary byte-sequence representation
+ // is required because the Base64 encoding of a value is not unique:
+ // for example "aGVsbG8=" and "aGVsbG8=\r" decode to the same byte slice.
+ signature1, err := base64.StdEncoding.DecodeString(sig1)
+ if err != nil {
+ return false
+ }
+ signature2, err := base64.StdEncoding.DecodeString(sig2)
+ if err != nil {
+ return false
+ }
+ return subtle.ConstantTimeCompare(signature1, signature2) == 1
+}
+
+// Return signature-v2 for the presigned request.
+func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires)
+ return calculateSignatureV2(stringToSign, cred.SecretKey)
+}
diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go
new file mode 100644
index 000000000..cdfd8be1d
--- /dev/null
+++ b/weed/s3api/auth_signature_v4.go
@@ -0,0 +1,720 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from the minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/hex"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, ErrorCode) {
+ sha256sum := getContentSha256Cksum(r)
+ switch {
+ case isRequestSignatureV4(r):
+ return iam.doesSignatureMatch(sha256sum, r)
+ case isRequestPresignedSignatureV4(r):
+ return iam.doesPresignedSignatureMatch(sha256sum, r)
+ }
+ return nil, ErrAccessDenied
+}
+
+// Streaming AWS Signature Version '4' constants.
+const (
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD"
+
+ // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
+ // client did not calculate sha256 of the payload.
+ unsignedPayload = "UNSIGNED-PAYLOAD"
+)
+
+// Returns SHA256 for calculating canonical-request.
+func getContentSha256Cksum(r *http.Request) string {
+ var (
+ defaultSha256Cksum string
+ v []string
+ ok bool
+ )
+
+ // For a presigned request we look at the query param for sha256.
+ if isRequestPresignedSignatureV4(r) {
+ // X-Amz-Content-Sha256, if not set in presigned requests, checksum
+ // will default to 'UNSIGNED-PAYLOAD'.
+ defaultSha256Cksum = unsignedPayload
+ v, ok = r.URL.Query()["X-Amz-Content-Sha256"]
+ if !ok {
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+ } else {
+ // X-Amz-Content-Sha256, if not set in signed requests, checksum
+ // will default to sha256([]byte("")).
+ defaultSha256Cksum = emptySHA256
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+
+ // We found 'X-Amz-Content-Sha256'; return the captured value.
+ if ok {
+ return v[0]
+ }
+
+ // We couldn't find 'X-Amz-Content-Sha256'.
+ return defaultSha256Cksum
+}
+
+// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, err := parseSignV4(v4Auth)
+ if err != ErrNone {
+ return nil, err
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != ErrNone {
+ return nil, errCode
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // Extract the date; if not present, return an error.
+ var date string
+ if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" {
+ if date = r.Header.Get("Date"); date == "" {
+ return nil, ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ t, e := time.Parse(iso8601Format, date)
+ if e != nil {
+ return nil, ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify if the signatures match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+
+ // Return error none.
+ return identity, ErrNone
+}
+
+// credentialHeader data type represents structured form of Credential
+// string from authorization header.
+type credentialHeader struct {
+ accessKey string
+ scope struct {
+ date time.Time
+ region string
+ service string
+ request string
+ }
+}
+
+// signValues data type represents structured form of AWS Signature V4 header.
+type signValues struct {
+ Credential credentialHeader
+ SignedHeaders []string
+ Signature string
+}
+
+// Return scope string.
+func (c credentialHeader) getScope() string {
+ return strings.Join([]string{
+ c.scope.date.Format(yyyymmdd),
+ c.scope.region,
+ c.scope.service,
+ c.scope.request,
+ }, "/")
+}
+
+// Authorization: algorithm Credential=accessKeyID/credScope, \
+// SignedHeaders=signedHeaders, Signature=signature
+//
+func parseSignV4(v4Auth string) (sv signValues, aec ErrorCode) {
+ // Remove all spaces: some clients send spaced parameters and some don't,
+ // so we proactively strip any spaces to make parsing easier.
+ v4Auth = strings.Replace(v4Auth, " ", "", -1)
+ if v4Auth == "" {
+ return sv, ErrAuthHeaderEmpty
+ }
+
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v4Auth, signV4Algorithm) {
+ return sv, ErrSignatureVersionNotSupported
+ }
+
+ // Strip off the Algorithm prefix.
+ v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+ authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+ if len(authFields) != 3 {
+ return sv, ErrMissingFields
+ }
+
+ // Initialize signature version '4' structured header.
+ signV4Values := signValues{}
+
+ var err ErrorCode
+ // Save credential values.
+ signV4Values.Credential, err = parseCredentialHeader(authFields[0])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Save signed headers.
+ signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Save signature.
+ signV4Values.Signature, err = parseSignature(authFields[2])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Return the structure here.
+ return signV4Values, ErrNone
+}
+
+// parse credentialHeader string into its structured form.
+func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCode) {
+ creds := strings.Split(strings.TrimSpace(credElement), "=")
+ if len(creds) != 2 {
+ return ch, ErrMissingFields
+ }
+ if creds[0] != "Credential" {
+ return ch, ErrMissingCredTag
+ }
+ credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
+ if len(credElements) != 5 {
+ return ch, ErrCredMalformed
+ }
+ // Save access key id.
+ cred := credentialHeader{
+ accessKey: credElements[0],
+ }
+ var e error
+ cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
+ if e != nil {
+ return ch, ErrMalformedCredentialDate
+ }
+
+ cred.scope.region = credElements[2]
+ cred.scope.service = credElements[3] // "s3"
+ cred.scope.request = credElements[4] // "aws4_request"
+ return cred, ErrNone
+}
+
+// Parse slice of signed headers from signed headers tag.
+func parseSignedHeader(signedHdrElement string) ([]string, ErrorCode) {
+ signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
+ if len(signedHdrFields) != 2 {
+ return nil, ErrMissingFields
+ }
+ if signedHdrFields[0] != "SignedHeaders" {
+ return nil, ErrMissingSignHeadersTag
+ }
+ if signedHdrFields[1] == "" {
+ return nil, ErrMissingFields
+ }
+ signedHeaders := strings.Split(signedHdrFields[1], ";")
+ return signedHeaders, ErrNone
+}
+
+// Parse signature from signature tag.
+func parseSignature(signElement string) (string, ErrorCode) {
+ signFields := strings.Split(strings.TrimSpace(signElement), "=")
+ if len(signFields) != 2 {
+ return "", ErrMissingFields
+ }
+ if signFields[0] != "Signature" {
+ return "", ErrMissingSignTag
+ }
+ if signFields[1] == "" {
+ return "", ErrMissingFields
+ }
+ signature := signFields[1]
+ return signature, ErrNone
+}
+
+// check query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) {
+
+ // Copy request
+ req := *r
+
+ // Parse request query string.
+ pSignValues, err := parsePreSignV4(req.URL.Query())
+ if err != ErrNone {
+ return nil, err
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r)
+ if errCode != ErrNone {
+ return nil, errCode
+ }
+ // Construct new query.
+ query := make(url.Values)
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ query.Set("X-Amz-Content-Sha256", hashedPayload)
+ }
+
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+
+ now := time.Now().UTC()
+
+ // If the host which signed the request is slightly ahead in time (by less
+ // than 15 minutes), the request should still be allowed.
+ if pSignValues.Date.After(now.Add(15 * time.Minute)) {
+ return nil, ErrRequestNotReadyYet
+ }
+
+ if now.Sub(pSignValues.Date) > pSignValues.Expires {
+ return nil, ErrExpiredPresignRequest
+ }
+
+ // Save the date and expires.
+ t := pSignValues.Date
+ expireSeconds := int(pSignValues.Expires / time.Second)
+
+ // Construct the query.
+ query.Set("X-Amz-Date", t.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
+ query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders))
+ query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region))
+
+ // Save other headers available in the request parameters.
+ for k, v := range req.URL.Query() {
+
+ // Handle the metadata in presigned put query string
+ if strings.Contains(strings.ToLower(k), "x-amz-meta-") {
+ query.Set(k, v[0])
+ }
+
+ if strings.HasPrefix(strings.ToLower(k), "x-amz") {
+ continue
+ }
+ query[k] = v
+ }
+
+ // Get the encoded query.
+ encodedQuery := query.Encode()
+
+ // Verify if the date query is the same.
+ if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if the expires query is the same.
+ if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if the signed headers query is the same.
+ if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if the credential query is the same.
+ if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if the sha256 payload query is the same.
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") {
+ return nil, ErrContentSHA256Mismatch
+ }
+ }
+
+ // Finally, verify that the signature itself matches.
+
+ // Get canonical request.
+ presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
+
+ // Get hmac presigned signing key.
+ presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region)
+
+ // Get new signature.
+ newSignature := getSignature(presignedSigningKey, presignedStringToSign)
+
+ // Verify signature.
+ if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ return identity, ErrNone
+}
+
+func contains(list []string, elem string) bool {
+ for _, t := range list {
+ if t == elem {
+ return true
+ }
+ }
+ return false
+}
+
+// preSignValues data type represents the structured form of an AWS Signature V4 query string.
+type preSignValues struct {
+ signValues
+ Date time.Time
+ Expires time.Duration
+}
+
+// Parses signature version '4' query string of the following form.
+//
+// querystring = X-Amz-Algorithm=algorithm
+// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
+// querystring += &X-Amz-Date=date
+// querystring += &X-Amz-Expires=timeout interval
+// querystring += &X-Amz-SignedHeaders=signed_headers
+// querystring += &X-Amz-Signature=signature
+//
+// verifies if any of the necessary query params are missing in the presigned request.
+func doesV4PresignParamsExist(query url.Values) ErrorCode {
+ v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"}
+ for _, v4PresignQueryParam := range v4PresignQueryParams {
+ if _, ok := query[v4PresignQueryParam]; !ok {
+ return ErrInvalidQueryParams
+ }
+ }
+ return ErrNone
+}
+
+// Parses all the presigned signature values into separate elements.
+func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) {
+ var err ErrorCode
+ // verify whether the required query params exist.
+ err = doesV4PresignParamsExist(query)
+ if err != ErrNone {
+ return psv, err
+ }
+
+ // Verify if the query algorithm is supported or not.
+ if query.Get("X-Amz-Algorithm") != signV4Algorithm {
+ return psv, ErrInvalidQuerySignatureAlgo
+ }
+
+ // Initialize signature version '4' structured header.
+ preSignV4Values := preSignValues{}
+
+ // Save credential.
+ preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
+ if err != ErrNone {
+ return psv, err
+ }
+
+ var e error
+ // Save date in native time.Time.
+ preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
+ if e != nil {
+ return psv, ErrMalformedPresignedDate
+ }
+
+ // Save expires in native time.Duration.
+ preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
+ if e != nil {
+ return psv, ErrMalformedExpires
+ }
+
+ if preSignV4Values.Expires < 0 {
+ return psv, ErrNegativeExpires
+ }
+
+ // Expiry time must not exceed 7 days (604800 seconds).
+ if preSignV4Values.Expires.Seconds() > 604800 {
+ return psv, ErrMaximumExpires
+ }
+
+ // Save signed headers.
+ preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
+ if err != ErrNone {
+ return psv, err
+ }
+
+ // Save signature.
+ preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
+ if err != ErrNone {
+ return psv, err
+ }
+
+ // Return the structured form of the signature query string.
+ return preSignV4Values, ErrNone
+}
+
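+// The query parsed by parsePreSignV4 looks like (values abbreviated, hypothetical):
+//
+//	X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKID%2F20200325%2Fus-east-1%2Fs3%2Faws4_request
+//	&X-Amz-Date=20200325T000000Z&X-Amz-Expires=600&X-Amz-SignedHeaders=host&X-Amz-Signature=<hex>
+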
+// extractSignedHeaders extracts the signed headers from the Authorization header.
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, ErrorCode) {
+ reqHeaders := r.Header
+ // find whether "host" is part of list of signed headers.
+ // if not return ErrUnsignedHeaders. "host" is mandatory.
+ if !contains(signedHeaders, "host") {
+ return nil, ErrUnsignedHeaders
+ }
+ extractedSignedHeaders := make(http.Header)
+ for _, header := range signedHeaders {
+ // `host` will not be found in the request headers; it is available via
+ // r.Host. Still, the list of signed headers must always contain "host".
+ val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+ if ok {
+ for _, enc := range val {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ continue
+ }
+ switch header {
+ case "expect":
+ // Golang's http server strips off the 'Expect' header; if the
+ // client sent it as part of the signed headers we need to
+ // handle it here, otherwise we would see a signature mismatch.
+ // `aws-cli` sets this as part of signed headers.
+ //
+ // According to
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
+ // Expect header is always of form:
+ //
+ // Expect = "Expect" ":" 1#expectation
+ // expectation = "100-continue" | expectation-extension
+ //
+ // So it is safe to assume that '100-continue' is what would
+ // be sent, for the time being keep this work around.
+ // Adding a *TODO* to remove this later when Golang server
+ // doesn't filter out the 'Expect' header.
+ extractedSignedHeaders.Set(header, "100-continue")
+ case "host":
+ // Go http server removes "host" from Request.Header
+ extractedSignedHeaders.Set(header, r.Host)
+ case "transfer-encoding":
+ for _, enc := range r.TransferEncoding {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ case "content-length":
+ // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation.
+ // But some clients deviate from this rule. Hence we consider Content-Length for signature
+ // calculation to be compatible with such clients.
+ extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
+ default:
+ return nil, ErrUnsignedHeaders
+ }
+ }
+ return extractedSignedHeaders, ErrNone
+}
+
+// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names.
+func getSignedHeaders(signedHeaders http.Header) string {
+ var headers []string
+ for k := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ }
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getScope generates the credential scope string from a date, an AWS region, and the "s3" service.
+func getScope(t time.Time, region string) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
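+// e.g. getScope(t, "us-east-1") for a t of 2020-03-25 returns
+// "20200325/us-east-1/s3/aws4_request".
+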
+// getCanonicalRequest generates a canonical request of style
+//
+// canonicalRequest =
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+//
+func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
+ rawQuery := strings.Replace(queryStr, "+", "%20", -1)
+ encodedPath := encodePath(urlPath)
+ canonicalRequest := strings.Join([]string{
+ method,
+ encodedPath,
+ rawQuery,
+ getCanonicalHeaders(extractedSignedHeaders),
+ getSignedHeaders(extractedSignedHeaders),
+ payload,
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSign generates the string to sign from the canonical request, the timestamp, and the scope.
+func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
+ stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
+ return stringToSign
+}
+
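+// The string to sign produced by getStringToSign has the fixed four-line layout:
+//
+//	AWS4-HMAC-SHA256
+//	<ISO8601 timestamp>
+//	<scope>
+//	<hex(sha256(canonicalRequest))>
+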
+// sumHMAC calculates the HMAC-SHA256 of data, keyed with key.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getSigningKey derives the HMAC key used to calculate the final signature.
+func getSigningKey(secretKey string, t time.Time, region string) []byte {
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
+ regionBytes := sumHMAC(date, []byte(region))
+ service := sumHMAC(regionBytes, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
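+// The derivation in getSigningKey follows the SigV4 chain:
+// kDate = HMAC("AWS4"+secret, yyyymmdd), kRegion = HMAC(kDate, region),
+// kService = HMAC(kRegion, "s3"), signingKey = HMAC(kService, "aws4_request").
+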
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getCanonicalHeaders generates the canonical headers string: lowercase header names, each followed by its comma-joined values.
+func getCanonicalHeaders(signedHeaders http.Header) string {
+ var headers []string
+ vals := make(http.Header)
+ for k, vv := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(signV4TrimAll(v))
+ }
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+ // Compress adjacent spaces (a space is determined by
+ // unicode.IsSpace() internally here) to one space and return
+ return strings.Join(strings.Fields(input), " ")
+}
+
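+// e.g. signV4TrimAll("  a \t b  ") == "a b".
+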
+// if object matches reserved string, no need to encode them
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// encodePath encodes a path from its UTF-8 byte representation to percent-escaped sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support
+// all UTF-8 characters; non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and
+// supports pretty much every UTF-8 character.
+func encodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ len := utf8.RuneLen(s)
+ if len < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, len)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
+
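+// e.g. encodePath("/bucket/hello world") == "/bucket/hello%20world", while a
+// purely unreserved path such as "/bucket/key" is returned as-is.
+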
+// compareSignatureV4 returns true if and only if both signatures
+// are equal. The signatures are expected to be HEX encoded strings
+// according to the AWS S3 signature V4 spec.
+func compareSignatureV4(sig1, sig2 string) bool {
+ // The constant-time compare over []byte(str) works because the hex
+ // encoding is unique for a given sequence of bytes. See also compareSignatureV2.
+ return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1
+}
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
new file mode 100644
index 000000000..036b5c052
--- /dev/null
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -0,0 +1,418 @@
+package s3api
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+ "unicode/utf8"
+)
+
+// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection.
+func TestIsRequestPresignedSignatureV4(t *testing.T) {
+ testCases := []struct {
+ inputQueryKey string
+ inputQueryValue string
+ expectedResult bool
+ }{
+ // Test case - 1.
+ // Test case with no query key set.
+ {"", "", false},
+ // Test case - 2.
+ // Test case with query key "X-Amz-Credential" set.
+ {"X-Amz-Credential", "", true},
+ // Test case - 3.
+ {"X-Amz-Content-Sha256", "", false},
+ }
+
+ for i, testCase := range testCases {
+ // creating an input HTTP request.
+ // Only the query parameters are relevant for this particular test.
+ inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+ if err != nil {
+ t.Fatalf("Error initializing input HTTP request: %v", err)
+ }
+ q := inputReq.URL.Query()
+ q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
+ inputReq.URL.RawQuery = q.Encode()
+
+ actualResult := isRequestPresignedSignatureV4(inputReq)
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Tests the request authentication function and the s3 error replies.
+func TestIsReqAuthenticated(t *testing.T) {
+ iam := NewIdentityAccessManagement("", "")
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ // List of test cases for validating http request authentication.
+ testCases := []struct {
+ req *http.Request
+ s3Error ErrorCode
+ }{
+ // When request is unsigned, access denied is returned.
+ {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
+ // When request is properly signed, error is none.
+ {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
+ }
+
+ // Validates all testcases.
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
+ ioutil.ReadAll(testCase.req.Body)
+ t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
+ }
+ }
+}
+
+func TestCheckAdminRequestAuthType(t *testing.T) {
+ iam := NewIdentityAccessManagement("", "")
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ testCases := []struct {
+ Request *http.Request
+ ErrCode ErrorCode
+ }{
+ {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+ {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
+ {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
+ }
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {
+ t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
+ }
+ }
+}
+
+// Provides a fully populated http request instance, fails otherwise.
+func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req, err := newTestRequest(method, urlStr, contentLength, body)
+ if err != nil {
+ t.Fatalf("Unable to initialize new http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is signed with AWS Signature V4; it fails the test if unable to do so.
+func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is presigned with AWS Signature V4; it fails the test if unable to do so.
+func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// Returns new HTTP request object.
+func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
+ if method == "" {
+ method = "POST"
+ }
+
+ // Save for subsequent use
+ var hashedPayload string
+ var md5Base64 string
+ switch {
+ case body == nil:
+ hashedPayload = getSHA256Hash([]byte{})
+ default:
+ payloadBytes, err := ioutil.ReadAll(body)
+ if err != nil {
+ return nil, err
+ }
+ hashedPayload = getSHA256Hash(payloadBytes)
+ md5Base64 = getMD5HashBase64(payloadBytes)
+ }
+ // Seek back to beginning.
+ if body != nil {
+ body.Seek(0, 0)
+ } else {
+ body = bytes.NewReader([]byte(""))
+ }
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+ if md5Base64 != "" {
+ req.Header.Set("Content-Md5", md5Base64)
+ }
+ req.Header.Set("x-amz-content-sha256", hashedPayload)
+
+ // Add Content-Length
+ req.ContentLength = contentLength
+
+ return req, nil
+}
+
+// getSHA256Hash returns SHA-256 hash in hex encoding of given data.
+func getSHA256Hash(data []byte) string {
+ return hex.EncodeToString(getSHA256Sum(data))
+}
+
+// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
+func getMD5HashBase64(data []byte) string {
+ return base64.StdEncoding.EncodeToString(getMD5Sum(data))
+}
+
+// getSHA256Sum returns the SHA-256 sum of given data.
+func getSHA256Sum(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Sum returns MD5 sum of given data.
+func getMD5Sum(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Hash returns MD5 hash in hex encoding of given data.
+func getMD5Hash(data []byte) string {
+ return hex.EncodeToString(getMD5Sum(data))
+}
+
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// Sign given request using Signature V4.
+func signRequestV4(req *http.Request, accessKey, secretKey string) error {
+ // Get hashed payload.
+ hashedPayload := req.Header.Get("x-amz-content-sha256")
+ if hashedPayload == "" {
+ return fmt.Errorf("Invalid hashed payload")
+ }
+
+ currTime := time.Now()
+
+ // Set x-amz-date.
+ req.Header.Set("x-amz-date", currTime.Format(iso8601Format))
+
+ // Get header map.
+ headerMap := make(map[string][]string)
+ for k, vv := range req.Header {
+ // If request header key is not in ignored headers, then add it.
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok {
+ headerMap[strings.ToLower(k)] = vv
+ }
+ }
+
+ // Get header keys.
+ headers := []string{"host"}
+ for k := range headerMap {
+ headers = append(headers, k)
+ }
+ sort.Strings(headers)
+
+ region := "us-east-1"
+
+ // Get canonical headers.
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range headerMap[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ canonicalHeaders := buf.String()
+
+ // Get signed headers.
+ signedHeaders := strings.Join(headers, ";")
+
+ // Get canonical query string.
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+
+ // Get canonical URI.
+ canonicalURI := EncodePath(req.URL.Path)
+
+ // Get canonical request.
+ // canonicalRequest =
+ //	<HTTPMethod>\n
+ //	<CanonicalURI>\n
+ //	<CanonicalQueryString>\n
+ //	<CanonicalHeaders>\n
+ //	<SignedHeaders>\n
+ //	<HashedPayload>
+ //
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ canonicalURI,
+ req.URL.RawQuery,
+ canonicalHeaders,
+ signedHeaders,
+ hashedPayload,
+ }, "\n")
+
+ // Get scope.
+ scope := strings.Join([]string{
+ currTime.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+
+ stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
+
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
+ regionHMAC := sumHMAC(date, []byte(region))
+ service := sumHMAC(regionHMAC, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+
+ signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+
+ // final Authorization header
+ parts := []string{
+ "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return nil
+}
+
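+// The Authorization header produced by signRequestV4 has the shape
+// (hypothetical values, a single logical line):
+//
+//	Authorization: AWS4-HMAC-SHA256 Credential=AKID/20200325/us-east-1/s3/aws4_request,
+//	SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=<64 hex chars>
+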
+// preSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return errors.New("Presign cannot be generated without access and secret keys")
+ }
+
+ region := "us-east-1"
+ date := time.Now().UTC()
+ scope := getScope(date, region)
+ credential := fmt.Sprintf("%s/%s", accessKeyID, scope)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", date.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", "host")
+ query.Set("X-Amz-Credential", credential)
+ query.Set("X-Amz-Content-Sha256", unsignedPayload)
+
+ // "host" is the only header required to be signed for Presigned URLs.
+ extractedSignedHeaders := make(http.Header)
+ extractedSignedHeaders.Set("host", req.Host)
+
+ queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
+ stringToSign := getStringToSign(canonicalRequest, date, scope)
+ signingKey := getSigningKey(secretAccessKey, date, region)
+ signature := getSignature(signingKey, stringToSign)
+
+ req.URL.RawQuery = query.Encode()
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature)
+
+ // The request URL now carries the complete presigned query.
+ return nil
+}
+
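+// After preSignV4, the request URL carries the complete presigned query, e.g.
+// (hypothetical) "http://127.0.0.1:9000/?X-Amz-Algorithm=AWS4-HMAC-SHA256&...&X-Amz-Signature=<hex>".
+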
+// EncodePath encodes a path from its UTF-8 byte representation to percent-escaped sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support
+// all UTF-8 characters; non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and
+// supports pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ len := utf8.RuneLen(s)
+ if len < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, len)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go
index 061fd4a92..76c4394c2 100644
--- a/weed/s3api/chunked_reader_v4.go
+++ b/weed/s3api/chunked_reader_v4.go
@@ -21,17 +21,115 @@ package s3api
import (
"bufio"
"bytes"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
- "github.com/dustin/go-humanize"
+ "hash"
"io"
"net/http"
-)
+ "time"
-// Streaming AWS Signature Version '4' constants.
-const (
- streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ "github.com/dustin/go-humanize"
)
+// getChunkSignature - calculates the signature for a single streamed chunk.
+func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string {
+
+ // Calculate string to sign.
+ stringToSign := signV4ChunkedAlgorithm + "\n" +
+ date.Format(iso8601Format) + "\n" +
+ getScope(date, region) + "\n" +
+ seedSignature + "\n" +
+ emptySHA256 + "\n" +
+ hashedChunk
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretKey, date, region)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ return newSignature
+}
+
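+// The string to sign in getChunkSignature chains each chunk on the previous signature:
+//
+//	AWS4-HMAC-SHA256-PAYLOAD\n<timestamp>\n<scope>\n<previous signature>\n<sha256("")>\n<sha256(chunk data)>
+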
+// calculateSeedSignature - calculates the seed signature in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+// It returns the seed signature, or an error if the signature mismatches or any
+// other error occurs while parsing and validating.
+func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, errCode := parseSignV4(v4Auth)
+ if errCode != ErrNone {
+ return nil, "", "", time.Time{}, errCode
+ }
+
+ // Payload streaming.
+ payload := streamingContentSHA256
+
+ // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+ if payload != req.Header.Get("X-Amz-Content-Sha256") {
+ return nil, "", "", time.Time{}, ErrContentSHA256Mismatch
+ }
+
+ // Extract all the signed headers along with its values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != ErrNone {
+ return nil, "", "", time.Time{}, errCode
+ }
+ // Verify if the access key id matches.
+ _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, "", "", time.Time{}, ErrInvalidAccessKeyID
+ }
+
+ // Take the region from the credential scope.
+ region = signV4Values.Credential.scope.region
+
+ // Extract the date; if not present, return an error.
+ var dateStr string
+ if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
+ if dateStr = r.Header.Get("Date"); dateStr == "" {
+ return nil, "", "", time.Time{}, ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ var err error
+ date, err = time.Parse(iso8601Format, dateStr)
+ if err != nil {
+ return nil, "", "", time.Time{}, ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify if the signatures match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, "", "", time.Time{}, ErrSignatureDoesNotMatch
+ }
+
+ // Return the calculated signature.
+ return cred, newSignature, region, date, ErrNone
+}
+
const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
// lineTooLong is generated as chunk header is bigger than 4KiB.
@@ -43,22 +141,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
// newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func newSignV4ChunkedReader(req *http.Request) io.ReadCloser {
- return &s3ChunkedReader{
- reader: bufio.NewReader(req.Body),
- state: readChunkHeader,
+func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, ErrorCode) {
+ ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
+ if errCode != ErrNone {
+ return nil, errCode
}
+ return &s3ChunkedReader{
+ cred: ident,
+ reader: bufio.NewReader(req.Body),
+ seedSignature: seedSignature,
+ seedDate: seedDate,
+ region: region,
+ chunkSHA256Writer: sha256.New(),
+ state: readChunkHeader,
+ }, ErrNone
}
// Represents the overall state that is required for decoding a
// AWS Signature V4 chunked reader.
type s3ChunkedReader struct {
- reader *bufio.Reader
- state chunkState
- lastChunk bool
- chunkSignature string
- n uint64 // Unread bytes in chunk
- err error
+ cred *Credential
+ reader *bufio.Reader
+ seedSignature string
+ seedDate time.Time
+ region string
+ state chunkState
+ lastChunk bool
+ chunkSignature string
+ chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
+ n uint64 // Unread bytes in chunk
+ err error
}
// Read chunk reads the chunk token signature portion.
@@ -157,6 +269,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
return 0, cr.err
}
+ // Calculate sha256.
+ cr.chunkSHA256Writer.Write(rbuf[:n0])
+
// Update the bytes read into request buffer so far.
n += n0
buf = buf[n0:]
@@ -169,6 +284,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
continue
}
case verifyChunk:
+ // Calculate the hashed chunk.
+ hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
+ // Calculate the chunk signature.
+ newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk)
+ if !compareSignatureV4(cr.chunkSignature, newSignature) {
+ // Chunk signature doesn't match; return a signature mismatch error.
+ cr.err = errors.New("chunk signature does not match")
+ return 0, cr.err
+ }
+ // The newly calculated signature becomes the seed for the next chunk,
+ // following the signature chaining.
+ cr.seedSignature = newSignature
+ cr.chunkSHA256Writer.Reset()
if cr.lastChunk {
cr.state = eofChunk
} else {
diff --git a/weed/s3api/custom_types.go b/weed/s3api/custom_types.go
new file mode 100644
index 000000000..569dfc3ac
--- /dev/null
+++ b/weed/s3api/custom_types.go
@@ -0,0 +1,3 @@
+package s3api
+
+const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00"
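+
+// e.g. time.Date(2020, 3, 25, 11, 45, 30, 0, time.UTC).Format(s3TimeFormat)
+// yields "2020-03-25T11:45:30Z".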
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index 73be496d9..31ac850b1 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -1,6 +1,7 @@
package s3api
import (
+ "encoding/xml"
"fmt"
"path/filepath"
"strconv"
@@ -9,18 +10,20 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
+ "github.com/google/uuid"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/satori/go.uuid"
)
type InitiateMultipartUploadResult struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"`
s3.CreateMultipartUploadOutput
}
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
- uploadId, _ := uuid.NewV4()
+ uploadId, _ := uuid.NewRandom()
uploadIdString := uploadId.String()
if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
@@ -34,9 +37,9 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
}
output = &InitiateMultipartUploadResult{
- s3.CreateMultipartUploadOutput{
+ CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{
Bucket: input.Bucket,
- Key: input.Key,
+ Key: objectKey(input.Key),
UploadId: aws.String(uploadIdString),
},
}
@@ -45,6 +48,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
}
type CompleteMultipartUploadResult struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"`
s3.CompleteMultipartUploadOutput
}
@@ -65,11 +69,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
for _, chunk := range entry.Chunks {
p := &filer_pb.FileChunk{
- FileId: chunk.FileId,
- Offset: offset,
- Size: chunk.Size,
- Mtime: chunk.Mtime,
- ETag: chunk.ETag,
+ FileId: chunk.GetFileIdString(),
+ Offset: offset,
+ Size: chunk.Size,
+ Mtime: chunk.Mtime,
+ CipherKey: chunk.CipherKey,
+ ETag: chunk.ETag,
}
finalParts = append(finalParts, p)
offset += int64(chunk.Size)
@@ -87,6 +92,11 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
}
dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
+ // remove suffix '/'
+ if strings.HasSuffix(dirName, "/") {
+ dirName = dirName[:len(dirName)-1]
+ }
+
err = s3a.mkFile(dirName, entryName, finalParts)
if err != nil {
@@ -95,14 +105,15 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
}
output = &CompleteMultipartUploadResult{
- s3.CompleteMultipartUploadOutput{
- Bucket: input.Bucket,
- ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""),
- Key: input.Key,
+ CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
+ Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
+ Bucket: input.Bucket,
+ ETag: aws.String("\"" + filer2.ETagChunks(finalParts) + "\""),
+ Key: objectKey(input.Key),
},
}
- if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
+ if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
}
@@ -117,7 +128,7 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput
return nil, ErrNoSuchUpload
}
if exists {
- err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
+ err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
}
if err != nil {
glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
@@ -128,13 +139,14 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput
}
type ListMultipartUploadsResult struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"`
s3.ListMultipartUploadsOutput
}
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
output = &ListMultipartUploadsResult{
- s3.ListMultipartUploadsOutput{
+ ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{
Bucket: input.Bucket,
Delimiter: input.Delimiter,
EncodingType: input.EncodingType,
@@ -144,7 +156,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
},
}
- entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
+ entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, uint32(*input.MaxUploads))
if err != nil {
glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
return
@@ -154,7 +166,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
if entry.Extended != nil {
key := entry.Extended["key"]
output.Uploads = append(output.Uploads, &s3.MultipartUpload{
- Key: aws.String(string(key)),
+ Key: objectKey(aws.String(string(key))),
UploadId: aws.String(entry.Name),
})
}
@@ -164,22 +176,22 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
}
type ListPartsResult struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"`
s3.ListPartsOutput
}
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
output = &ListPartsResult{
- s3.ListPartsOutput{
+ ListPartsOutput: s3.ListPartsOutput{
Bucket: input.Bucket,
- Key: input.Key,
+ Key: objectKey(input.Key),
UploadId: input.UploadId,
MaxParts: input.MaxParts, // the maximum number of parts to return.
PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive
},
}
- entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
- "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
+ entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts))
if err != nil {
glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrNoSuchUpload
@@ -195,9 +207,9 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
}
output.Parts = append(output.Parts, &s3.Part{
PartNumber: aws.Int64(int64(partNumber)),
- LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
+ LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
- ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
+ ETag: aws.String("\"" + filer2.ETag(entry) + "\""),
})
}
}
diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go
new file mode 100644
index 000000000..835665dd6
--- /dev/null
+++ b/weed/s3api/filer_multipart_test.go
@@ -0,0 +1,26 @@
+package s3api
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "testing"
+)
+
+func TestInitiateMultipartUploadResult(t *testing.T) {
+
+ expected := `<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Bucket>example-bucket</Bucket><Key>example-object</Key><UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId></InitiateMultipartUploadResult>`
+ response := &InitiateMultipartUploadResult{
+ CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{
+ Bucket: aws.String("example-bucket"),
+ Key: aws.String("example-object"),
+ UploadId: aws.String("VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"),
+ },
+ }
+
+ encoded := string(encodeResponse(response))
+ if encoded != expected {
+ t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
+ }
+
+}
diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go
index 40c5a3e26..7f49c320e 100644
--- a/weed/s3api/filer_util.go
+++ b/weed/s3api/filer_util.go
@@ -3,119 +3,42 @@ package s3api
import (
"context"
"fmt"
- "os"
- "time"
+ "strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
- return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- entry := &filer_pb.Entry{
- Name: dirName,
- IsDirectory: true,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(0777 | os.ModeDir),
- Uid: OS_UID,
- Gid: OS_GID,
- },
- }
-
- if fn != nil {
- fn(entry)
- }
-
- request := &filer_pb.CreateEntryRequest{
- Directory: parentDirectoryPath,
- Entry: entry,
- }
- glog.V(1).Infof("mkdir: %v", request)
- if _, err := client.CreateEntry(context.Background(), request); err != nil {
- return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
- }
+ return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn)
- return nil
- })
}
func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
- return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- entry := &filer_pb.Entry{
- Name: fileName,
- IsDirectory: false,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(0770),
- Uid: OS_UID,
- Gid: OS_GID,
- },
- Chunks: chunks,
- }
- request := &filer_pb.CreateEntryRequest{
- Directory: parentDirectoryPath,
- Entry: entry,
- }
+ return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks)
- glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
- if _, err := client.CreateEntry(context.Background(), request); err != nil {
- return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
- }
-
- return nil
- })
}
-func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
-
- err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.ListEntriesRequest{
- Directory: parentDirectoryPath,
- Prefix: prefix,
- StartFromFileName: startFrom,
- InclusiveStartFrom: inclusive,
- Limit: uint32(limit),
- }
-
- glog.V(4).Infof("read directory: %v", request)
- resp, err := client.ListEntries(context.Background(), request)
- if err != nil {
- return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err)
- }
-
- entries = resp.Entries
+func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, err error) {
+ err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLast bool) error {
+ entries = append(entries, entry)
return nil
- })
+ }, startFrom, inclusive, limit)
return
}
-func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
-
- return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- ctx := context.Background()
+func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error {
- request := &filer_pb.DeleteEntryRequest{
- Directory: parentDirectoryPath,
- Name: entryName,
- IsDeleteData: isDeleteData,
- IsRecursive: isRecursive,
- }
+ return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
- if _, err := client.DeleteEntry(ctx, request); err != nil {
- return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+ if err != nil {
+ return err
}
return nil
@@ -123,27 +46,36 @@ func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirec
}
-func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
-
- err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- ctx := context.Background()
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: parentDirectoryPath,
- Name: entryName,
+func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error {
+ request := &filer_pb.DeleteEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ IsDeleteData: isDeleteData,
+ IsRecursive: isRecursive,
+ }
+
+ glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ if resp, err := client.DeleteEntry(context.Background(), request); err != nil {
+ glog.V(0).Infof("delete entry %v: %v", request, err)
+ return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ } else {
+ if resp.Error != "" {
+ return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error)
}
+ }
+ return nil
+}
- glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
- }
+func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
- exists = resp.Entry.IsDirectory == isDirectory
+ return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory)
- return nil
- })
+}
- return
+func objectKey(key *string) *string {
+ if strings.HasPrefix(*key, "/") {
+ t := (*key)[1:]
+ return &t
+ }
+ return key
}
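The new `objectKey` helper (used by the multipart handlers later in this diff) strips the leading slash that mux leaves on extracted object paths, producing the bare keys the aws-sdk-go inputs expect. A self-contained copy with sample inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// objectKey mirrors the helper above: strip exactly one leading "/",
// leaving interior slashes (nested "directories") untouched.
func objectKey(key *string) *string {
	if strings.HasPrefix(*key, "/") {
		t := (*key)[1:]
		return &t
	}
	return key
}

func main() {
	for _, k := range []string{"/movies/my-movie.m2ts", "plain.txt"} {
		key := k
		fmt.Printf("%q -> %q\n", k, *objectKey(&key))
	}
}
```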
diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go
index b680fe1e1..bf5cf5fab 100644
--- a/weed/s3api/s3api_auth.go
+++ b/weed/s3api/s3api_auth.go
@@ -9,6 +9,8 @@ import (
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
signV2Algorithm = "AWS"
+ iso8601Format = "20060102T150405Z"
+ yyyymmdd = "20060102"
)
// Verify if request has JWT.
@@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool {
// Verify if request has AWS Signature Version '2'.
func isRequestSignatureV2(r *http.Request) bool {
- return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
- strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm))
+ return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
+ strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)
}
// Verify if request has AWS PreSign Version '4'.
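After the reformat, the V2 predicate reads: the Authorization header starts with "AWS" but not with "AWS4-HMAC-SHA256". The negated V4 check has to come first because every V4 header also starts with "AWS"; a quick stand-alone illustration:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	signV4Algorithm = "AWS4-HMAC-SHA256"
	signV2Algorithm = "AWS"
)

// isV2 applies isRequestSignatureV2's predicate to a raw header value.
func isV2(authorization string) bool {
	return !strings.HasPrefix(authorization, signV4Algorithm) &&
		strings.HasPrefix(authorization, signV2Algorithm)
}

func main() {
	fmt.Println(isV2("AWS AKIAIOSFODNN7EXAMPLE:signature")) // true: classic V2
	fmt.Println(isV2("AWS4-HMAC-SHA256 Credential=x"))      // false: V4, despite the AWS prefix
}
```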
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 1d319e354..7d96e3e0e 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -2,27 +2,24 @@ package s3api
import (
"context"
+ "encoding/xml"
"fmt"
"math"
"net/http"
- "os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/gorilla/mux"
-)
-var (
- OS_UID = uint32(os.Getuid())
- OS_GID = uint32(os.Getgid())
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type ListAllMyBucketsResult struct {
- Buckets []*s3.Bucket `xml:"Buckets>Bucket"`
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"`
Owner *s3.Owner
+ Buckets []*s3.Bucket `xml:"Buckets>Bucket"`
}
func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
@@ -41,7 +38,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
if entry.IsDirectory {
buckets = append(buckets, &s3.Bucket{
Name: aws.String(entry.Name),
- CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0)),
+ CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()),
})
}
}
@@ -76,9 +73,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
vars := mux.Vars(r)
bucket := vars["bucket"]
- err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
- ctx := context.Background()
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// delete collection
deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
@@ -86,14 +81,14 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
}
glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
- if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil {
+ if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil {
return fmt.Errorf("delete collection %s: %v", bucket, err)
}
return nil
})
- err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true)
+ err = s3a.rm(s3a.option.BucketsPath, bucket, false, true)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
@@ -108,7 +103,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
bucket := vars["bucket"]
- err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: s3a.option.BucketsPath,
@@ -116,7 +111,10 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
}
glog.V(1).Infof("lookup bucket: %v", request)
- if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil {
+ if _, err := filer_pb.LookupEntry(client, request); err != nil {
+ if err == filer_pb.ErrNotFound {
+ return filer_pb.ErrNotFound
+ }
return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
}
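Returning `filer_pb.ErrNotFound` unwrapped here, rather than folding it into `fmt.Errorf`, matters because sentinel errors only survive an `==` comparison when passed through as-is; the caller can then turn a missing bucket into a 404 instead of a 500. A small demonstration of the distinction, with a local `ErrNotFound` standing in for the filer_pb sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrNotFound plays the role of filer_pb.ErrNotFound.
var ErrNotFound = errors.New("filer: no entry is found in filer store")

func lookup(found bool) error {
	if !found {
		return ErrNotFound // pass the sentinel through untouched
	}
	return nil
}

func main() {
	err := lookup(false)
	fmt.Println(err == ErrNotFound)                                  // true
	fmt.Println(fmt.Errorf("lookup bucket: %v", err) == ErrNotFound) // false: wrapping breaks ==
}
```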
diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go
new file mode 100644
index 000000000..7ab04830b
--- /dev/null
+++ b/weed/s3api/s3api_bucket_handlers_test.go
@@ -0,0 +1,39 @@
+package s3api
+
+import (
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+func TestListBucketsHandler(t *testing.T) {
+
+ expected := `<?xml version="1.0" encoding="UTF-8"?>
+<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><DisplayName></DisplayName><ID></ID></Owner><Buckets><Bucket><CreationDate>2011-04-09T12:34:49Z</CreationDate><Name>test1</Name></Bucket><Bucket><CreationDate>2011-02-09T12:34:49Z</CreationDate><Name>test2</Name></Bucket></Buckets></ListAllMyBucketsResult>`
+ var response ListAllMyBucketsResult
+
+ var buckets []*s3.Bucket
+ buckets = append(buckets, &s3.Bucket{
+ Name: aws.String("test1"),
+ CreationDate: aws.Time(time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC)),
+ })
+ buckets = append(buckets, &s3.Bucket{
+ Name: aws.String("test2"),
+ CreationDate: aws.Time(time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC)),
+ })
+
+ response = ListAllMyBucketsResult{
+ Owner: &s3.Owner{
+ ID: aws.String(""),
+ DisplayName: aws.String(""),
+ },
+ Buckets: buckets,
+ }
+
+ encoded := string(encodeResponse(response))
+ if encoded != expected {
+ t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
+ }
+}
diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go
index 7ba55ed28..ff411f276 100644
--- a/weed/s3api/s3api_errors.go
+++ b/weed/s3api/s3api_errors.go
@@ -27,11 +27,13 @@ type ErrorCode int
// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
const (
ErrNone ErrorCode = iota
+ ErrAccessDenied
ErrMethodNotAllowed
ErrBucketNotEmpty
ErrBucketAlreadyExists
ErrBucketAlreadyOwnedByYou
ErrNoSuchBucket
+ ErrNoSuchKey
ErrNoSuchUpload
ErrInvalidBucketName
ErrInvalidDigest
@@ -41,12 +43,43 @@ const (
ErrInvalidPartNumberMarker
ErrInvalidPart
ErrInternalError
+ ErrInvalidCopyDest
+ ErrInvalidCopySource
+ ErrAuthHeaderEmpty
+ ErrSignatureVersionNotSupported
+ ErrMissingFields
+ ErrMissingCredTag
+ ErrCredMalformed
+ ErrMalformedXML
+ ErrMalformedDate
+ ErrMalformedPresignedDate
+ ErrMalformedCredentialDate
+ ErrMissingSignHeadersTag
+ ErrMissingSignTag
+ ErrUnsignedHeaders
+ ErrInvalidQueryParams
+ ErrInvalidQuerySignatureAlgo
+ ErrExpiredPresignRequest
+ ErrMalformedExpires
+ ErrNegativeExpires
+ ErrMaximumExpires
+ ErrSignatureDoesNotMatch
+ ErrContentSHA256Mismatch
+ ErrInvalidAccessKeyID
+ ErrRequestNotReadyYet
+ ErrMissingDateHeader
+ ErrInvalidRequest
ErrNotImplemented
)
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodeResponse = map[ErrorCode]APIError{
+ ErrAccessDenied: {
+ Code: "AccessDenied",
+ Description: "Access Denied.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
ErrMethodNotAllowed: {
Code: "MethodNotAllowed",
Description: "The specified method is not allowed against this resource.",
@@ -102,6 +135,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The specified bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
},
+ ErrNoSuchKey: {
+ Code: "NoSuchKey",
+ Description: "The specified key does not exist.",
+ HTTPStatusCode: http.StatusNotFound,
+ },
ErrNoSuchUpload: {
Code: "NoSuchUpload",
Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
@@ -118,6 +156,139 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.",
HTTPStatusCode: http.StatusBadRequest,
},
+
+ ErrInvalidCopyDest: {
+ Code: "InvalidRequest",
+ Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidCopySource: {
+ Code: "InvalidArgument",
+ Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrMalformedXML: {
+ Code: "MalformedXML",
+ Description: "The XML you provided was not well-formed or did not validate against our published schema.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrAuthHeaderEmpty: {
+ Code: "InvalidArgument",
+ Description: "Authorization header is invalid -- one and only one ' ' (space) required.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrSignatureVersionNotSupported: {
+ Code: "InvalidRequest",
+ Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingFields: {
+ Code: "MissingFields",
+ Description: "Missing fields in request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingCredTag: {
+ Code: "InvalidRequest",
+ Description: "Missing Credential field for this request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrCredMalformed: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedDate: {
+ Code: "MalformedDate",
+ Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedPresignedDate: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingSignHeadersTag: {
+ Code: "InvalidArgument",
+ Description: "Signature header missing SignedHeaders field.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingSignTag: {
+ Code: "AccessDenied",
+ Description: "Signature header missing Signature field.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrUnsignedHeaders: {
+ Code: "AccessDenied",
+ Description: "There were headers present in the request which were not signed",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidQueryParams: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidQuerySignatureAlgo: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrExpiredPresignRequest: {
+ Code: "AccessDenied",
+ Description: "Request has expired",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+ ErrMalformedExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires should be a number",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrNegativeExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires must be non-negative",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMaximumExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrInvalidAccessKeyID: {
+ Code: "InvalidAccessKeyId",
+ Description: "The access key ID you provided does not exist in our records.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrRequestNotReadyYet: {
+ Code: "AccessDenied",
+ Description: "Request is not valid yet",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrSignatureDoesNotMatch: {
+ Code: "SignatureDoesNotMatch",
+ Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrContentSHA256Mismatch: {
+ Code: "XAmzContentSHA256Mismatch",
+ Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingDateHeader: {
+ Code: "AccessDenied",
+ Description: "AWS authentication requires a valid Date or x-amz-date header",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidRequest: {
+ Code: "InvalidRequest",
+ Description: "Invalid Request",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
ErrNotImplemented: {
Code: "NotImplemented",
Description: "A header you provided implies functionality that is not implemented",
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go
index 286398310..7ef676400 100644
--- a/weed/s3api/s3api_handlers.go
+++ b/weed/s3api/s3api_handlers.go
@@ -5,12 +5,16 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
"net/http"
"net/url"
+ "strconv"
"time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type mimeType string
@@ -35,17 +39,18 @@ func encodeResponse(response interface{}) []byte {
return bytesBuffer.Bytes()
}
-func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&S3ApiServer{})
- grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err)
- }
- defer grpcConnection.Close()
+func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
- return fn(client)
+}
+func (s3a *S3ApiServer) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
}
// If none of the http routes match respond with MethodNotAllowed
@@ -72,13 +77,19 @@ func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse {
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
setCommonHeaders(w)
+ if response != nil {
+ w.Header().Set("Content-Length", strconv.Itoa(len(response)))
+ }
if mType != mimeNone {
w.Header().Set("Content-Type", string(mType))
}
w.WriteHeader(statusCode)
if response != nil {
glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
- w.Write(response)
+ _, err := w.Write(response)
+ if err != nil {
+ glog.V(0).Infof("write err: %v", err)
+ }
w.(http.Flusher).Flush()
}
}
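Setting Content-Length before `WriteHeader` is load-bearing: in net/http, header mutations after the status line has been written are silently discarded. A minimal check of that ordering rule:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

func handler(w http.ResponseWriter, r *http.Request) {
	body := []byte("<Result/>")
	w.Header().Set("Content-Length", strconv.Itoa(len(body))) // must precede WriteHeader
	w.WriteHeader(http.StatusOK)
	w.Write(body) // Header() changes from here on would be ignored
}

func main() {
	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest("GET", "/", nil))
	fmt.Println(rec.Header().Get("Content-Length")) // "9"
}
```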
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
new file mode 100644
index 000000000..b8fb3f6a4
--- /dev/null
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -0,0 +1,151 @@
+package s3api
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gorilla/mux"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ dstBucket := vars["bucket"]
+ dstObject := getObject(vars)
+
+ // Copy source path.
+ cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
+ if err != nil {
+ // Save unescaped string as is.
+ cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
+ }
+
+ srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
+ // If source object is empty or bucket is empty, reply back invalid copy source.
+ if srcObject == "" || srcBucket == "" {
+ writeErrorResponse(w, ErrInvalidCopySource, r.URL)
+ return
+ }
+
+ if srcBucket == dstBucket && srcObject == dstObject {
+ writeErrorResponse(w, ErrInvalidCopySource, r.URL)
+ return
+ }
+
+ dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s",
+ s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket)
+ srcUrl := fmt.Sprintf("http://%s%s/%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject)
+
+ _, _, dataReader, err := util.DownloadFile(srcUrl)
+ if err != nil {
+ writeErrorResponse(w, ErrInvalidCopySource, r.URL)
+ return
+ }
+ defer dataReader.Close()
+
+ etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
+
+ if errCode != ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ setEtag(w, etag)
+
+ response := CopyObjectResult{
+ ETag: etag,
+ LastModified: time.Now(),
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+
+}
+
+func pathToBucketAndObject(path string) (bucket, object string) {
+ path = strings.TrimPrefix(path, "/")
+ parts := strings.SplitN(path, "/", 2)
+ if len(parts) == 2 {
+ return parts[0], "/" + parts[1]
+ }
+ return parts[0], "/"
+}
+
+type CopyPartResult struct {
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+}
+
+func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
+ // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ vars := mux.Vars(r)
+ dstBucket := vars["bucket"]
+ // dstObject := getObject(vars)
+
+ // Copy source path.
+ cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
+ if err != nil {
+ // Save unescaped string as is.
+ cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
+ }
+
+ srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
+ // If source object is empty or bucket is empty, reply back invalid copy source.
+ if srcObject == "" || srcBucket == "" {
+ writeErrorResponse(w, ErrInvalidCopySource, r.URL)
+ return
+ }
+
+ uploadID := r.URL.Query().Get("uploadId")
+ partIDString := r.URL.Query().Get("partNumber")
+
+ partID, err := strconv.Atoi(partIDString)
+ if err != nil {
+ writeErrorResponse(w, ErrInvalidPart, r.URL)
+ return
+ }
+
+ // check partID with maximum part ID for multipart objects
+ if partID > globalMaxPartID {
+ writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
+ return
+ }
+
+ rangeHeader := r.Header.Get("x-amz-copy-source-range")
+
+ dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
+ s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID-1, dstBucket)
+ srcUrl := fmt.Sprintf("http://%s%s/%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject)
+
+ dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader)
+ if err != nil {
+ writeErrorResponse(w, ErrInvalidCopySource, r.URL)
+ return
+ }
+ defer dataReader.Close()
+
+ etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
+
+ if errCode != ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
+
+ setEtag(w, etag)
+
+ response := CopyPartResult{
+ ETag: etag,
+ LastModified: time.Now(),
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+
+}
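`pathToBucketAndObject` keeps the leading slash on the object half so the result concatenates directly into the filer URLs built above (bucket path followed by object). Worked examples of the split; note a bucket-only source yields object "/" rather than the empty string:

```go
package main

import (
	"fmt"
	"strings"
)

// pathToBucketAndObject is copied from the handler above.
func pathToBucketAndObject(path string) (bucket, object string) {
	path = strings.TrimPrefix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], "/" + parts[1]
	}
	return parts[0], "/"
}

func main() {
	for _, src := range []string{"/srcbucket/dir/key.txt", "srcbucket/key.txt", "srcbucket"} {
		bucket, object := pathToBucketAndObject(src)
		fmt.Printf("%-24q -> bucket=%q object=%q\n", src, bucket, object)
	}
}
```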
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 44e93d297..0d287c4ff 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -3,15 +3,19 @@ package s3api
import (
"crypto/md5"
"encoding/json"
+ "encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/server"
"github.com/gorilla/mux"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_server "github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
var (
@@ -40,12 +44,17 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
rAuthType := getRequestAuthType(r)
dataReader := r.Body
+ var s3ErrCode ErrorCode
if rAuthType == authTypeStreamingSigned {
- dataReader = newSignV4ChunkedReader(r)
+ dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+ }
+ if s3ErrCode != ErrNone {
+ writeErrorResponse(w, s3ErrCode, r.URL)
+ return
}
+ defer dataReader.Close()
- uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket)
+ uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
@@ -96,11 +105,11 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
bucket := vars["bucket"]
object := getObject(vars)
- destUrl := fmt.Sprintf("http://%s%s/%s%s",
+ destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
- s3a.proxyToFiler(w, r, destUrl, func(proxyResonse *http.Response, w http.ResponseWriter) {
- for k, v := range proxyResonse.Header {
+ s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) {
+ for k, v := range proxyResponse.Header {
w.Header()[k] = v
}
w.WriteHeader(http.StatusNoContent)
@@ -108,13 +117,94 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
}
+// ObjectIdentifier carries the key name for the object to delete.
+type ObjectIdentifier struct {
+ ObjectName string `xml:"Key"`
+}
+
+// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted.
+type DeleteObjectsRequest struct {
+ // Element to enable quiet mode for the request
+ Quiet bool
+ // List of objects to be deleted
+ Objects []ObjectIdentifier `xml:"Object"`
+}
+
+// DeleteError structure.
+type DeleteError struct {
+ Code string
+ Message string
+ Key string
+}
+
+// DeleteObjectsResponse container for multiple object deletes.
+type DeleteObjectsResponse struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
+
+ // Collection of all deleted objects
+ DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+
+ // Collection of errors deleting certain objects.
+ Errors []DeleteError `xml:"Error,omitempty"`
+}
+
// DeleteMultipleObjectsHandler - Delete multiple objects
func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
- // TODO
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ deleteXMLBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ deleteObjects := &DeleteObjectsRequest{}
+ if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
+ writeErrorResponse(w, ErrMalformedXML, r.URL)
+ return
+ }
+
+ var deletedObjects []ObjectIdentifier
+ var deleteErrors []DeleteError
+
+ s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ for _, object := range deleteObjects.Objects {
+ lastSeparator := strings.LastIndex(object.ObjectName, "/")
+ parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, true
+ if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
+ entryName = object.ObjectName[lastSeparator+1:]
+ parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
+ }
+ parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
+
+ err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+ if err == nil {
+ deletedObjects = append(deletedObjects, object)
+ } else {
+ deleteErrors = append(deleteErrors, DeleteError{
+ Code: "",
+ Message: err.Error(),
+ Key: object.ObjectName,
+ })
+ }
+ }
+ return nil
+ })
+
+ deleteResp := DeleteObjectsResponse{}
+ if !deleteObjects.Quiet {
+ deleteResp.DeletedObjects = deletedObjects
+ }
+ deleteResp.Errors = deleteErrors
+
+ writeSuccessResponseXML(w, encodeResponse(deleteResp))
+
}
-func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) {
+func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
@@ -128,7 +218,6 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
proxyReq.Header.Set("Host", s3a.option.Filer)
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
- proxyReq.Header.Set("Etag-MD5", "True")
for header, values := range r.Header {
for _, value := range values {
@@ -143,22 +232,23 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
- defer resp.Body.Close()
+ defer util.CloseResponse(resp)
responseFn(resp, w)
+
}
-func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) {
- for k, v := range proxyResonse.Header {
+func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
+ for k, v := range proxyResponse.Header {
w.Header()[k] = v
}
- w.WriteHeader(proxyResonse.StatusCode)
- io.Copy(w, proxyResonse.Body)
+ w.WriteHeader(proxyResponse.StatusCode)
+ io.Copy(w, proxyResponse.Body)
}
-func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) {
+func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) {
hash := md5.New()
- var body io.Reader = io.TeeReader(dataReader, hash)
+ var body = io.TeeReader(dataReader, hash)
proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
@@ -178,8 +268,6 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp, postErr := client.Do(proxyReq)
- dataReader.Close()
-
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
return "", ErrInternalError
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index 267d126c5..3282e4176 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -2,20 +2,21 @@ package s3api
import (
"fmt"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/gorilla/mux"
"net/http"
"net/url"
"strconv"
"strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/gorilla/mux"
)
const (
- maxObjectList = 1000 // Limit number of objects in a listObjectsResponse.
- maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse.
- maxPartsList = 1000 // Limit number of parts in a listPartsResponse.
- globalMaxPartID = 10000
+ maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse.
+ maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
+ maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
+ globalMaxPartID = 100000
)
// NewMultipartUploadHandler - New multipart upload.
@@ -27,7 +28,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
- Key: aws.String(object),
+ Key: objectKey(aws.String(object)),
})
if errCode != ErrNone {
@@ -52,7 +53,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucket),
- Key: aws.String(object),
+ Key: objectKey(aws.String(object)),
UploadId: aws.String(uploadID),
})
@@ -78,7 +79,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
Bucket: aws.String(bucket),
- Key: aws.String(object),
+ Key: objectKey(aws.String(object)),
UploadId: aws.String(uploadID),
})
@@ -150,7 +151,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
Bucket: aws.String(bucket),
- Key: aws.String(object),
+ Key: objectKey(aws.String(object)),
MaxParts: aws.Int64(int64(maxParts)),
PartNumberMarker: aws.Int64(int64(partNumberMarker)),
UploadId: aws.String(uploadID),
@@ -192,13 +193,19 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
return
}
+ var s3ErrCode ErrorCode
dataReader := r.Body
if rAuthType == authTypeStreamingSigned {
- dataReader = newSignV4ChunkedReader(r)
+ dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+ }
+ if s3ErrCode != ErrNone {
+ writeErrorResponse(w, s3ErrCode, r.URL)
+ return
}
+ defer dataReader.Close()
- uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part",
- s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1)
+ uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
+ s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
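Note the `partID-1` in the upload URL: parts are stored zero-based with zero-padded names, so a lexical directory listing of the uploads folder returns them in part order. A tiny check of the naming scheme:

```go
package main

import "fmt"

func main() {
	for _, partID := range []int{1, 2, 10} {
		fmt.Printf("%04d.part\n", partID-1) // 0000.part, 0001.part, 0009.part
	}
}
```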
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index d751a3b1d..919e6230a 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -3,22 +3,19 @@ package s3api
import (
"context"
"fmt"
+ "io"
"net/http"
"net/url"
"path/filepath"
"strconv"
+ "strings"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/gorilla/mux"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gorilla/mux"
-)
-
-const (
- maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse.
)
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
@@ -85,13 +82,16 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
writeSuccessResponseXML(w, encodeResponse(response))
}
-func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response *s3.ListObjectsOutput, err error) {
+func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
// convert full path prefix into directory name and prefix for entry name
dir, prefix := filepath.Split(originalPrefix)
+ if strings.HasPrefix(dir, "/") {
+ dir = dir[1:]
+ }
// check filer
- err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.ListEntriesRequest{
Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir),
@@ -101,17 +101,28 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys
InclusiveStartFrom: false,
}
- resp, err := client.ListEntries(context.Background(), request)
+ stream, err := client.ListEntries(context.Background(), request)
if err != nil {
return fmt.Errorf("list buckets: %v", err)
}
- var contents []*s3.Object
- var commonPrefixes []*s3.CommonPrefix
+ var contents []ListEntry
+ var commonPrefixes []PrefixEntry
var counter int
var lastEntryName string
var isTruncated bool
- for _, entry := range resp.Entries {
+
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ break
+ } else {
+ return recvErr
+ }
+ }
+
+ entry := resp.Entry
counter++
if counter > maxKeys {
isTruncated = true
@@ -119,37 +130,40 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys
}
lastEntryName = entry.Name
if entry.IsDirectory {
- commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{
- Prefix: aws.String(fmt.Sprintf("%s%s/", dir, entry.Name)),
- })
+ if entry.Name != ".uploads" {
+ commonPrefixes = append(commonPrefixes, PrefixEntry{
+ Prefix: fmt.Sprintf("%s%s/", dir, entry.Name),
+ })
+ }
} else {
- contents = append(contents, &s3.Object{
- Key: aws.String(fmt.Sprintf("%s%s", dir, entry.Name)),
- LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
- ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
- Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
- Owner: &s3.Owner{
- ID: aws.String("bcaf161ca5fb16fd081034f"),
- DisplayName: aws.String("webfile"),
+ contents = append(contents, ListEntry{
+ Key: fmt.Sprintf("%s%s", dir, entry.Name),
+ LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+ ETag: "\"" + filer2.ETag(entry) + "\"",
+ Size: int64(filer2.TotalSize(entry.Chunks)),
+ Owner: CanonicalUser{
+ ID: fmt.Sprintf("%x", entry.Attributes.Uid),
+ DisplayName: entry.Attributes.UserName,
},
- StorageClass: aws.String("STANDARD"),
+ StorageClass: "STANDARD",
})
}
+
}
- response = &s3.ListObjectsOutput{
- Name: aws.String(bucket),
- Prefix: aws.String(originalPrefix),
- Marker: aws.String(marker),
- NextMarker: aws.String(lastEntryName),
- MaxKeys: aws.Int64(int64(maxKeys)),
- Delimiter: aws.String("/"),
- IsTruncated: aws.Bool(isTruncated),
+ response = ListBucketResult{
+ Name: bucket,
+ Prefix: originalPrefix,
+ Marker: marker,
+ NextMarker: lastEntryName,
+ MaxKeys: maxKeys,
+ Delimiter: "/",
+ IsTruncated: isTruncated,
Contents: contents,
CommonPrefixes: commonPrefixes,
}
- glog.V(4).Infof("read directory: %v, found: %v", request, counter)
+ glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, response)
return nil
})
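The switch from a unary `ListEntries` to a server stream changes the consumption pattern: call `Recv` until it returns `io.EOF`, which signals a normal end of stream rather than a failure. The shape of that loop, with a fake stream standing in for the gRPC one:

```go
package main

import (
	"fmt"
	"io"
)

// recvStream abstracts the gRPC stream's Recv contract: values until io.EOF.
type recvStream interface {
	Recv() (string, error)
}

type fakeStream struct{ items []string }

func (f *fakeStream) Recv() (string, error) {
	if len(f.items) == 0 {
		return "", io.EOF
	}
	item := f.items[0]
	f.items = f.items[1:]
	return item, nil
}

func main() {
	stream := recvStream(&fakeStream{items: []string{"a", "b"}})
	for {
		entry, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break // normal end of stream, not an error
			}
			panic(err)
		}
		fmt.Println(entry)
	}
}
```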
diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_objects_list_handlers_test.go
new file mode 100644
index 000000000..7b87b32fb
--- /dev/null
+++ b/weed/s3api/s3api_objects_list_handlers_test.go
@@ -0,0 +1,38 @@
+package s3api
+
+import (
+ "testing"
+ "time"
+)
+
+func TestListObjectsHandler(t *testing.T) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
+
+ expected := `<?xml version="1.0" encoding="UTF-8"?>
+<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Name>test_container</Name><Prefix></Prefix><Marker></Marker><MaxKeys>1000</MaxKeys><IsTruncated>false</IsTruncated><Contents><Key>1.zip</Key><ETag>&#34;4397da7a7649e8085de9916c240e8166&#34;</ETag><Size>1234567</Size><Owner><ID>65a011niqo39cdf8ec533ec3d1ccaafsa932</ID></Owner><StorageClass>STANDARD</StorageClass><LastModified>2011-04-09T12:34:49Z</LastModified></Contents></ListBucketResult>`
+
+ response := ListBucketResult{
+ Name: "test_container",
+ Prefix: "",
+ Marker: "",
+ NextMarker: "",
+ MaxKeys: 1000,
+ IsTruncated: false,
+ Contents: []ListEntry{{
+ Key: "1.zip",
+ LastModified: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC),
+ ETag: "\"4397da7a7649e8085de9916c240e8166\"",
+ Size: 1234567,
+ Owner: CanonicalUser{
+ ID: "65a011niqo39cdf8ec533ec3d1ccaafsa932",
+ },
+ StorageClass: "STANDARD",
+ }},
+ }
+
+ encoded := string(encodeResponse(response))
+ if encoded != expected {
+ t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
+ }
+}
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index db798a546..773094a5f 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -1,30 +1,30 @@
package s3api
import (
- _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
- "github.com/gorilla/mux"
"net/http"
+
+ "github.com/gorilla/mux"
+ "google.golang.org/grpc"
)
type S3ApiServerOption struct {
Filer string
FilerGrpcAddress string
+ Config string
DomainName string
BucketsPath string
+ GrpcDialOption grpc.DialOption
}
type S3ApiServer struct {
option *S3ApiServerOption
+ iam *IdentityAccessManagement
}
func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) {
s3ApiServer = &S3ApiServer{
option: option,
+ iam: NewIdentityAccessManagement(option.Config, option.DomainName),
}
s3ApiServer.registerRouter(router)
@@ -44,48 +44,47 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
for _, bucket := range routers {
// HeadObject
- bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler)
+ bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ))
// HeadBucket
- bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler)
+ bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN))
+ // CopyObjectPart
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "")
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "")
// AbortMultipartUpload
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
- bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "")
+ bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "")
+ // CopyObject
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE))
// PutObject
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler)
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE))
// PutBucket
- bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler)
+ bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN))
// DeleteObject
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler)
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE))
// DeleteBucket
- bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler)
+ bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE))
// ListObjectsV2
- bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2")
+ bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2")
// GetObject, but directory listing is not supported
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler)
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ))
// ListObjectsV1 (Legacy)
- bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler)
+ bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ))
// DeleteMultipleObjects
- bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "")
+ bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "")
/*
- // CopyObject
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler)
-
- // CopyObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// not implemented
// GetBucketLocation
@@ -107,7 +106,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
}
// ListBuckets
- apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler)
+ apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
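One subtlety in this routing table: gorilla/mux matches routes in registration order, so CopyObjectPart and CopyObject (which add the X-Amz-Copy-Source header constraint) are registered ahead of the plain PutObjectPart/PutObject routes that would otherwise capture copy requests. A minimal reproduction of that first-match behaviour:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// More specific route first: requires the copy-source header.
	r.Methods("PUT").Path("/{object:.+}").
		HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "copy") })
	// Generic PUT second.
	r.Methods("PUT").Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "put") })

	req := httptest.NewRequest("PUT", "http://example/bucket/key", nil)
	req.Header.Set("X-Amz-Copy-Source", "/srcbucket/srckey")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String()) // "copy": the earlier, more specific route wins
}
```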
diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go
new file mode 100644
index 000000000..026766beb
--- /dev/null
+++ b/weed/s3api/s3api_test.go
@@ -0,0 +1,32 @@
+package s3api
+
+import (
+ "testing"
+ "time"
+)
+
+func TestCopyObjectResponse(t *testing.T) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+
+ response := CopyObjectResult{
+ ETag: "12345678",
+ LastModified: time.Now(),
+ }
+
+ println(string(encodeResponse(response)))
+
+}
+
+func TestCopyPartResponse(t *testing.T) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+
+ response := CopyPartResult{
+ ETag: "12345678",
+ LastModified: time.Now(),
+ }
+
+ println(string(encodeResponse(response)))
+
+}
diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go
index df07f3fea..9d62afc4e 100644
--- a/weed/s3api/s3api_xsd_generated.go
+++ b/weed/s3api/s3api_xsd_generated.go
@@ -25,8 +25,8 @@ type BucketLoggingStatus struct {
}
type CanonicalUser struct {
- ID string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ID"`
- DisplayName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DisplayName,omitempty"`
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName,omitempty"`
}
type CopyObject struct {
@@ -506,15 +506,15 @@ func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
}
type ListAllMyBucketsEntry struct {
- Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
- CreationDate time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ Name string `xml:"Name"`
+ CreationDate time.Time `xml:"CreationDate"`
}
func (t *ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T ListAllMyBucketsEntry
var layout struct {
*T
- CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ CreationDate *xsdDateTime `xml:"CreationDate"`
}
layout.T = (*T)(t)
layout.CreationDate = (*xsdDateTime)(&layout.T.CreationDate)
@@ -524,7 +524,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem
type T ListAllMyBucketsEntry
var overlay struct {
*T
- CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ CreationDate *xsdDateTime `xml:"CreationDate"`
}
overlay.T = (*T)(t)
overlay.CreationDate = (*xsdDateTime)(&overlay.T.CreationDate)
@@ -532,7 +532,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem
}
type ListAllMyBucketsList struct {
- Bucket []ListAllMyBucketsEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket,omitempty"`
+ Bucket []ListAllMyBucketsEntry `xml:"Bucket,omitempty"`
}
type ListAllMyBucketsResponse struct {
@@ -577,32 +577,33 @@ type ListBucketResponse struct {
}
type ListBucketResult struct {
- Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
- Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
- Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
- Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker"`
- NextMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextMarker,omitempty"`
- MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"`
- Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
- IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"`
- Contents []ListEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Contents,omitempty"`
- CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"`
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ Marker string `xml:"Marker"`
+ NextMarker string `xml:"NextMarker,omitempty"`
+ MaxKeys int `xml:"MaxKeys"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ IsTruncated bool `xml:"IsTruncated"`
+ Contents []ListEntry `xml:"Contents,omitempty"`
+ CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
}
type ListEntry struct {
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
- ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
- Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"`
- Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
- StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"`
+ Key string `xml:"Key"`
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+ Size int64 `xml:"Size"`
+ Owner CanonicalUser `xml:"Owner,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass"`
}
func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T ListEntry
var layout struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
layout.T = (*T)(t)
layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
@@ -612,7 +613,7 @@ func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type T ListEntry
var overlay struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
overlay.T = (*T)(t)
overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
@@ -674,7 +675,7 @@ type PostResponse struct {
}
type PrefixEntry struct {
- Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+ Prefix string `xml:"Prefix"`
}
type PutObject struct {
@@ -965,10 +966,10 @@ func (b xsdBase64Binary) MarshalText() ([]byte, error) {
type xsdDateTime time.Time
func (t *xsdDateTime) UnmarshalText(text []byte) error {
- return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999")
+ return _unmarshalTime(text, (*time.Time)(t), s3TimeFormat)
}
func (t xsdDateTime) MarshalText() ([]byte, error) {
- return []byte((time.Time)(t).Format("2006-01-02T15:04:05.999999999")), nil
+ return []byte((time.Time)(t).Format(s3TimeFormat)), nil
}
func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if (time.Time)(t).IsZero() {
diff --git a/weed/security/guard.go b/weed/security/guard.go
index dea3b12f2..87ec91ec1 100644
--- a/weed/security/guard.go
+++ b/weed/security/guard.go
@@ -41,21 +41,30 @@ https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go
*/
type Guard struct {
- whiteList []string
- SecretKey Secret
+ whiteList []string
+ SigningKey SigningKey
+ ExpiresAfterSec int
+ ReadSigningKey SigningKey
+ ReadExpiresAfterSec int
- isActive bool
+ isWriteActive bool
}
-func NewGuard(whiteList []string, secretKey string) *Guard {
- g := &Guard{whiteList: whiteList, SecretKey: Secret(secretKey)}
- g.isActive = len(g.whiteList) != 0 || len(g.SecretKey) != 0
+func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSigningKey string, readExpiresAfterSec int) *Guard {
+ g := &Guard{
+ whiteList: whiteList,
+ SigningKey: SigningKey(signingKey),
+ ExpiresAfterSec: expiresAfterSec,
+ ReadSigningKey: SigningKey(readSigningKey),
+ ReadExpiresAfterSec: readExpiresAfterSec,
+ }
+ g.isWriteActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0
return g
}
-func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
- if !g.isActive {
- //if no security needed, just skip all checkings
+func (g *Guard) WhiteList(f http.HandlerFunc) http.HandlerFunc {
+ if !g.isWriteActive {
+ //if no security needed, just skip all checking
return f
}
return func(w http.ResponseWriter, r *http.Request) {
@@ -67,20 +76,6 @@ func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w
}
}
-func (g *Guard) Secure(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
- if !g.isActive {
- //if no security needed, just skip all checkings
- return f
- }
- return func(w http.ResponseWriter, r *http.Request) {
- if err := g.checkJwt(w, r); err != nil {
- w.WriteHeader(http.StatusUnauthorized)
- return
- }
- f(w, r)
- }
-}
-
func GetActualRemoteHost(r *http.Request) (host string, err error) {
host = r.Header.Get("HTTP_X_FORWARDED_FOR")
if host == "" {
@@ -130,33 +125,3 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error {
glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr)
return fmt.Errorf("Not in whitelis: %s", r.RemoteAddr)
}
-
-func (g *Guard) checkJwt(w http.ResponseWriter, r *http.Request) error {
- if g.checkWhiteList(w, r) == nil {
- return nil
- }
-
- if len(g.SecretKey) == 0 {
- return nil
- }
-
- tokenStr := GetJwt(r)
-
- if tokenStr == "" {
- return ErrUnauthorized
- }
-
- // Verify the token
- token, err := DecodeJwt(g.SecretKey, tokenStr)
- if err != nil {
- glog.V(1).Infof("Token verification error from %s: %v", r.RemoteAddr, err)
- return ErrUnauthorized
- }
- if !token.Valid {
- glog.V(1).Infof("Token invliad from %s: %v", r.RemoteAddr, tokenStr)
- return ErrUnauthorized
- }
-
- glog.V(1).Infof("No permission from %s", r.RemoteAddr)
- return fmt.Errorf("No write permisson from %s", r.RemoteAddr)
-}
diff --git a/weed/security/jwt.go b/weed/security/jwt.go
index 46b7efaaf..0bd7fa974 100644
--- a/weed/security/jwt.go
+++ b/weed/security/jwt.go
@@ -1,9 +1,9 @@
package security
import (
+ "fmt"
"net/http"
"strings"
-
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -11,21 +11,29 @@ import (
)
type EncodedJwt string
-type Secret string
+type SigningKey []byte
+
+type SeaweedFileIdClaims struct {
+ Fid string `json:"fid"`
+ jwt.StandardClaims
+}
-func GenJwt(secret Secret, fileId string) EncodedJwt {
- if secret == "" {
+func GenJwt(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJwt {
+ if len(signingKey) == 0 {
return ""
}
- t := jwt.New(jwt.GetSigningMethod("HS256"))
- t.Claims = &jwt.StandardClaims{
- ExpiresAt: time.Now().Add(time.Second * 10).Unix(),
- Subject: fileId,
+ claims := SeaweedFileIdClaims{
+ fileId,
+ jwt.StandardClaims{},
+ }
+ if expiresAfterSec > 0 {
+ claims.ExpiresAt = time.Now().Add(time.Second * time.Duration(expiresAfterSec)).Unix()
}
- encoded, e := t.SignedString(secret)
+ t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+ encoded, e := t.SignedString([]byte(signingKey))
if e != nil {
- glog.V(0).Infof("Failed to sign claims: %v", t.Claims)
+ glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e)
return ""
}
return EncodedJwt(encoded)
@@ -44,31 +52,15 @@ func GetJwt(r *http.Request) EncodedJwt {
}
}
- // Get token from cookie
- if tokenStr == "" {
- cookie, err := r.Cookie("jwt")
- if err == nil {
- tokenStr = cookie.Value
- }
- }
-
return EncodedJwt(tokenStr)
}
-func EncodeJwt(secret Secret, claims *jwt.StandardClaims) (EncodedJwt, error) {
- if secret == "" {
- return "", nil
- }
-
- t := jwt.New(jwt.GetSigningMethod("HS256"))
- t.Claims = claims
- encoded, e := t.SignedString(secret)
- return EncodedJwt(encoded), e
-}
-
-func DecodeJwt(secret Secret, tokenString EncodedJwt) (token *jwt.Token, err error) {
+func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) {
// check exp, nbf
- return jwt.Parse(string(tokenString), func(token *jwt.Token) (interface{}, error) {
- return secret, nil
+ return jwt.ParseWithClaims(string(tokenString), &SeaweedFileIdClaims{}, func(token *jwt.Token) (interface{}, error) {
+ if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+ return nil, fmt.Errorf("unknown token method")
+ }
+ return []byte(signingKey), nil
})
}
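A hedged round-trip of the new claims flow: sign a file id, then parse it back with the same HMAC-only guard, using the dgrijalva/jwt-go API this file already depends on. The key and fid values are made up for the demo:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

type SeaweedFileIdClaims struct {
	Fid string `json:"fid"`
	jwt.StandardClaims
}

func main() {
	signingKey := []byte("demo-signing-key") // stands in for a configured SigningKey

	claims := SeaweedFileIdClaims{
		Fid: "3,01637037d6",
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().Add(10 * time.Second).Unix(),
		},
	}
	encoded, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(signingKey)
	if err != nil {
		panic(err)
	}

	token, err := jwt.ParseWithClaims(encoded, &SeaweedFileIdClaims{}, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unknown token method")
		}
		return signingKey, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(token.Valid, token.Claims.(*SeaweedFileIdClaims).Fid) // true 3,01637037d6
}
```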
diff --git a/weed/security/tls.go b/weed/security/tls.go
new file mode 100644
index 000000000..1832e6e07
--- /dev/null
+++ b/weed/security/tls.go
@@ -0,0 +1,68 @@
+package security
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "io/ioutil"
+
+ "github.com/spf13/viper"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
+ if config == nil {
+ return nil
+ }
+
+ // load cert/key, ca cert
+ cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
+ if err != nil {
+ glog.V(1).Infof("load cert/key error: %v", err)
+ return nil
+ }
+ caCert, err := ioutil.ReadFile(config.GetString(component + ".ca"))
+ if err != nil {
+ glog.V(1).Infof("read ca cert file error: %v", err)
+ return nil
+ }
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCert)
+ ta := credentials.NewTLS(&tls.Config{
+ Certificates: []tls.Certificate{cert},
+ ClientCAs: caCertPool,
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ })
+
+ return grpc.Creds(ta)
+}
+
+func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
+ if config == nil {
+ return grpc.WithInsecure()
+ }
+
+ // load cert/key, cacert
+ cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
+ if err != nil {
+ glog.V(1).Infof("load cert/key error: %v", err)
+ return grpc.WithInsecure()
+ }
+ caCert, err := ioutil.ReadFile(config.GetString(component + ".ca"))
+ if err != nil {
+ glog.V(1).Infof("read ca cert file error: %v", err)
+ return grpc.WithInsecure()
+ }
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCert)
+
+ ta := credentials.NewTLS(&tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: caCertPool,
+ InsecureSkipVerify: true,
+ })
+ return grpc.WithTransportCredentials(ta)
+}
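+
+// A minimal wiring sketch (the "grpc.volume" section name and the dial target
+// are illustrative; any component whose config defines .cert/.key/.ca works):
+//
+//	config := viper.GetViper()
+//	if opt := LoadServerTLS(config, "grpc.volume"); opt != nil {
+//		srv := grpc.NewServer(opt)
+//		_ = srv
+//	}
+//	conn, err := grpc.Dial("localhost:18080", LoadClientTLS(config, "grpc.volume"))
+//	_, _ = conn, err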
diff --git a/weed/sequence/etcd_sequencer.go b/weed/sequence/etcd_sequencer.go
new file mode 100644
index 000000000..1fc378640
--- /dev/null
+++ b/weed/sequence/etcd_sequencer.go
@@ -0,0 +1,296 @@
+package sequence
+
+/*
+Note:
+(1) the sequence is stored in the ETCD cluster and in a local file (sequencer.dat)
+(2) sequences are fetched from the ETCD cluster in batches, and the max sequence id is stored in the local file
+(3) the available sequence range is [currentSeqId, maxSeqId); when currentSeqId >= maxSeqId, a new maxSeqId is fetched.
+*/
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "go.etcd.io/etcd/client"
+)
+
+const (
+ // EtcdKeyPrefix = "/seaweedfs"
+ EtcdKeySequence = "/master/sequence"
+ EtcdContextTimeoutSecond = 100 * time.Second
+ DefaultEtcdSteps uint64 = 500 // internal counter
+ SequencerFileName = "sequencer.dat"
+ FileMaxSequenceLength = 128
+)
+
+type EtcdSequencer struct {
+ sequenceLock sync.Mutex
+
+ // available sequence range : [currentSeqId, maxSeqId)
+ currentSeqId uint64
+ maxSeqId uint64
+
+ keysAPI client.KeysAPI
+ seqFile *os.File
+}
+
+func NewEtcdSequencer(etcdUrls string, metaFolder string) (*EtcdSequencer, error) {
+ file, err := openSequenceFile(metaFolder + "/" + SequencerFileName)
+ if nil != err {
+ return nil, fmt.Errorf("open sequence file fialed, %v", err)
+ }
+
+ cli, err := client.New(client.Config{
+ Endpoints: strings.Split(etcdUrls, ","),
+ Username: "",
+ Password: "",
+ })
+ if err != nil {
+ return nil, err
+ }
+ keysApi := client.NewKeysAPI(cli)
+
+ // TODO: the current sequence id in local file is not used
+ maxValue, _, err := readSequenceFile(file)
+ if err != nil {
+ return nil, fmt.Errorf("read sequence from file failed, %v", err)
+ }
+ glog.V(4).Infof("read sequence from file : %d", maxValue)
+
+ newSeq, err := setMaxSequenceToEtcd(keysApi, maxValue)
+ if err != nil {
+ return nil, err
+ }
+
+ sequencer := &EtcdSequencer{maxSeqId: newSeq,
+ currentSeqId: newSeq,
+ keysAPI: keysApi,
+ seqFile: file,
+ }
+ return sequencer, nil
+}
+
+func (es *EtcdSequencer) NextFileId(count uint64) uint64 {
+ es.sequenceLock.Lock()
+ defer es.sequenceLock.Unlock()
+
+ if (es.currentSeqId + count) >= es.maxSeqId {
+ reqSteps := DefaultEtcdSteps
+ if count > DefaultEtcdSteps {
+ reqSteps += count
+ }
+ maxId, err := batchGetSequenceFromEtcd(es.keysAPI, reqSteps)
+ glog.V(4).Infof("get max sequence id from etcd, %d", maxId)
+ if err != nil {
+ glog.Error(err)
+ return 0
+ }
+ es.currentSeqId, es.maxSeqId = maxId-reqSteps, maxId
+ glog.V(4).Infof("current id : %d, max id : %d", es.currentSeqId, es.maxSeqId)
+
+ if err := writeSequenceFile(es.seqFile, es.maxSeqId, es.currentSeqId); err != nil {
+ glog.Errorf("flush sequence to file failed, %v", err)
+ }
+ }
+
+ ret := es.currentSeqId
+ es.currentSeqId += count
+ return ret
+}
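+
+// Example walk-through (numbers are illustrative): with currentSeqId=100 and
+// maxSeqId=100, NextFileId(3) exhausts the local range, so a batch of
+// DefaultEtcdSteps (500) ids is reserved in etcd via compare-and-swap. If the
+// etcd counter held 100, it becomes 600, the local range becomes [100, 600),
+// and the call returns 100 while advancing currentSeqId to 103.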
+
+/**
+instead of collecting the max value from the volume servers,
+the max value is saved in the local sequence file and the ETCD cluster
+*/
+func (es *EtcdSequencer) SetMax(seenValue uint64) {
+ es.sequenceLock.Lock()
+ defer es.sequenceLock.Unlock()
+ if seenValue > es.maxSeqId {
+ maxId, err := setMaxSequenceToEtcd(es.keysAPI, seenValue)
+ if err != nil {
+ glog.Errorf("set Etcd Max sequence failed : %v", err)
+ return
+ }
+ es.currentSeqId, es.maxSeqId = maxId, maxId
+
+ if err := writeSequenceFile(es.seqFile, maxId, maxId); err != nil {
+ glog.Errorf("flush sequence to file failed, %v", err)
+ }
+ }
+}
+
+func (es *EtcdSequencer) GetMax() uint64 {
+ return es.maxSeqId
+}
+
+func (es *EtcdSequencer) Peek() uint64 {
+ return es.currentSeqId
+}
+
+func batchGetSequenceFromEtcd(kvApi client.KeysAPI, step uint64) (uint64, error) {
+ if step <= 0 {
+ return 0, fmt.Errorf("the step must be large than 1")
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)
+ var endSeqValue uint64 = 0
+ defer cancel()
+ for {
+ getResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})
+ if err != nil {
+ return 0, err
+ }
+ if getResp.Node == nil {
+ continue
+ }
+
+ prevValue := getResp.Node.Value
+ prevSeqValue, err := strconv.ParseUint(prevValue, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("get sequence from etcd failed, %v", err)
+ }
+ endSeqValue = prevSeqValue + step
+ endSeqStr := strconv.FormatUint(endSeqValue, 10)
+
+ _, err = kvApi.Set(ctx, EtcdKeySequence, endSeqStr, &client.SetOptions{PrevValue: prevValue})
+ if err == nil {
+ break
+ }
+ glog.Error(err)
+ }
+
+ return endSeqValue, nil
+}
+
+/**
+setMaxSequenceToEtcd updates the key EtcdKeySequence in the ETCD cluster with maxSeq.
+When the stored value of EtcdKeySequence is equal to or larger than maxSeq,
+the stored value is returned unchanged;
+when the stored value is less than maxSeq,
+the key is updated to maxSeq and maxSeq is returned.
+*/
+func setMaxSequenceToEtcd(kvApi client.KeysAPI, maxSeq uint64) (uint64, error) {
+ maxSeqStr := strconv.FormatUint(maxSeq, 10)
+ ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)
+ defer cancel()
+
+ for {
+ getResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})
+ if err != nil {
+ if ce, ok := err.(client.Error); ok && (ce.Code == client.ErrorCodeKeyNotFound) {
+ _, err := kvApi.Create(ctx, EtcdKeySequence, maxSeqStr)
+ if err == nil {
+ continue
+ }
+ if ce, ok = err.(client.Error); ok && (ce.Code == client.ErrorCodeNodeExist) {
+ continue
+ }
+ return 0, err
+ } else {
+ return 0, err
+ }
+ }
+
+ if getResp.Node == nil {
+ continue
+ }
+ prevSeqStr := getResp.Node.Value
+ prevSeq, err := strconv.ParseUint(prevSeqStr, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ if prevSeq >= maxSeq {
+ return prevSeq, nil
+ }
+
+ _, err = kvApi.Set(ctx, EtcdKeySequence, maxSeqStr, &client.SetOptions{PrevValue: prevSeqStr})
+ if err != nil {
+ return 0, err
+ }
+ }
+}
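+
+// Example (values are illustrative): if etcd currently holds 1000, calling
+// setMaxSequenceToEtcd(kvApi, 800) leaves the key untouched and returns 1000;
+// if etcd holds 500, the key is CAS-updated to 800 and the next loop
+// iteration observes 800 >= 800 and returns 800.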
+
+func openSequenceFile(file string) (*os.File, error) {
+ _, err := os.Stat(file)
+ if os.IsNotExist(err) {
+ fid, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ if err := writeSequenceFile(fid, 1, 0); err != nil {
+ return nil, err
+ }
+ return fid, nil
+ } else {
+ return os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)
+ }
+}
+
+/*
+read sequence and step from sequence file
+*/
+func readSequenceFile(file *os.File) (uint64, uint64, error) {
+ sequence := make([]byte, FileMaxSequenceLength)
+ size, err := file.ReadAt(sequence, 0)
+ if (err != nil) && (err != io.EOF) {
+ err := fmt.Errorf("cannot read file %s, %v", file.Name(), err)
+ return 0, 0, err
+ }
+ sequence = sequence[0:size]
+ seqs := strings.Split(string(sequence), ":")
+ maxId, err := strconv.ParseUint(seqs[0], 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("parse sequence from file failed, %v", err)
+ }
+
+ if len(seqs) > 1 {
+ step, err := strconv.ParseUint(seqs[1], 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("parse sequence from file failed, %v", err)
+ }
+ return maxId, step, nil
+ }
+
+ return maxId, 0, nil
+}
+
+/**
+write the max sequence to the sequence file;
+the step parameter is currently unused, and the max sequence is written in both fields
+*/
+func writeSequenceFile(file *os.File, sequence, step uint64) error {
+ _ = step
+ seqStr := fmt.Sprintf("%d:%d", sequence, sequence)
+ if _, err := file.Seek(0, 0); err != nil {
+ err = fmt.Errorf("cannot seek to the beginning of %s: %v", file.Name(), err)
+ return err
+ }
+ if err := file.Truncate(0); err != nil {
+ return fmt.Errorf("truncate sequence file faield : %v", err)
+ }
+ if _, err := file.WriteString(seqStr); err != nil {
+ return fmt.Errorf("write file %s failed, %v", file.Name(), err)
+ }
+ if err := file.Sync(); err != nil {
+ return fmt.Errorf("flush file %s failed, %v", file.Name(), err)
+ }
+ return nil
+}
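+
+// On-disk format sketch: a single "max:max" string. Round trip (the file
+// path is illustrative):
+//
+//	f, _ := openSequenceFile("/tmp/sequencer.dat")
+//	_ = writeSequenceFile(f, 600, 0) // file now contains "600:600"
+//	maxId, _, _ := readSequenceFile(f) // maxId == 600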
+
+// unit-test helper method
+// func deleteEtcdKey(kvApi client.KeysAPI, key string) error {
+// ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)
+// defer cancel()
+// _, err := kvApi.Delete(ctx, key, &client.DeleteOptions{Dir: false})
+// if err != nil {
+// return err
+// }
+// return nil
+// }
diff --git a/weed/sequence/memory_sequencer.go b/weed/sequence/memory_sequencer.go
index d727dc723..e20c29cc7 100644
--- a/weed/sequence/memory_sequencer.go
+++ b/weed/sequence/memory_sequencer.go
@@ -15,12 +15,12 @@ func NewMemorySequencer() (m *MemorySequencer) {
return
}
-func (m *MemorySequencer) NextFileId(count uint64) (uint64, uint64) {
+func (m *MemorySequencer) NextFileId(count uint64) uint64 {
m.sequenceLock.Lock()
defer m.sequenceLock.Unlock()
ret := m.counter
- m.counter += uint64(count)
- return ret, count
+ m.counter += count
+ return ret
}
func (m *MemorySequencer) SetMax(seenValue uint64) {
diff --git a/weed/sequence/sequence.go b/weed/sequence/sequence.go
index fbdc3b8ef..2258d001b 100644
--- a/weed/sequence/sequence.go
+++ b/weed/sequence/sequence.go
@@ -1,7 +1,7 @@
package sequence
type Sequencer interface {
- NextFileId(count uint64) (uint64, uint64)
+ NextFileId(count uint64) uint64
SetMax(uint64)
Peek() uint64
}
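+
+// A caller-side sketch (the memory sequencer is used here only because it
+// needs no external state; any Sequencer behaves the same way):
+//
+//	var seq Sequencer = NewMemorySequencer()
+//	start := seq.NextFileId(3) // reserves ids start, start+1, start+2
+//	seq.SetMax(start + 100)    // fast-forward past externally observed ids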
diff --git a/weed/server/common.go b/weed/server/common.go
index d88abfdc8..bc6008864 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -1,26 +1,29 @@
package weed_server
import (
- "bytes"
"encoding/json"
"errors"
"fmt"
+ "io"
+ "mime/multipart"
"net/http"
"path/filepath"
"strconv"
"strings"
"time"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
- _ "github.com/chrislusf/seaweedfs/weed/statik"
"github.com/gorilla/mux"
statik "github.com/rakyll/statik/fs"
+
+ _ "github.com/chrislusf/seaweedfs/weed/statik"
)
var serverStats *stats.ServerStats
@@ -43,14 +46,26 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
if err != nil {
return
}
+
+ if httpStatus >= 400 {
+ glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s",
+ r.Method, r.URL.String(), httpStatus, string(bytes))
+ }
+
callback := r.FormValue("callback")
if callback == "" {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpStatus)
+ if httpStatus == http.StatusNotModified {
+ return
+ }
_, err = w.Write(bytes)
} else {
w.Header().Set("Content-Type", "application/javascript")
w.WriteHeader(httpStatus)
+ if httpStatus == http.StatusNotModified {
+ return
+ }
if _, err = w.Write([]uint8(callback)); err != nil {
return
}
@@ -69,7 +84,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
// wrapper for writeJson - just logs errors
func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) {
if err := writeJson(w, r, httpStatus, obj); err != nil {
- glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err)
+ glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err)
+ glog.V(1).Infof("JSON content: %+v", obj)
}
}
func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) {
@@ -82,8 +98,7 @@ func debug(params ...interface{}) {
glog.V(4).Infoln(params...)
}
-func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string) {
- jwt := security.GetJwt(r)
+func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) {
m := make(map[string]interface{})
if r.Method != "POST" {
writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!"))
@@ -91,13 +106,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
debug("parsing upload file...")
- fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := storage.ParseUpload(r)
+ pu, pe := needle.ParseUpload(r, 256*1024*1024)
if pe != nil {
writeJsonError(w, r, http.StatusBadRequest, pe)
return
}
- debug("assigning file id for", fname)
+ debug("assigning file id for", pu.FileName)
r.ParseForm()
count := uint64(1)
if r.FormValue("count") != "" {
@@ -109,32 +124,33 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
ar := &operation.VolumeAssignRequest{
Count: count,
+ DataCenter: r.FormValue("dataCenter"),
Replication: r.FormValue("replication"),
Collection: r.FormValue("collection"),
Ttl: r.FormValue("ttl"),
}
- assignResult, ae := operation.Assign(masterUrl, ar)
+ assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar)
if ae != nil {
writeJsonError(w, r, http.StatusInternalServerError, ae)
return
}
url := "http://" + assignResult.Url + "/" + assignResult.Fid
- if lastModified != 0 {
- url = url + "?ts=" + strconv.FormatUint(lastModified, 10)
+ if pu.ModifiedTime != 0 {
+ url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10)
}
debug("upload file to store", url)
- uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, jwt)
+ uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
- m["fileName"] = fname
+ m["fileName"] = pu.FileName
m["fid"] = assignResult.Fid
m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid
- m["size"] = originalDataSize
+ m["size"] = pu.OriginalDataSize
m["eTag"] = uploadResult.ETag
writeJsonQuiet(w, r, http.StatusCreated, m)
return
@@ -175,19 +191,19 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b
func statsHealthHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func statsCounterHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Counters"] = serverStats
writeJsonQuiet(w, r, http.StatusOK, m)
}
func statsMemoryHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Memory"] = stats.MemStat()
writeJsonQuiet(w, r, http.StatusOK, m)
}
@@ -201,3 +217,107 @@ func handleStaticResources2(r *mux.Router) {
r.Handle("/favicon.ico", http.FileServer(statikFS))
r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS)))
}
+
+func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename string) {
+ if filename != "" {
+ contentDisposition := "inline"
+ if r.FormValue("dl") != "" {
+ if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
+ contentDisposition = "attachment"
+ }
+ }
+ w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
+ }
+}
+
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+ rangeReq := r.Header.Get("Range")
+
+ if rangeReq == "" {
+ w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
+ if err := writeFn(w, 0, totalSize); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ return
+ }
+
+ //the rest is dealing with partial content request
+ //mostly copy from src/pkg/net/http/fs.go
+ ranges, err := parseRange(rangeReq, totalSize)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ if sumRangesSize(ranges) > totalSize {
+ // The total number of bytes in all the ranges
+ // is larger than the size of the file by
+ // itself, so this is probably an attack, or a
+ // dumb client. Ignore the range request.
+ return
+ }
+ if len(ranges) == 0 {
+ return
+ }
+ if len(ranges) == 1 {
+ // RFC 2616, Section 14.16:
+ // "When an HTTP message includes the content of a single
+ // range (for example, a response to a request for a
+ // single range, or to a request for a set of ranges
+ // that overlap without any holes), this content is
+ // transmitted with a Content-Range header, and a
+ // Content-Length header showing the number of bytes
+ // actually transferred.
+ // ...
+ // A response to a request for a single range MUST NOT
+ // be sent using the multipart/byteranges media type."
+ ra := ranges[0]
+ w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
+ w.Header().Set("Content-Range", ra.contentRange(totalSize))
+ w.WriteHeader(http.StatusPartialContent)
+
+ err = writeFn(w, ra.start, ra.length)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ return
+ }
+
+ // process multiple ranges
+ for _, ra := range ranges {
+ if ra.start > totalSize {
+ http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ }
+ sendSize := rangesMIMESize(ranges, mimeType, totalSize)
+ pr, pw := io.Pipe()
+ mw := multipart.NewWriter(pw)
+ w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
+ sendContent := pr
+ defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
+ go func() {
+ for _, ra := range ranges {
+ part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
+ if e != nil {
+ pw.CloseWithError(e)
+ return
+ }
+ if e = writeFn(part, ra.start, ra.length); e != nil {
+ pw.CloseWithError(e)
+ return
+ }
+ }
+ mw.Close()
+ pw.Close()
+ }()
+ if w.Header().Get("Content-Encoding") == "" {
+ w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
+ }
+ w.WriteHeader(http.StatusPartialContent)
+ if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
+ http.Error(w, "Internal Error", http.StatusInternalServerError)
+ return
+ }
+}
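+
+// A caller-side sketch (reader is an assumed io.ReaderAt standing in for the
+// chunked file content; handlers in this package pass a chunk-aware writeFn):
+//
+//	processRangeRequest(r, w, totalSize, mimeType,
+//		func(writer io.Writer, offset int64, size int64) error {
+//			_, err := io.Copy(writer, io.NewSectionReader(reader, offset, size))
+//			return err
+//		})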
diff --git a/weed/server/common_test.go b/weed/server/common_test.go
new file mode 100644
index 000000000..2e6c70bfe
--- /dev/null
+++ b/weed/server/common_test.go
@@ -0,0 +1,31 @@
+package weed_server
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestParseURL(t *testing.T) {
+ if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684"); true {
+ if vid != "1" {
+ t.Errorf("fail to parse vid: %s", vid)
+ }
+ if fid != "06dfa8a684" {
+ t.Errorf("fail to parse fid: %s", fid)
+ }
+ }
+ if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684_1"); true {
+ if vid != "1" {
+ t.Errorf("fail to parse vid: %s", vid)
+ }
+ if fid != "06dfa8a684_1" {
+ t.Errorf("fail to parse fid: %s", fid)
+ }
+ if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 {
+ fid = fid[:sepIndex]
+ }
+ if fid != "06dfa8a684" {
+ t.Errorf("fail to parse fid: %s", fid)
+ }
+ }
+}
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 06589e3c6..17e32731c 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -19,9 +19,15 @@ import (
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
- entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
+ glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name))
+
+ entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name))
+ if err == filer_pb.ErrNotFound {
+ return &filer_pb.LookupDirectoryEntryResponse{}, err
+ }
if err != nil {
- return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err)
+ glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err)
+ return nil, err
}
return &filer_pb.LookupDirectoryEntryResponse{
@@ -30,27 +36,35 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
IsDirectory: entry.IsDirectory(),
Attributes: filer2.EntryAttributeToPb(entry),
Chunks: entry.Chunks,
+ Extended: entry.Extended,
},
}, nil
}
-func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntriesRequest) (*filer_pb.ListEntriesResponse, error) {
+func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
+
+ glog.V(4).Infof("ListEntries %v", req)
limit := int(req.Limit)
if limit == 0 {
limit = fs.option.DirListingLimit
}
- resp := &filer_pb.ListEntriesResponse{}
+ paginationLimit := filer2.PaginationSize
+ if limit < paginationLimit {
+ paginationLimit = limit
+ }
+
lastFileName := req.StartFromFileName
includeLastFile := req.InclusiveStartFrom
for limit > 0 {
- entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024)
+ entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit)
+
if err != nil {
- return nil, err
+ return err
}
if len(entries) == 0 {
- return resp, nil
+ return nil
}
includeLastFile = false
@@ -65,22 +79,31 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie
}
}
- resp.Entries = append(resp.Entries, &filer_pb.Entry{
- Name: entry.Name(),
- IsDirectory: entry.IsDirectory(),
- Chunks: entry.Chunks,
- Attributes: filer2.EntryAttributeToPb(entry),
- })
+ if err := stream.Send(&filer_pb.ListEntriesResponse{
+ Entry: &filer_pb.Entry{
+ Name: entry.Name(),
+ IsDirectory: entry.IsDirectory(),
+ Chunks: entry.Chunks,
+ Attributes: filer2.EntryAttributeToPb(entry),
+ Extended: entry.Extended,
+ },
+ }); err != nil {
+ return err
+ }
+
limit--
+ if limit == 0 {
+ return nil
+ }
}
- if len(resp.Entries) < 1024 {
+ if len(entries) < paginationLimit {
break
}
}
- return resp, nil
+ return nil
}
func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) {
@@ -96,7 +119,11 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol
return nil, err
}
var locs []*filer_pb.Location
- for _, loc := range fs.filer.MasterClient.GetLocations(uint32(vid)) {
+ locations, found := fs.filer.MasterClient.GetLocations(uint32(vid))
+ if !found {
+ continue
+ }
+ for _, loc := range locations {
locs = append(locs, &filer_pb.Location{
Url: loc.Url,
PublicUrl: loc.PublicUrl,
@@ -112,49 +139,60 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
- fullpath := filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name))
- chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
+ glog.V(4).Infof("CreateEntry %v", req)
- fs.filer.DeleteChunks(garbages)
+ resp = &filer_pb.CreateEntryResponse{}
+
+ chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
if req.Entry.Attributes == nil {
- return nil, fmt.Errorf("can not create entry with empty attributes")
+ glog.V(3).Infof("CreateEntry %s: nil attributes", filepath.Join(req.Directory, req.Entry.Name))
+ resp.Error = fmt.Sprintf("can not create entry with empty attributes")
+ return
}
- err = fs.filer.CreateEntry(&filer2.Entry{
- FullPath: fullpath,
+ createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{
+ FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: filer2.PbToEntryAttribute(req.Entry.Attributes),
Chunks: chunks,
- })
+ }, req.OExcl, req.IsFromOtherCluster)
- if err == nil {
+ if createErr == nil {
+ fs.filer.DeleteChunks(garbages)
+ } else {
+ glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr)
+ resp.Error = createErr.Error()
}
- return &filer_pb.CreateEntryResponse{}, err
+ return
}
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {
- fullpath := filepath.Join(req.Directory, req.Entry.Name)
- entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath))
+ glog.V(4).Infof("UpdateEntry %v", req)
+
+ fullpath := util.Join(req.Directory, req.Entry.Name)
+ entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
if err != nil {
return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
}
// remove old chunks if not included in the new ones
- unusedChunks := filer2.FindUnusedFileChunks(entry.Chunks, req.Entry.Chunks)
+ unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks)
chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
newEntry := &filer2.Entry{
- FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
+ FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: entry.Attr,
+ Extended: req.Entry.Extended,
Chunks: chunks,
}
- glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v",
+ glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v",
fullpath, entry.Attr, len(entry.Chunks), entry.Chunks,
- req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks)
+ req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks,
+ entry.Extended, req.Entry.Extended)
if req.Entry.Attributes != nil {
if req.Entry.Attributes.Mtime != 0 {
@@ -175,19 +213,62 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
return &filer_pb.UpdateEntryResponse{}, err
}
- if err = fs.filer.UpdateEntry(entry, newEntry); err == nil {
+ if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil {
fs.filer.DeleteChunks(unusedChunks)
fs.filer.DeleteChunks(garbages)
+ } else {
+ glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err)
}
- fs.filer.NotifyUpdateEvent(entry, newEntry, true)
+ fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster)
return &filer_pb.UpdateEntryResponse{}, err
}
+func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) {
+
+ glog.V(4).Infof("AppendToEntry %v", req)
+
+ fullpath := util.NewFullPath(req.Directory, req.EntryName)
+ var offset int64 = 0
+ entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
+ if err == filer_pb.ErrNotFound {
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ Attr: filer2.Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+ } else {
+ offset = int64(filer2.TotalSize(entry.Chunks))
+ }
+
+ for _, chunk := range req.Chunks {
+ chunk.Offset = offset
+ offset += int64(chunk.Size)
+ }
+
+ entry.Chunks = append(entry.Chunks, req.Chunks...)
+
+ err = fs.filer.CreateEntry(context.Background(), entry, false, false)
+
+ return &filer_pb.AppendToEntryResponse{}, err
+}
+
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
- err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsRecursive, req.IsDeleteData)
- return &filer_pb.DeleteEntryResponse{}, err
+
+ glog.V(4).Infof("DeleteEntry %v", req)
+
+ err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster)
+ resp = &filer_pb.DeleteEntryResponse{}
+ if err != nil {
+ resp.Error = err.Error()
+ }
+ return resp, nil
}
func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {
@@ -196,6 +277,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
if req.TtlSec > 0 {
ttlStr = strconv.Itoa(int(req.TtlSec))
}
+ collection, replication, _ := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)
var altRequest *operation.VolumeAssignRequest
@@ -206,54 +288,73 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
assignRequest := &operation.VolumeAssignRequest{
Count: uint64(req.Count),
- Replication: req.Replication,
- Collection: req.Collection,
+ Replication: replication,
+ Collection: collection,
Ttl: ttlStr,
DataCenter: dataCenter,
}
if dataCenter != "" {
altRequest = &operation.VolumeAssignRequest{
Count: uint64(req.Count),
- Replication: req.Replication,
- Collection: req.Collection,
+ Replication: replication,
+ Collection: collection,
Ttl: ttlStr,
DataCenter: "",
}
}
- assignResult, err := operation.Assign(fs.filer.GetMaster(), assignRequest, altRequest)
+ assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
if err != nil {
- return nil, fmt.Errorf("assign volume: %v", err)
+ glog.V(3).Infof("AssignVolume: %v", err)
+ return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
}
if assignResult.Error != "" {
- return nil, fmt.Errorf("assign volume result: %v", assignResult.Error)
+ glog.V(3).Infof("AssignVolume error: %v", assignResult.Error)
+ return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil
}
return &filer_pb.AssignVolumeResponse{
- FileId: assignResult.Fid,
- Count: int32(assignResult.Count),
- Url: assignResult.Url,
- PublicUrl: assignResult.PublicUrl,
- }, err
+ FileId: assignResult.Fid,
+ Count: int32(assignResult.Count),
+ Url: assignResult.Url,
+ PublicUrl: assignResult.PublicUrl,
+ Auth: string(assignResult.Auth),
+ Collection: collection,
+ Replication: replication,
+ }, nil
}
func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {
- for _, master := range fs.option.Masters {
- _, err = util.Get(fmt.Sprintf("http://%s/col/delete?collection=%s", master, req.Collection))
- }
+ glog.V(4).Infof("DeleteCollection %v", req)
+
+ err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: req.GetCollection(),
+ })
+ return err
+ })
return &filer_pb.DeleteCollectionResponse{}, err
}
func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {
- input := &master_pb.StatisticsRequest{
- Replication: req.Replication,
- Collection: req.Collection,
- Ttl: req.Ttl,
- }
+ var output *master_pb.StatisticsResponse
+
+ err = fs.filer.MasterClient.WithClient(func(masterClient master_pb.SeaweedClient) error {
+ grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{
+ Replication: req.Replication,
+ Collection: req.Collection,
+ Ttl: req.Ttl,
+ })
+ if grpcErr != nil {
+ return grpcErr
+ }
+
+ output = grpcResponse
+ return nil
+ })
- output, err := operation.Statistics(fs.filer.GetMaster(), input)
if err != nil {
return nil, err
}
@@ -264,3 +365,91 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR
FileCount: output.FileCount,
}, nil
}
+
+func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {
+
+ t := &filer_pb.GetFilerConfigurationResponse{
+ Masters: fs.option.Masters,
+ Collection: fs.option.Collection,
+ Replication: fs.option.DefaultReplication,
+ MaxMb: uint32(fs.option.MaxMB),
+ DirBuckets: fs.filer.DirBucketsPath,
+ Cipher: fs.filer.Cipher,
+ }
+
+ glog.V(4).Infof("GetFilerConfiguration: %v", t)
+
+ return t, nil
+}
+
+func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {
+
+ req, err := stream.Recv()
+ if err != nil {
+ return err
+ }
+
+ clientName := fmt.Sprintf("%s:%d", req.Name, req.GrpcPort)
+ m := make(map[string]bool)
+ for _, tp := range req.Resources {
+ m[tp] = true
+ }
+ fs.brokersLock.Lock()
+ fs.brokers[clientName] = m
+ glog.V(0).Infof("+ broker %v", clientName)
+ fs.brokersLock.Unlock()
+
+ defer func() {
+ fs.brokersLock.Lock()
+ delete(fs.brokers, clientName)
+ glog.V(0).Infof("- broker %v: %v", clientName, err)
+ fs.brokersLock.Unlock()
+ }()
+
+ for {
+ if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
+ glog.V(0).Infof("send broker %v: %+v", clientName, err)
+ return err
+ }
+ // println("replied")
+
+ if _, err := stream.Recv(); err != nil {
+ glog.V(0).Infof("recv broker %v: %v", clientName, err)
+ return err
+ }
+ // println("received")
+ }
+
+}
+
+func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) {
+
+ resp = &filer_pb.LocateBrokerResponse{}
+
+ fs.brokersLock.Lock()
+ defer fs.brokersLock.Unlock()
+
+ var localBrokers []*filer_pb.LocateBrokerResponse_Resource
+
+ for b, m := range fs.brokers {
+ if _, found := m[req.Resource]; found {
+ resp.Found = true
+ resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{
+ {
+ GrpcAddresses: b,
+ ResourceCount: int32(len(m)),
+ },
+ }
+ return
+ }
+ localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{
+ GrpcAddresses: b,
+ ResourceCount: int32(len(m)),
+ })
+ }
+
+ resp.Resources = localBrokers
+
+ return resp, nil
+
+}
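+
+// Resolution sketch (names are illustrative): a broker that registered via
+// KeepConnected with Name "broker1", GrpcPort 17777 and Resources ["topicA"]
+// is returned by LocateBroker(Resource: "topicA") as Found=true with a single
+// Resource{GrpcAddresses: "broker1:17777", ResourceCount: 1}; an unknown
+// resource yields Found=false plus the list of all known brokers.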
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
new file mode 100644
index 000000000..9642fec24
--- /dev/null
+++ b/weed/server/filer_grpc_server_rename.go
@@ -0,0 +1,141 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) {
+
+ glog.V(1).Infof("AtomicRenameEntry %v", req)
+
+ ctx, err := fs.filer.BeginTransaction(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory))
+
+ oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName))
+ if err != nil {
+ fs.filer.RollbackTransaction(ctx)
+ return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err)
+ }
+
+ var events MoveEvents
+ moveErr := fs.moveEntry(ctx, oldParent, oldEntry, util.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events)
+ if moveErr != nil {
+ fs.filer.RollbackTransaction(ctx)
+ return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr)
+ } else {
+ if commitError := fs.filer.CommitTransaction(ctx); commitError != nil {
+ fs.filer.RollbackTransaction(ctx)
+ return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, commitError)
+ }
+ }
+
+ return &filer_pb.AtomicRenameEntryResponse{}, nil
+}
+
+func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+
+ if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error {
+ if entry.IsDirectory() {
+ if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err)
+ }
+
+ return nil
+}
+
+func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+
+ currentDirPath := oldParent.Child(entry.Name())
+ newDirPath := newParent.Child(newName)
+
+ glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath)
+
+ lastFileName := ""
+ includeLastFile := false
+ for {
+
+ entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024)
+ if err != nil {
+ return err
+ }
+
+ // println("found", len(entries), "entries under", currentDirPath)
+
+ for _, item := range entries {
+ lastFileName = item.Name()
+ // println("processing", lastFileName)
+ err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events)
+ if err != nil {
+ return err
+ }
+ }
+ if len(entries) < 1024 {
+ break
+ }
+ }
+ return nil
+}
+
+func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents,
+ moveFolderSubEntries func() error) error {
+
+ oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)
+
+ glog.V(1).Infof("moving entry %s => %s", oldPath, newPath)
+
+ if oldPath == newPath {
+ glog.V(1).Infof("skip moving entry %s => %s", oldPath, newPath)
+ return nil
+ }
+
+ // add to new directory
+ newEntry := &filer2.Entry{
+ FullPath: newPath,
+ Attr: entry.Attr,
+ Chunks: entry.Chunks,
+ }
+ createErr := fs.filer.CreateEntry(ctx, newEntry, false, false)
+ if createErr != nil {
+ return createErr
+ }
+
+ events.newEntries = append(events.newEntries, newEntry)
+
+ if moveFolderSubEntries != nil {
+ if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
+ return moveChildrenErr
+ }
+ }
+
+ // delete old entry
+ deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false)
+ if deleteErr != nil {
+ return deleteErr
+ }
+
+ events.oldEntries = append(events.oldEntries, entry)
+
+ return nil
+
+}
+
+type MoveEvents struct {
+ oldEntries []*filer2.Entry
+ newEntries []*filer2.Entry
+}
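+
+// Move-order sketch: renaming /a/dir to /b/dir first creates /b/dir, then
+// recursively moves every child (/a/dir/x becomes /b/dir/x), and deletes
+// /a/dir only afterwards, so an interrupted move leaves duplicate rather than
+// lost entries; AtomicRenameEntry additionally wraps the whole sequence in a
+// transaction and rolls it back on error where the filer store supports it.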
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
new file mode 100644
index 000000000..8ef75cf02
--- /dev/null
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -0,0 +1,136 @@
+package weed_server
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error {
+
+ peerAddress := findClientAddress(stream.Context(), 0)
+
+ clientName := fs.addClient(req.ClientName, peerAddress)
+
+ defer fs.deleteClient(clientName)
+
+ lastReadTime := time.Unix(0, req.SinceNs)
+ glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+
+ eachEventNotificationFn := eachEventNotificationFn(req, stream, clientName)
+
+ eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)
+
+ processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
+ if err != nil {
+ return fmt.Errorf("reading from persisted logs: %v", err)
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
+ err = fs.metaAggregator.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
+ fs.metaAggregator.ListenersLock.Lock()
+ fs.metaAggregator.ListenersCond.Wait()
+ fs.metaAggregator.ListenersLock.Unlock()
+ return true
+ }, eachLogEntryFn)
+
+ return err
+
+}
+
+func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error {
+
+ peerAddress := findClientAddress(stream.Context(), 0)
+
+ clientName := fs.addClient(req.ClientName, peerAddress)
+
+ defer fs.deleteClient(clientName)
+
+ lastReadTime := time.Unix(0, req.SinceNs)
+ glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+
+ eachEventNotificationFn := eachEventNotificationFn(req, stream, clientName)
+
+ eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)
+
+ err := fs.filer.LocalMetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
+ fs.listenersLock.Lock()
+ fs.listenersCond.Wait()
+ fs.listenersLock.Unlock()
+ return true
+ }, eachLogEntryFn)
+
+ return err
+
+}
+
+func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error {
+ return func(logEntry *filer_pb.LogEntry) error {
+ event := &filer_pb.SubscribeMetadataResponse{}
+ if err := proto.Unmarshal(logEntry.Data, event); err != nil {
+ glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ }
+
+ if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil {
+ return err
+ }
+
+ return nil
+ }
+}
+
+func eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer, clientName string) func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
+ return func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
+
+ // get complete path to the file or directory
+ var entryName string
+ if eventNotification.OldEntry != nil {
+ entryName = eventNotification.OldEntry.Name
+ } else if eventNotification.NewEntry != nil {
+ entryName = eventNotification.NewEntry.Name
+ }
+
+ fullpath := util.Join(dirPath, entryName)
+
+ // skip on filer internal meta logs
+ if strings.HasPrefix(fullpath, filer2.SystemLogDir) {
+ return nil
+ }
+
+ if !strings.HasPrefix(fullpath, req.PathPrefix) {
+ return nil
+ }
+
+ message := &filer_pb.SubscribeMetadataResponse{
+ Directory: dirPath,
+ EventNotification: eventNotification,
+ TsNs: tsNs,
+ }
+ if err := stream.Send(message); err != nil {
+ glog.V(0).Infof("=> client %v: %+v", clientName, err)
+ return err
+ }
+ return nil
+ }
+}
+
+func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) {
+ clientName = clientType + "@" + clientAddress
+ glog.V(0).Infof("+ listener %v", clientName)
+ return
+}
+
+func (fs *FilerServer) deleteClient(clientName string) {
+ glog.V(0).Infof("- listener %v", clientName)
+}
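+
+// A subscriber-side sketch (assumes a connected filer_pb.SeaweedFilerClient
+// named client; the prefix and start time are illustrative):
+//
+//	stream, err := client.SubscribeMetadata(context.Background(),
+//		&filer_pb.SubscribeMetadataRequest{
+//			ClientName: "example",
+//			PathPrefix: "/buckets/",
+//			SinceNs:    time.Now().Add(-time.Hour).UnixNano(),
+//		})
+//	for err == nil {
+//		var resp *filer_pb.SubscribeMetadataResponse
+//		if resp, err = stream.Recv(); err == nil {
+//			_ = resp.EventNotification // created/updated/deleted entry event
+//		}
+//	}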
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 9d70e4dac..c6ab6ef0f 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -1,112 +1,185 @@
package weed_server
import (
+ "context"
+ "fmt"
"net/http"
"os"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filer2"
_ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/redis2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
+ _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub"
_ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub"
_ "github.com/chrislusf/seaweedfs/weed/notification/kafka"
_ "github.com/chrislusf/seaweedfs/weed/notification/log"
"github.com/chrislusf/seaweedfs/weed/security"
- "github.com/spf13/viper"
)
type FilerOption struct {
Masters []string
Collection string
DefaultReplication string
- RedirectOnRead bool
DisableDirListing bool
MaxMB int
- SecretKey string
DirListingLimit int
DataCenter string
DefaultLevelDbDir string
+ DisableHttp bool
+ Host string
+ Port uint32
+ recursiveDelete bool
+ Cipher bool
+ Filers []string
}
type FilerServer struct {
- option *FilerOption
- secret security.Secret
- filer *filer2.Filer
+ option *FilerOption
+ secret security.SigningKey
+ filer *filer2.Filer
+ metaAggregator *filer2.MetaAggregator
+ grpcDialOption grpc.DialOption
+
+ // notifying clients
+ listenersLock sync.Mutex
+ listenersCond *sync.Cond
+
+ brokers map[string]map[string]bool
+ brokersLock sync.Mutex
}
func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) {
fs = &FilerServer{
- option: option,
+ option: option,
+ grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),
+ brokers: make(map[string]map[string]bool),
}
+ fs.listenersCond = sync.NewCond(&fs.listenersLock)
if len(option.Masters) == 0 {
glog.Fatal("master list is required!")
}
- fs.filer = filer2.NewFiler(option.Masters)
+ fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() {
+ fs.listenersCond.Broadcast()
+ })
+ fs.filer.Cipher = option.Cipher
+
+ maybeStartMetrics(fs, option)
go fs.filer.KeepConnectedToMaster()
- v := viper.GetViper()
- if !LoadConfiguration("filer", false) {
- v.Set("leveldb.enabled", true)
- v.Set("leveldb.dir", option.DefaultLevelDbDir)
+ v := util.GetViper()
+ if !util.LoadConfiguration("filer", false) {
+ v.Set("leveldb2.enabled", true)
+ v.Set("leveldb2.dir", option.DefaultLevelDbDir)
_, err := os.Stat(option.DefaultLevelDbDir)
if os.IsNotExist(err) {
os.MkdirAll(option.DefaultLevelDbDir, 0755)
}
+ glog.V(0).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir)
}
- LoadConfiguration("notification", false)
+ util.LoadConfiguration("notification", false)
+ fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete")
+ v.SetDefault("filer.options.buckets_folder", "/buckets")
+ fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder")
+ fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync")
fs.filer.LoadConfiguration(v)
- notification.LoadConfiguration(v.Sub("notification"))
+ notification.LoadConfiguration(v, "notification.")
handleStaticResources(defaultMux)
- defaultMux.HandleFunc("/", fs.filerHandler)
+ if !option.DisableHttp {
+ defaultMux.HandleFunc("/", fs.filerHandler)
+ }
if defaultMux != readonlyMux {
readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
}
+ // set peers
+ if strings.HasPrefix(fs.filer.GetStore().GetName(), "leveldb") && len(option.Filers) > 0 {
+ glog.Fatalf("filers using separate leveldb stores should not configure %d peers %+v", len(option.Filers), option.Filers)
+ }
+ if len(option.Filers) == 0 {
+ option.Filers = append(option.Filers, fmt.Sprintf("%s:%d", option.Host, option.Port))
+ }
+ fs.metaAggregator = filer2.NewMetaAggregator(option.Filers, fs.grpcDialOption)
+ fs.metaAggregator.StartLoopSubscribe(time.Now().UnixNano())
+
+ fs.filer.LoadBuckets()
+
+ grace.OnInterrupt(func() {
+ fs.filer.Shutdown()
+ })
+
return fs, nil
}
-func (fs *FilerServer) jwt(fileId string) security.EncodedJwt {
- return security.GenJwt(fs.secret, fileId)
-}
+func maybeStartMetrics(fs *FilerServer, option *FilerOption) {
-func LoadConfiguration(configFileName string, required bool) (loaded bool) {
-
- // find a filer store
- viper.SetConfigName(configFileName) // name of config file (without extension)
- viper.AddConfigPath(".") // optionally look for config in the working directory
- viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths
- viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in
-
- glog.V(0).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed())
-
- if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file
- glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
- if required {
- glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+
- "\n\nPlease follow this example and add a filer.toml file to "+
- "current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+
- " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+
- "\nOr use this command to generate the default toml file\n"+
- " weed scaffold -config=%s -output=.\n\n\n",
- configFileName, configFileName, configFileName)
- } else {
- return false
+ for _, master := range option.Masters {
+ _, err := pb.ParseFilerGrpcAddress(master)
+ if err != nil {
+ glog.Fatalf("invalid master address %s: %v", master, err)
}
}
- return true
+ isConnected := false
+ var metricsAddress string
+ var metricsIntervalSec int
+ var readErr error
+ for !isConnected {
+ for _, master := range option.Masters {
+ metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, master)
+ if readErr == nil {
+ isConnected = true
+ } else {
+ time.Sleep(7 * time.Second)
+ }
+ }
+ }
+ if metricsAddress == "" && metricsIntervalSec <= 0 {
+ return
+ }
+ go stats.LoopPushingMetric("filer", stats.SourceName(option.Port), stats.FilerGather,
+ func() (addr string, intervalSeconds int) {
+ return metricsAddress, metricsIntervalSec
+ })
+}
+func readFilerConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (metricsAddress string, metricsIntervalSec int, err error) {
+ err = operation.WithMasterServerClient(masterAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
+ resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
+ }
+ metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds)
+ return nil
+ })
+ return
}
diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go
index d76d7df8c..b6bfc3b04 100644
--- a/weed/server/filer_server_handlers.go
+++ b/weed/server/filer_server_handlers.go
@@ -2,28 +2,47 @@ package weed_server
import (
"net/http"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/stats"
)
func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
switch r.Method {
case "GET":
+ stats.FilerRequestCounter.WithLabelValues("get").Inc()
fs.GetOrHeadHandler(w, r, true)
+ stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds())
case "HEAD":
+ stats.FilerRequestCounter.WithLabelValues("head").Inc()
fs.GetOrHeadHandler(w, r, false)
+ stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds())
case "DELETE":
+ stats.FilerRequestCounter.WithLabelValues("delete").Inc()
fs.DeleteHandler(w, r)
+ stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds())
case "PUT":
+ stats.FilerRequestCounter.WithLabelValues("put").Inc()
fs.PostHandler(w, r)
+ stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds())
case "POST":
+ stats.FilerRequestCounter.WithLabelValues("post").Inc()
fs.PostHandler(w, r)
+ stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds())
}
}
func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
switch r.Method {
case "GET":
+ stats.FilerRequestCounter.WithLabelValues("get").Inc()
fs.GetOrHeadHandler(w, r, true)
+ stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds())
case "HEAD":
+ stats.FilerRequestCounter.WithLabelValues("head").Inc()
fs.GetOrHeadHandler(w, r, false)
+ stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds())
}
}
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 226de640c..76c924df1 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -1,34 +1,47 @@
package weed_server
import (
+ "bytes"
+ "context"
"io"
"mime"
- "mime/multipart"
"net/http"
- "net/url"
- "path"
+ "path/filepath"
"strconv"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/images"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
+
path := r.URL.Path
- if strings.HasSuffix(path, "/") && len(path) > 1 {
+ isForDirectory := strings.HasSuffix(path, "/")
+ if isForDirectory && len(path) > 1 {
path = path[:len(path)-1]
}
- entry, err := fs.filer.FindEntry(filer2.FullPath(path))
+ entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path))
if err != nil {
if path == "/" {
fs.listDirectoryHandler(w, r)
return
}
- glog.V(1).Infof("Not found %s: %v", path, err)
- w.WriteHeader(http.StatusNotFound)
+ if err == filer_pb.ErrNotFound {
+ glog.V(1).Infof("Not found %s: %v", path, err)
+ stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc()
+ w.WriteHeader(http.StatusNotFound)
+ } else {
+ glog.V(0).Infof("Internal %s: %v", path, err)
+ stats.FilerRequestCounter.WithLabelValues("read.internalerror").Inc()
+ w.WriteHeader(http.StatusInternalServerError)
+ }
return
}
@@ -41,212 +54,81 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
return
}
- if len(entry.Chunks) == 0 {
- glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
- w.WriteHeader(http.StatusNoContent)
- return
- }
-
- w.Header().Set("Accept-Ranges", "bytes")
- if r.Method == "HEAD" {
- w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
- w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat))
- return
- }
-
- if len(entry.Chunks) == 1 {
- fs.handleSingleChunk(w, r, entry)
- return
- }
-
- fs.handleMultipleChunks(w, r, entry)
-
-}
-
-func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
-
- fileId := entry.Chunks[0].FileId
-
- urlString, err := fs.filer.MasterClient.LookupFileId(fileId)
- if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
+ if isForDirectory {
w.WriteHeader(http.StatusNotFound)
return
}
- if fs.option.RedirectOnRead {
- http.Redirect(w, r, urlString, http.StatusFound)
- return
- }
-
- u, _ := url.Parse(urlString)
- q := u.Query()
- for key, values := range r.URL.Query() {
- for _, value := range values {
- q.Add(key, value)
- }
- }
- u.RawQuery = q.Encode()
- request := &http.Request{
- Method: r.Method,
- URL: u,
- Proto: r.Proto,
- ProtoMajor: r.ProtoMajor,
- ProtoMinor: r.ProtoMinor,
- Header: r.Header,
- Body: r.Body,
- Host: r.Host,
- ContentLength: r.ContentLength,
- }
- glog.V(3).Infoln("retrieving from", u)
- resp, do_err := util.Do(request)
- if do_err != nil {
- glog.V(0).Infoln("failing to connect to volume server", do_err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, do_err)
+ if len(entry.Chunks) == 0 {
+ glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
+ stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc()
+ w.WriteHeader(http.StatusNoContent)
return
}
- defer resp.Body.Close()
- for k, v := range resp.Header {
- w.Header()[k] = v
- }
- w.WriteHeader(resp.StatusCode)
- io.Copy(w, resp.Body)
-}
-func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
+ w.Header().Set("Accept-Ranges", "bytes")
+ w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat))
- mimeType := entry.Mime
+ // mime type
+ mimeType := entry.Attr.Mime
if mimeType == "" {
- if ext := path.Ext(entry.Name()); ext != "" {
+ if ext := filepath.Ext(entry.Name()); ext != "" {
mimeType = mime.TypeByExtension(ext)
}
}
if mimeType != "" {
w.Header().Set("Content-Type", mimeType)
}
- setEtag(w, filer2.ETag(entry.Chunks))
- totalSize := int64(filer2.TotalSize(entry.Chunks))
-
- rangeReq := r.Header.Get("Range")
-
- if rangeReq == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
- if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ // if modified since
+ if !entry.Attr.Mtime.IsZero() {
+ w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
+ if r.Header.Get("If-Modified-Since") != "" {
+ if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
+ if t.After(entry.Attr.Mtime) {
+ w.WriteHeader(http.StatusNotModified)
+ return
+ }
+ }
}
- return
}
- //the rest is dealing with partial content request
- //mostly copy from src/pkg/net/http/fs.go
- ranges, err := parseRange(rangeReq, totalSize)
- if err != nil {
- http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return
- }
- if sumRangesSize(ranges) > totalSize {
- // The total number of bytes in all the ranges
- // is larger than the size of the file by
- // itself, so this is probably an attack, or a
- // dumb client. Ignore the range request.
- return
- }
- if len(ranges) == 0 {
- return
- }
- if len(ranges) == 1 {
- // RFC 2616, Section 14.16:
- // "When an HTTP message includes the content of a single
- // range (for example, a response to a request for a
- // single range, or to a request for a set of ranges
- // that overlap without any holes), this content is
- // transmitted with a Content-Range header, and a
- // Content-Length header showing the number of bytes
- // actually transferred.
- // ...
- // A response to a request for a single range MUST NOT
- // be sent using the multipart/byteranges media type."
- ra := ranges[0]
- w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
- w.Header().Set("Content-Range", ra.contentRange(totalSize))
- w.WriteHeader(http.StatusPartialContent)
-
- err = fs.writeContent(w, entry, ra.start, int(ra.length))
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
+ // set etag
+ etag := filer2.ETagEntry(entry)
+ if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
+ w.WriteHeader(http.StatusNotModified)
return
}
+ setEtag(w, etag)
- // process multiple ranges
- for _, ra := range ranges {
- if ra.start > totalSize {
- http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
- return
- }
- }
- sendSize := rangesMIMESize(ranges, mimeType, totalSize)
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent := pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
- if e != nil {
- pw.CloseWithError(e)
- return
- }
- if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil {
- pw.CloseWithError(e)
- return
- }
- }
- mw.Close()
- pw.Close()
- }()
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- w.WriteHeader(http.StatusPartialContent)
- if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
- http.Error(w, "Internal Error", http.StatusInternalServerError)
+ if r.Method == "HEAD" {
+ w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
return
}
-}
-
-func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error {
+ filename := entry.Name()
+ adjustHeadersAfterHEAD(w, r, filename)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, offset, size)
-
- fileId2Url := make(map[string]string)
-
- for _, chunkView := range chunkViews {
-
- urlString, err := fs.filer.MasterClient.LookupFileId(chunkView.FileId)
- if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
- return err
- }
- fileId2Url[chunkView.FileId] = urlString
- }
+ totalSize := int64(filer2.TotalSize(entry.Chunks))
- for _, chunkView := range chunkViews {
- urlString := fileId2Url[chunkView.FileId]
- _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) {
- w.Write(data)
- })
- if err != nil {
- glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
- return err
+ if rangeReq := r.Header.Get("Range"); rangeReq == "" {
+ ext := filepath.Ext(filename)
+ width, height, mode, shouldResize := shouldResizeImages(ext, r)
+ if shouldResize {
+ data, err := filer2.ReadAll(fs.filer.MasterClient, entry.Chunks)
+ if err != nil {
+ glog.Errorf("failed to read %s: %v", path, err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode)
+ io.Copy(w, rs)
+ return
}
}
- return nil
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
+ })
}
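
After this rewrite, all chunk delivery funnels through processRangeRequest, and the handler's remaining job is conditional-request bookkeeping. Below is a minimal sketch of that check order, assuming mtime and etag were already derived from the filer entry; checkConditional is a hypothetical name, not part of this patch:

```go
package sketch

import (
	"net/http"
	"time"
)

// checkConditional mirrors the short-circuits added above. It returns
// true when a 304 was written and the caller should stop before
// streaming any chunk content.
func checkConditional(w http.ResponseWriter, r *http.Request, mtime time.Time, etag string) bool {
	if !mtime.IsZero() {
		w.Header().Set("Last-Modified", mtime.UTC().Format(http.TimeFormat))
		if ims := r.Header.Get("If-Modified-Since"); ims != "" {
			if t, err := time.Parse(http.TimeFormat, ims); err == nil && t.After(mtime) {
				w.WriteHeader(http.StatusNotModified)
				return true
			}
		}
	}
	if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
		w.WriteHeader(http.StatusNotModified)
		return true
	}
	w.Header().Set("ETag", "\""+etag+"\"")
	return false
}
```

Note the same ordering as in the patch: Last-Modified/If-Modified-Since first, then If-None-Match, and the ETag header is only set once a 304 has been ruled out.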
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index bcf7f0eb5..ae28fc1db 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -1,13 +1,15 @@
package weed_server
import (
+ "context"
"net/http"
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
// listDirectoryHandler lists directories and folders under a directory
@@ -15,6 +17,9 @@ import (
// sub directories are listed on the first page, when "lastFileName"
// is empty.
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) {
+
+ stats.FilerRequestCounter.WithLabelValues("list").Inc()
+
path := r.URL.Path
if strings.HasSuffix(path, "/") && len(path) > 1 {
path = path[:len(path)-1]
@@ -27,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
lastFileName := r.FormValue("lastFileName")
- entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(path), lastFileName, false, limit)
+ entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit)
if err != nil {
glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 32f481e74..a642c502a 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -1,11 +1,18 @@
package weed_server
import (
+ "context"
+ "crypto/md5"
"encoding/json"
"errors"
+ "fmt"
+ "io"
"io/ioutil"
+ "mime"
"net/http"
"net/url"
+ "os"
+ filenamePath "path"
"strconv"
"strings"
"time"
@@ -14,8 +21,10 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
- "os"
)
var (
@@ -25,18 +34,23 @@ var (
type FilerPostResult struct {
Name string `json:"name,omitempty"`
- Size uint32 `json:"size,omitempty"`
+ Size int64 `json:"size,omitempty"`
Error string `json:"error,omitempty"`
Fid string `json:"fid,omitempty"`
Url string `json:"url,omitempty"`
}
-func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) {
+func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
+
+ stats.FilerRequestCounter.WithLabelValues("assign").Inc()
+ start := time.Now()
+ defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }()
+
ar := &operation.VolumeAssignRequest{
Count: 1,
Replication: replication,
Collection: collection,
- Ttl: r.URL.Query().Get("ttl"),
+ Ttl: ttlString,
DataCenter: dataCenter,
}
var altRequest *operation.VolumeAssignRequest
@@ -45,12 +59,12 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
Count: 1,
Replication: replication,
Collection: collection,
- Ttl: r.URL.Query().Get("ttl"),
+ Ttl: ttlString,
DataCenter: "",
}
}
- assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest)
+ assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
if ae != nil {
glog.Errorf("failing to assign a file id: %v", ae)
writeJsonError(w, r, http.StatusInternalServerError, ae)
@@ -59,166 +73,293 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
}
fileId = assignResult.Fid
urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
+ if fsync {
+ urlLocation += "?fsync=true"
+ }
+ auth = assignResult.Auth
return
}
func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := context.Background()
+
query := r.URL.Query()
- replication := query.Get("replication")
- if replication == "" {
- replication = fs.option.DefaultReplication
- }
- collection := query.Get("collection")
- if collection == "" {
- collection = fs.option.Collection
- }
+ collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
dataCenter := query.Get("dataCenter")
if dataCenter == "" {
dataCenter = fs.option.DataCenter
}
+ ttlString := r.URL.Query().Get("ttl")
+
+ // read ttl in seconds
+ ttl, err := needle.ReadTTL(ttlString)
+ ttlSeconds := int32(0)
+ if err == nil {
+ ttlSeconds = int32(ttl.Minutes()) * 60
+ }
+
+ if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync); autoChunked {
+ return
+ }
+
+ if fs.option.Cipher {
+ reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync)
+ if err != nil {
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ } else if reply != nil {
+ writeJsonQuiet(w, r, http.StatusCreated, reply)
+ }
- if autoChunked := fs.autoChunk(w, r, replication, collection, dataCenter); autoChunked {
return
}
- fileId, urlLocation, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+ fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
if err != nil || fileId == "" || urlLocation == "" {
glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+ writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter))
return
}
glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
u, _ := url.Parse(urlLocation)
-
- // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off
- // because they need to provide FIDs instead of file paths...
- cm, _ := strconv.ParseBool(query.Get("cm"))
- if cm {
- q := u.Query()
- q.Set("cm", "true")
- u.RawQuery = q.Encode()
+ ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
+ if err != nil {
+ return
}
- glog.V(4).Infoln("post to", u)
- // send request to volume server
- request := &http.Request{
- Method: r.Method,
- URL: u,
- Proto: r.Proto,
- ProtoMajor: r.ProtoMajor,
- ProtoMinor: r.ProtoMinor,
- Header: r.Header,
- Body: r.Body,
- Host: r.Host,
- ContentLength: r.ContentLength,
- }
- resp, do_err := util.Do(request)
- if do_err != nil {
- glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, do_err, r.Method)
- writeJsonError(w, r, http.StatusInternalServerError, do_err)
+ if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil {
return
}
- defer resp.Body.Close()
- etag := resp.Header.Get("ETag")
- resp_body, ra_err := ioutil.ReadAll(resp.Body)
- if ra_err != nil {
- glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, ra_err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, ra_err)
- return
+
+ // send back post result
+ reply := FilerPostResult{
+ Name: ret.Name,
+ Size: int64(ret.Size),
+ Error: ret.Error,
+ Fid: fileId,
+ Url: urlLocation,
}
- glog.V(4).Infoln("post result", string(resp_body))
- var ret operation.UploadResult
- unmarshal_err := json.Unmarshal(resp_body, &ret)
- if unmarshal_err != nil {
- glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(resp_body))
- writeJsonError(w, r, http.StatusInternalServerError, unmarshal_err)
- return
+ setEtag(w, ret.ETag)
+ writeJsonQuiet(w, r, http.StatusCreated, reply)
+}
+
+// update metadata in filer store
+func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string,
+ collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) {
+
+ stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds())
+ }()
+
+ modeStr := r.URL.Query().Get("mode")
+ if modeStr == "" {
+ modeStr = "0660"
}
- if ret.Error != "" {
- glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error)
- writeJsonError(w, r, http.StatusInternalServerError, errors.New(ret.Error))
- return
+ mode, err := strconv.ParseUint(modeStr, 8, 32)
+ if err != nil {
+ glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
+ mode = 0660
}
- // find correct final path
path := r.URL.Path
if strings.HasSuffix(path, "/") {
if ret.Name != "" {
path += ret.Name
- } else {
- fs.filer.DeleteFileByFileId(fileId)
- glog.V(0).Infoln("Can not to write to folder", path, "without a file name!")
- writeJsonError(w, r, http.StatusInternalServerError,
- errors.New("Can not to write to folder "+path+" without a file name"))
- return
}
}
-
- // update metadata in filer store
- existingEntry, err := fs.filer.FindEntry(filer2.FullPath(path))
+ existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
crTime := time.Now()
if err == nil && existingEntry != nil {
- // glog.V(4).Infof("existing %s => %+v", path, existingEntry)
- if existingEntry.IsDirectory() {
- path += "/" + ret.Name
- } else {
- crTime = existingEntry.Crtime
- }
+ crTime = existingEntry.Crtime
}
entry := &filer2.Entry{
- FullPath: filer2.FullPath(path),
+ FullPath: util.FullPath(path),
Attr: filer2.Attr{
Mtime: time.Now(),
Crtime: crTime,
- Mode: 0660,
+ Mode: os.FileMode(mode),
Uid: OS_UID,
Gid: OS_GID,
Replication: replication,
Collection: collection,
- TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
+ TtlSec: ttlSeconds,
+ Mime: ret.Mime,
+ Md5: md5value,
},
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
Size: uint64(ret.Size),
Mtime: time.Now().UnixNano(),
- ETag: etag,
+ ETag: ret.ETag,
}},
}
+ if entry.Attr.Mime == "" {
+ if ext := filenamePath.Ext(path); ext != "" {
+ entry.Attr.Mime = mime.TypeByExtension(ext)
+ }
+ }
// glog.V(4).Infof("saving %s => %+v", path, entry)
- if db_err := fs.filer.CreateEntry(entry); db_err != nil {
- fs.filer.DeleteFileByFileId(fileId)
- glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err)
- writeJsonError(w, r, http.StatusInternalServerError, db_err)
+ if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil {
+ fs.filer.DeleteChunks(entry.Chunks)
+ glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
+ writeJsonError(w, r, http.StatusInternalServerError, dbErr)
+ err = dbErr
return
}
- // send back post result
- reply := FilerPostResult{
- Name: ret.Name,
- Size: ret.Size,
- Error: ret.Error,
- Fid: fileId,
- Url: urlLocation,
+ return nil
+}
+
+// send request to volume server
+func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) {
+
+ stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
+ start := time.Now()
+ defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()
+
+ ret = &operation.UploadResult{}
+
+ md5Hash := md5.New()
+ body := r.Body
+ if r.Method == "PUT" {
+ // only PUT or large chunked files have Md5 in attributes
+ body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash))
}
- setEtag(w, etag)
- writeJsonQuiet(w, r, http.StatusCreated, reply)
+
+ request := &http.Request{
+ Method: r.Method,
+ URL: u,
+ Proto: r.Proto,
+ ProtoMajor: r.ProtoMajor,
+ ProtoMinor: r.ProtoMinor,
+ Header: r.Header,
+ Body: body,
+ Host: r.Host,
+ ContentLength: r.ContentLength,
+ }
+
+ if auth != "" {
+ request.Header.Set("Authorization", "BEARER "+string(auth))
+ }
+ resp, doErr := util.Do(request)
+ if doErr != nil {
+ glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, doErr, r.Method)
+ writeJsonError(w, r, http.StatusInternalServerError, doErr)
+ err = doErr
+ return
+ }
+ defer func() {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }()
+
+ respBody, raErr := ioutil.ReadAll(resp.Body)
+ if raErr != nil {
+ glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error())
+ writeJsonError(w, r, http.StatusInternalServerError, raErr)
+ err = raErr
+ return
+ }
+
+ glog.V(4).Infoln("post result", string(respBody))
+ unmarshalErr := json.Unmarshal(respBody, &ret)
+ if unmarshalErr != nil {
+ glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(respBody))
+ writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr)
+ err = unmarshalErr
+ return
+ }
+ if ret.Error != "" {
+ err = errors.New(ret.Error)
+ glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error)
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ }
+ // find correct final path
+ path := r.URL.Path
+ if strings.HasSuffix(path, "/") {
+ if ret.Name != "" {
+ path += ret.Name
+ } else {
+ err = fmt.Errorf("can not to write to folder %s without a file name", path)
+ fs.filer.DeleteFileByFileId(fileId)
+ glog.V(0).Infoln("Can not to write to folder", path, "without a file name!")
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ }
+ }
+ // use filer calculated md5 ETag, instead of the volume server crc ETag
+ if r.Method == "PUT" {
+ md5value = md5Hash.Sum(nil)
+ }
+ ret.ETag = getEtag(resp)
+ return
}
// curl -X DELETE http://localhost:8888/path/to
// curl -X DELETE http://localhost:8888/path/to?recursive=true
+// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true
+// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true
func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
isRecursive := r.FormValue("recursive") == "true"
+ if !isRecursive && fs.option.recursiveDelete {
+ if r.FormValue("recursive") != "false" {
+ isRecursive = true
+ }
+ }
+ ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true"
+ skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true"
- err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), isRecursive, true)
+ objectPath := r.URL.Path
+ if len(r.URL.Path) > 1 && strings.HasSuffix(objectPath, "/") {
+ objectPath = objectPath[0 : len(objectPath)-1]
+ }
+
+ err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false)
if err != nil {
- glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, err)
+ glog.V(1).Infoln("deleting", objectPath, ":", err.Error())
+ httpStatus := http.StatusInternalServerError
+ if err == filer_pb.ErrNotFound {
+ httpStatus = http.StatusNotFound
+ }
+ writeJsonError(w, r, httpStatus, err)
return
}
w.WriteHeader(http.StatusNoContent)
}
+
+func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) {
+ // default
+ collection = fs.option.Collection
+ replication = fs.option.DefaultReplication
+
+ // query parameters override the defaults
+ if qCollection != "" {
+ collection = qCollection
+ }
+ if qReplication != "" {
+ replication = qReplication
+ }
+
+ // required by buckets folder
+ if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") {
+ bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 {
+ collection = bucketAndObjectKey
+ }
+ if t > 0 {
+ collection = bucketAndObjectKey[:t]
+ }
+ replication, fsync = fs.filer.ReadBucketOption(collection)
+ }
+
+ return
+}
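
The bucket branch of detectCollection is easiest to check against concrete paths. Here is a standalone restatement of just that parsing step, assuming "/buckets" as the DirBucketsPath value:

```go
package main

import (
	"fmt"
	"strings"
)

// bucketFromURI mirrors the prefix/index logic in detectCollection:
// everything between dirBucketsPath and the next slash is the bucket,
// which becomes the collection name.
func bucketFromURI(requestURI, dirBucketsPath string) (collection string, ok bool) {
	if !strings.HasPrefix(requestURI, dirBucketsPath+"/") {
		return "", false
	}
	bucketAndObjectKey := requestURI[len(dirBucketsPath)+1:]
	t := strings.Index(bucketAndObjectKey, "/")
	if t < 0 {
		return bucketAndObjectKey, true // bare bucket, no object key
	}
	if t > 0 {
		return bucketAndObjectKey[:t], true
	}
	return "", false // "//" right after the prefix: keep the defaults
}

func main() {
	for _, uri := range []string{"/buckets/photos/2020/a.jpg", "/buckets/photos", "/other/x"} {
		c, ok := bucketFromURI(uri, "/buckets")
		fmt.Printf("%-28s -> collection=%q matched=%v\n", uri, c, ok)
	}
}
```

Everything between the buckets prefix and the next slash becomes the collection, which is why the per-bucket replication and fsync settings can then be resolved with ReadBucketOption.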
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 4b1745aaa..29546542c 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -1,7 +1,8 @@
package weed_server
import (
- "bytes"
+ "context"
+ "crypto/md5"
"io"
"io/ioutil"
"net/http"
@@ -14,10 +15,13 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
-func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool {
+func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
+ replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) bool {
if r.Method != "POST" {
glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
return false
@@ -53,7 +57,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica
return false
}
- reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection, dataCenter)
+ reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString, fsync)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil {
@@ -62,7 +66,14 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica
return true
}
-func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) {
+func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
+ contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) (filerResult *FilerPostResult, replyerr error) {
+
+ stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds())
+ }()
multipartReader, multipartReaderErr := r.MultipartReader()
if multipartReaderErr != nil {
@@ -78,68 +89,46 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
if fileName != "" {
fileName = path.Base(fileName)
}
+ contentType := part1.Header.Get("Content-Type")
var fileChunks []*filer_pb.FileChunk
- totalBytesRead := int64(0)
- tmpBufferSize := int32(1024 * 1024)
- tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize))
- chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow
- chunkBufOffset := int32(0)
+ md5Hash := md5.New()
+ var partReader = ioutil.NopCloser(io.TeeReader(part1, md5Hash))
+
chunkOffset := int64(0)
- writtenChunks := 0
- filerResult = &FilerPostResult{
- Name: fileName,
- }
+ for chunkOffset < contentLength {
+ limitedReader := io.LimitReader(partReader, int64(chunkSize))
- for totalBytesRead < contentLength {
- tmpBuffer.Reset()
- bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize))
- readFully := readErr != nil && readErr == io.EOF
- tmpBuf := tmpBuffer.Bytes()
- bytesToCopy := tmpBuf[0:int(bytesRead)]
-
- copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy)
- chunkBufOffset = chunkBufOffset + int32(bytesRead)
-
- if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) {
- writtenChunks = writtenChunks + 1
- fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
- if assignErr != nil {
- return nil, assignErr
- }
-
- // upload the chunk to the volume server
- chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10)
- uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId)
- if uploadErr != nil {
- return nil, uploadErr
- }
-
- // Save to chunk manifest structure
- fileChunks = append(fileChunks,
- &filer_pb.FileChunk{
- FileId: fileId,
- Offset: chunkOffset,
- Size: uint64(chunkBufOffset),
- Mtime: time.Now().UnixNano(),
- },
- )
-
- // reset variables for the next chunk
- chunkBufOffset = 0
- chunkOffset = totalBytesRead + int64(bytesRead)
+ // assign one file id for one chunk
+ fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
+ if assignErr != nil {
+ return nil, assignErr
}
- totalBytesRead = totalBytesRead + int64(bytesRead)
+ // upload the chunk to the volume server
+ uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
+ if uploadErr != nil {
+ return nil, uploadErr
+ }
- if bytesRead == 0 || readFully {
+ // if last chunk exhausted the reader exactly at the border
+ if uploadResult.Size == 0 {
break
}
- if readErr != nil {
- return nil, readErr
+ // Save to chunk manifest structure
+ fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
+
+ glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size), contentLength)
+
+ // reset variables for the next chunk
+ chunkOffset = chunkOffset + int64(uploadResult.Size)
+
+ // if last chunk was not at full chunk size, but already exhausted the reader
+ if int64(uploadResult.Size) < int64(chunkSize) {
+ break
}
}
@@ -152,7 +141,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
glog.V(4).Infoln("saving", path)
entry := &filer2.Entry{
- FullPath: filer2.FullPath(path),
+ FullPath: util.FullPath(path),
Attr: filer2.Attr{
Mtime: time.Now(),
Crtime: time.Now(),
@@ -161,30 +150,37 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
Gid: OS_GID,
Replication: replication,
Collection: collection,
- TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
+ TtlSec: ttlSec,
+ Mime: contentType,
+ Md5: md5Hash.Sum(nil),
},
Chunks: fileChunks,
}
- if db_err := fs.filer.CreateEntry(entry); db_err != nil {
- replyerr = db_err
- filerResult.Error = db_err.Error()
- glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err)
+
+ filerResult = &FilerPostResult{
+ Name: fileName,
+ Size: chunkOffset,
+ }
+
+ if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil {
+ fs.filer.DeleteChunks(entry.Chunks)
+ replyerr = dbErr
+ filerResult.Error = dbErr.Error()
+ glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
return
}
return
}
-func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) {
- err = nil
+func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error) {
- ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf))
- uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId))
- if uploadResult != nil {
- glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size)
- }
- if uploadError != nil {
- err = uploadError
- }
- return
+ stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
+ }()
+
+ uploadResult, err, _ := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
+ return uploadResult, err
}
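
The rewritten loop swaps the hand-rolled buffer juggling for a composition of standard readers: one io.LimitReader per chunk, drawn from an io.TeeReader that feeds the running MD5. A minimal sketch of the pattern in isolation, discarding chunk bytes where the real code uploads them to a volume server:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// splitIntoChunks shows the reader composition used by doAutoChunk:
// each pass reads at most chunkSize bytes, and every byte also flows
// through the md5 hash via the TeeReader.
func splitIntoChunks(r io.Reader, contentLength, chunkSize int64) (sizes []int64, md5sum []byte, err error) {
	h := md5.New()
	tee := io.TeeReader(r, h)
	for offset := int64(0); offset < contentLength; {
		n, copyErr := io.Copy(ioutil.Discard, io.LimitReader(tee, chunkSize))
		if copyErr != nil {
			return nil, nil, copyErr
		}
		if n == 0 {
			break // reader ended exactly on a chunk border
		}
		sizes = append(sizes, n)
		offset += n
		if n < chunkSize {
			break // short chunk: the reader is exhausted
		}
	}
	return sizes, h.Sum(nil), nil
}

func main() {
	sizes, sum, _ := splitIntoChunks(strings.NewReader(strings.Repeat("x", 10)), 10, 4)
	fmt.Printf("chunks=%v md5=%x\n", sizes, sum) // chunks=[4 4 2]
}
```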
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
new file mode 100644
index 000000000..17f35838d
--- /dev/null
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -0,0 +1,90 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// handling single chunk POST or PUT upload
+func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request,
+ replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) {
+
+ fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
+
+ if err != nil || fileId == "" || urlLocation == "" {
+ return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+ }
+
+ glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
+
+ // Note: encrypt(gzip(data)), encrypt data first, then gzip
+
+ sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
+
+ pu, err := needle.ParseUpload(r, sizeLimit)
+ uncompressedData := pu.Data
+ if pu.IsGzipped {
+ uncompressedData = pu.UncompressedData
+ }
+ if pu.MimeType == "" {
+ pu.MimeType = http.DetectContentType(uncompressedData)
+ // println("detect2 mimetype to", pu.MimeType)
+ }
+
+ uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth)
+ if uploadError != nil {
+ return nil, fmt.Errorf("upload to volume server: %v", uploadError)
+ }
+
+ // Save to chunk manifest structure
+ fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)}
+
+ // fmt.Printf("uploaded: %+v\n", uploadResult)
+
+ path := r.URL.Path
+ if strings.HasSuffix(path, "/") {
+ if pu.FileName != "" {
+ path += pu.FileName
+ }
+ }
+
+ entry := &filer2.Entry{
+ FullPath: util.FullPath(path),
+ Attr: filer2.Attr{
+ Mtime: time.Now(),
+ Crtime: time.Now(),
+ Mode: 0660,
+ Uid: OS_UID,
+ Gid: OS_GID,
+ Replication: replication,
+ Collection: collection,
+ TtlSec: ttlSeconds,
+ Mime: pu.MimeType,
+ },
+ Chunks: fileChunks,
+ }
+
+ filerResult = &FilerPostResult{
+ Name: pu.FileName,
+ Size: int64(pu.OriginalDataSize),
+ }
+
+ if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil {
+ fs.filer.DeleteChunks(entry.Chunks)
+ err = dbErr
+ filerResult.Error = dbErr.Error()
+ return
+ }
+
+ return
+}
diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go
index d056a4b25..f21cce7d1 100644
--- a/weed/server/filer_ui/breadcrumb.go
+++ b/weed/server/filer_ui/breadcrumb.go
@@ -1,8 +1,9 @@
package master_ui
import (
- "path/filepath"
"strings"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Breadcrumb struct {
@@ -14,10 +15,14 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) {
parts := strings.Split(fullpath, "/")
for i := 0; i < len(parts); i++ {
- crumbs = append(crumbs, Breadcrumb{
- Name: parts[i] + "/",
- Link: "/" + filepath.Join(parts[0:i+1]...),
- })
+ crumb := Breadcrumb{
+ Name: parts[i] + " /",
+ Link: "/" + util.Join(parts[0:i+1]...),
+ }
+ if !strings.HasSuffix(crumb.Link, "/") {
+ crumb.Link += "/"
+ }
+ crumbs = append(crumbs, crumb)
}
return
diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go
index e31685ea0..e532b27e2 100644
--- a/weed/server/filer_ui/templates.go
+++ b/weed/server/filer_ui/templates.go
@@ -50,7 +50,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ range $entry := .Breadcrumbs }}
-
+
{{ $entry.Name }}
{{ end }}
@@ -78,20 +78,19 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{end}}
-
+
{{if $entry.IsDirectory}}
{{else}}
- {{ $entry.Mime }}
+ {{ $entry.Mime }}
{{end}}
-
+
{{if $entry.IsDirectory}}
{{else}}
- {{ $entry.Size | humanizeBytes }}
-
+ {{ $entry.Size | humanizeBytes }}
{{end}}
-
+
{{ $entry.Timestamp.Format "2006-01-02 15:04" }}
@@ -162,7 +161,7 @@ function uploadFile(file, i) {
var url = window.location.href
var xhr = new XMLHttpRequest()
var formData = new FormData()
- xhr.open('POST', url, true)
+ xhr.open('POST', url, false)
formData.append('file', file)
xhr.send(formData)
diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go
index 93dce59d8..1ee214deb 100644
--- a/weed/server/master_grpc_server.go
+++ b/weed/server/master_grpc_server.go
@@ -1,16 +1,20 @@
package weed_server
import (
+ "context"
"fmt"
"net"
"strings"
"time"
"github.com/chrislusf/raft"
+ "google.golang.org/grpc/peer"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/topology"
- "google.golang.org/grpc/peer"
)
func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
@@ -20,8 +24,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
defer func() {
if dn != nil {
- glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
+ // if the volume server disconnects and reconnects quickly
+ // the unregister and register can race with each other
t.UnRegisterDataNode(dn)
+ glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
message := &master_pb.VolumeLocation{
Url: dn.Url(),
@@ -30,6 +36,9 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
for _, v := range dn.GetVolumes() {
message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
}
+ for _, s := range dn.GetEcShards() {
+ message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))
+ }
if len(message.DeletedVids) > 0 {
ms.clientChansLock.RLock()
@@ -45,57 +54,109 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
for {
heartbeat, err := stream.Recv()
if err != nil {
+ if dn != nil {
+ glog.Warningf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err)
+ } else {
+ glog.Warningf("SendHeartbeat.Recv: %v", err)
+ }
return err
}
+ t.Sequence.SetMax(heartbeat.MaxFileKey)
+
if dn == nil {
- t.Sequence.SetMax(heartbeat.MaxFileKey)
- if heartbeat.Ip == "" {
- if pr, ok := peer.FromContext(stream.Context()); ok {
- if pr.Addr != net.Addr(nil) {
- heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")]
- glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip)
- }
- }
- }
dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
dc := t.GetOrCreateDataCenter(dcName)
rack := dc.GetOrCreateRack(rackName)
dn = rack.GetOrCreateDataNode(heartbeat.Ip,
int(heartbeat.Port), heartbeat.PublicUrl,
- int(heartbeat.MaxVolumeCount))
+ int64(heartbeat.MaxVolumeCount))
glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
if err := stream.Send(&master_pb.HeartbeatResponse{
- VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024,
- SecretKey: string(ms.guard.SecretKey),
+ VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
+ MetricsAddress: ms.option.MetricsAddress,
+ MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
+ StorageBackends: backend.ToPbStorageBackends(),
}); err != nil {
+ glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err)
return err
}
}
+ if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) {
+ delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount()
+ dn.UpAdjustMaxVolumeCountDelta(delta)
+ }
+
+ glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
message := &master_pb.VolumeLocation{
Url: dn.Url(),
PublicUrl: dn.PublicUrl,
}
- if len(heartbeat.NewVids) > 0 || len(heartbeat.DeletedVids) > 0 {
+ if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {
// process delta volume ids if exists for fast volume id updates
- message.NewVids = append(message.NewVids, heartbeat.NewVids...)
- message.DeletedVids = append(message.DeletedVids, heartbeat.DeletedVids...)
- } else {
+ for _, volInfo := range heartbeat.NewVolumes {
+ message.NewVids = append(message.NewVids, volInfo.Id)
+ }
+ for _, volInfo := range heartbeat.DeletedVolumes {
+ message.DeletedVids = append(message.DeletedVids, volInfo.Id)
+ }
+ // update master internal volume layouts
+ t.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn)
+ }
+
+ if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes {
// process heartbeat.Volumes
newVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn)
for _, v := range newVolumes {
+ glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url())
message.NewVids = append(message.NewVids, uint32(v.Id))
}
for _, v := range deletedVolumes {
+ glog.V(0).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url())
message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
}
}
+ if len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 {
+
+ // update master internal volume layouts
+ t.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)
+
+ for _, s := range heartbeat.NewEcShards {
+ message.NewVids = append(message.NewVids, s.Id)
+ }
+ for _, s := range heartbeat.DeletedEcShards {
+ if dn.HasVolumesById(needle.VolumeId(s.Id)) {
+ continue
+ }
+ message.DeletedVids = append(message.DeletedVids, s.Id)
+ }
+
+ }
+
+ if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {
+ glog.V(1).Infof("master recieved ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
+ newShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn)
+
+ // broadcast the ec vid changes to master clients
+ for _, s := range newShards {
+ message.NewVids = append(message.NewVids, uint32(s.VolumeId))
+ }
+ for _, s := range deletedShards {
+ if dn.HasVolumesById(s.VolumeId) {
+ continue
+ }
+ message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))
+ }
+
+ }
+
if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
ms.clientChansLock.RLock()
- for _, ch := range ms.clientChans {
+ for host, ch := range ms.clientChans {
+ glog.V(0).Infof("master send to %s: %s", host, message.String())
ch <- message
}
ms.clientChansLock.RUnlock()
@@ -103,12 +164,15 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
// tell the volume servers about the leader
newLeader, err := t.Leader()
- if err == nil {
- if err := stream.Send(&master_pb.HeartbeatResponse{
- Leader: newLeader,
- }); err != nil {
- return err
- }
+ if err != nil {
+ glog.Warningf("SendHeartbeat find leader: %v", err)
+ return err
+ }
+ if err := stream.Send(&master_pb.HeartbeatResponse{
+ Leader: newLeader,
+ }); err != nil {
+ glog.Warningf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err)
+ return err
}
}
}
@@ -123,38 +187,16 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
}
if !ms.Topo.IsLeader() {
- return raft.NotLeaderError
- }
-
- // remember client address
- ctx := stream.Context()
- // fmt.Printf("FromContext %+v\n", ctx)
- pr, ok := peer.FromContext(ctx)
- if !ok {
- glog.Error("failed to get peer from ctx")
- return fmt.Errorf("failed to get peer from ctx")
- }
- if pr.Addr == net.Addr(nil) {
- glog.Error("failed to get peer address")
- return fmt.Errorf("failed to get peer address")
+ return ms.informNewLeader(stream)
}
- clientName := req.Name + pr.Addr.String()
- glog.V(0).Infof("+ client %v", clientName)
+ peerAddress := findClientAddress(stream.Context(), req.GrpcPort)
- messageChan := make(chan *master_pb.VolumeLocation)
stopChan := make(chan bool)
- ms.clientChansLock.Lock()
- ms.clientChans[clientName] = messageChan
- ms.clientChansLock.Unlock()
+ clientName, messageChan := ms.addClient(req.Name, peerAddress)
- defer func() {
- glog.V(0).Infof("- client %v", clientName)
- ms.clientChansLock.Lock()
- delete(ms.clientChans, clientName)
- ms.clientChansLock.Unlock()
- }()
+ defer ms.deleteClient(clientName)
for _, message := range ms.Topo.ToVolumeLocations() {
if err := stream.Send(message); err != nil {
@@ -183,12 +225,79 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
}
case <-ticker.C:
if !ms.Topo.IsLeader() {
- return raft.NotLeaderError
+ return ms.informNewLeader(stream)
}
case <-stopChan:
return nil
}
}
+}
+
+func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
+ leader, err := ms.Topo.Leader()
+ if err != nil {
+ glog.Errorf("topo leader: %v", err)
+ return raft.NotLeaderError
+ }
+ if err := stream.Send(&master_pb.VolumeLocation{
+ Leader: leader,
+ }); err != nil {
+ return err
+ }
return nil
}
+
+func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) {
+ clientName = clientType + "@" + clientAddress
+ glog.V(0).Infof("+ client %v", clientName)
+
+ messageChan = make(chan *master_pb.VolumeLocation)
+
+ ms.clientChansLock.Lock()
+ ms.clientChans[clientName] = messageChan
+ ms.clientChansLock.Unlock()
+ return
+}
+
+func (ms *MasterServer) deleteClient(clientName string) {
+ glog.V(0).Infof("- client %v", clientName)
+ ms.clientChansLock.Lock()
+ delete(ms.clientChans, clientName)
+ ms.clientChansLock.Unlock()
+}
+
+func findClientAddress(ctx context.Context, grpcPort uint32) string {
+ // fmt.Printf("FromContext %+v\n", ctx)
+ pr, ok := peer.FromContext(ctx)
+ if !ok {
+ glog.Error("failed to get peer from ctx")
+ return ""
+ }
+ if pr.Addr == net.Addr(nil) {
+ glog.Error("failed to get peer address")
+ return ""
+ }
+ if grpcPort == 0 {
+ return pr.Addr.String()
+ }
+ if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok {
+ externalIP := tcpAddr.IP
+ return fmt.Sprintf("%s:%d", externalIP, grpcPort)
+ }
+ return pr.Addr.String()
+
+}
+
+func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) {
+ resp := &master_pb.ListMasterClientsResponse{}
+ ms.clientChansLock.RLock()
+ defer ms.clientChansLock.RUnlock()
+
+ for k := range ms.clientChans {
+ if strings.HasPrefix(k, req.ClientType+"@") {
+ resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:])
+ }
+ }
+ return resp, nil
+}
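
With addClient and deleteClient factored out, KeepConnected reduces to register, stream, unregister, while SendHeartbeat broadcasts under a read lock. A hedged sketch of that registry pattern with simplified message types; note the real channels are unbuffered, so a slow subscriber can stall the broadcast loop:

```go
package main

import (
	"fmt"
	"sync"
)

// registry mirrors the master's clientChans bookkeeping: add on
// connect, broadcast under RLock, delete on disconnect.
type registry struct {
	mu    sync.RWMutex
	chans map[string]chan string
}

func (g *registry) add(clientType, clientAddress string) (string, chan string) {
	name := clientType + "@" + clientAddress
	ch := make(chan string, 1) // buffered here only to keep the demo from blocking
	g.mu.Lock()
	g.chans[name] = ch
	g.mu.Unlock()
	return name, ch
}

func (g *registry) delete(name string) {
	g.mu.Lock()
	delete(g.chans, name)
	g.mu.Unlock()
}

func (g *registry) broadcast(msg string) {
	g.mu.RLock()
	for _, ch := range g.chans {
		ch <- msg
	}
	g.mu.RUnlock()
}

func main() {
	g := &registry{chans: make(map[string]chan string)}
	name, ch := g.add("filer", "10.0.0.1:18888")
	defer g.delete(name)
	g.broadcast("new vid 7")
	fmt.Println(name, "<-", <-ch)
}
```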
diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go
new file mode 100644
index 000000000..7e7dcb36b
--- /dev/null
+++ b/weed/server/master_grpc_server_admin.go
@@ -0,0 +1,138 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+/*
+How the exclusive lock works
+-----------
+
+Shell
+------
+When the shell locks:
+ * lease an admin token (lockTime, token)
+ * start a goroutine to renew the admin token periodically
+
+When the shell unlocks:
+ * stop the renewal goroutine
+ * send a release lock request
+
+Master
+------
+Master maintains:
+ * randomNumber
+ * lastLockTime
+When the master receives a lease/renew request from the shell
+ If lastLockTime is still fresh {
+ if this is a renewal and the token is valid {
+ // for renew
+ generate the randomNumber => token
+ return
+ }
+ refuse
+ return
+ } else {
+ // for a fresh lease request
+ generate the randomNumber => token
+ return
+ }
+
+When the master receives the release lock request from the shell
+ set lastLockTime to zero
+
+
+The volume server does not need to verify.
+This makes the lock/unlock optional, similar to what golang code usually does.
+
+*/
+
+const (
+ LockDuration = 10 * time.Second
+)
+
+type AdminLock struct {
+ accessSecret int64
+ accessLockTime time.Time
+}
+
+type AdminLocks struct {
+ locks map[string]*AdminLock
+ sync.RWMutex
+}
+
+func NewAdminLocks() *AdminLocks {
+ return &AdminLocks{
+ locks: make(map[string]*AdminLock),
+ }
+}
+
+func (locks *AdminLocks) isLocked(lockName string) bool {
+ locks.RLock()
+ defer locks.RUnlock()
+ adminLock, found := locks.locks[lockName]
+ if !found {
+ return false
+ }
+ return adminLock.accessLockTime.Add(LockDuration).After(time.Now())
+}
+
+func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool {
+ locks.RLock()
+ defer locks.RUnlock()
+ adminLock, found := locks.locks[lockName]
+ if !found {
+ return false
+ }
+ return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token
+}
+
+func (locks *AdminLocks) generateToken(lockName string) (ts time.Time, token int64) {
+ locks.Lock()
+ defer locks.Unlock()
+ lock := &AdminLock{
+ accessSecret: rand.Int63(),
+ accessLockTime: time.Now(),
+ }
+ locks.locks[lockName] = lock
+ return lock.accessLockTime, lock.accessSecret
+}
+
+func (locks *AdminLocks) deleteLock(lockName string) {
+ locks.Lock()
+ defer locks.Unlock()
+ delete(locks.locks, lockName)
+}
+
+func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) {
+ resp := &master_pb.LeaseAdminTokenResponse{}
+
+ if ms.adminLocks.isLocked(req.LockName) {
+ if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) {
+ // for renew
+ ts, token := ms.adminLocks.generateToken(req.LockName)
+ resp.Token, resp.LockTsNs = token, ts.UnixNano()
+ return resp, nil
+ }
+ // refuse since still locked
+ return resp, fmt.Errorf("already locked")
+ }
+ // for fresh lease request
+ ts, token := ms.adminLocks.generateToken(req.LockName)
+ resp.Token, resp.LockTsNs = token, ts.UnixNano()
+ return resp, nil
+}
+
+func (ms *MasterServer) ReleaseAdminToken(ctx context.Context, req *master_pb.ReleaseAdminTokenRequest) (*master_pb.ReleaseAdminTokenResponse, error) {
+ resp := &master_pb.ReleaseAdminTokenResponse{}
+ if ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) {
+ ms.adminLocks.deleteLock(req.LockName)
+ }
+ return resp, nil
+}
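
For the other half of the protocol described in the comment above, here is a hedged sketch of the shell-side lease/renew loop; leaseOnce stands in for the LeaseAdminToken RPC and is purely illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// renewLoop leases once, then renews on a ticker comfortably inside
// the 10s LockDuration, stopping when the caller closes stop (after
// which the caller would send the release request).
func renewLoop(leaseOnce func(prevTsNs, prevToken int64) (tsNs, token int64, err error), stop <-chan struct{}) {
	tsNs, token, err := leaseOnce(0, 0) // fresh lease: no previous token
	if err != nil {
		fmt.Println("lease refused:", err)
		return
	}
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if tsNs, token, err = leaseOnce(tsNs, token); err != nil {
				fmt.Println("renewal refused:", err)
				return
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	fake := func(prevTsNs, prevToken int64) (int64, int64, error) {
		return time.Now().UnixNano(), 42, nil // pretend the master granted it
	}
	go renewLoop(fake, stop)
	time.Sleep(100 * time.Millisecond)
	close(stop)
}
```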
diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go
new file mode 100644
index 000000000..b92d6bcbe
--- /dev/null
+++ b/weed/server/master_grpc_server_collection.go
@@ -0,0 +1,95 @@
+package weed_server
+
+import (
+ "context"
+
+ "github.com/chrislusf/raft"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+)
+
+func (ms *MasterServer) CollectionList(ctx context.Context, req *master_pb.CollectionListRequest) (*master_pb.CollectionListResponse, error) {
+
+ if !ms.Topo.IsLeader() {
+ return nil, raft.NotLeaderError
+ }
+
+ resp := &master_pb.CollectionListResponse{}
+ collections := ms.Topo.ListCollections(req.IncludeNormalVolumes, req.IncludeEcVolumes)
+ for _, c := range collections {
+ resp.Collections = append(resp.Collections, &master_pb.Collection{
+ Name: c,
+ })
+ }
+
+ return resp, nil
+}
+
+func (ms *MasterServer) CollectionDelete(ctx context.Context, req *master_pb.CollectionDeleteRequest) (*master_pb.CollectionDeleteResponse, error) {
+
+ if !ms.Topo.IsLeader() {
+ return nil, raft.NotLeaderError
+ }
+
+ resp := &master_pb.CollectionDeleteResponse{}
+
+ err := ms.doDeleteNormalCollection(req.Name)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = ms.doDeleteEcCollection(req.Name)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error {
+
+ collection, ok := ms.Topo.FindCollection(collectionName)
+ if !ok {
+ return nil
+ }
+
+ for _, server := range collection.ListVolumeServers() {
+ err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
+ Collection: collectionName,
+ })
+ return deleteErr
+ })
+ if err != nil {
+ return err
+ }
+ }
+ ms.Topo.DeleteCollection(collectionName)
+
+ return nil
+}
+
+func (ms *MasterServer) doDeleteEcCollection(collectionName string) error {
+
+ listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName)
+
+ for _, server := range listOfEcServers {
+ err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
+ Collection: collectionName,
+ })
+ return deleteErr
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ ms.Topo.DeleteEcCollection(collectionName)
+
+ return nil
+}
diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go
index ae0819d2d..282c75679 100644
--- a/weed/server/master_grpc_server_volume.go
+++ b/weed/server/master_grpc_server_volume.go
@@ -5,8 +5,11 @@ import (
"fmt"
"github.com/chrislusf/raft"
+
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/topology"
)
@@ -48,25 +51,26 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
}
if req.Replication == "" {
- req.Replication = ms.defaultReplicaPlacement
+ req.Replication = ms.option.DefaultReplicaPlacement
}
- replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication)
+ replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication)
if err != nil {
return nil, err
}
- ttl, err := storage.ReadTTL(req.Ttl)
+ ttl, err := needle.ReadTTL(req.Ttl)
if err != nil {
return nil, err
}
option := &topology.VolumeGrowOption{
- Collection: req.Collection,
- ReplicaPlacement: replicaPlacement,
- Ttl: ttl,
- Prealloacte: ms.preallocate,
- DataCenter: req.DataCenter,
- Rack: req.Rack,
- DataNode: req.DataNode,
+ Collection: req.Collection,
+ ReplicaPlacement: replicaPlacement,
+ Ttl: ttl,
+ Prealloacte: ms.preallocateSize,
+ DataCenter: req.DataCenter,
+ Rack: req.Rack,
+ DataNode: req.DataNode,
+ MemoryMapMaxSizeMb: req.MemoryMapMaxSizeMb,
}
if !ms.Topo.HasWritableVolume(option) {
@@ -75,7 +79,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
}
ms.vgLock.Lock()
if !ms.Topo.HasWritableVolume(option) {
- if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil {
+ if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, int(req.WritableVolumeCount)); err != nil {
ms.vgLock.Unlock()
return nil, fmt.Errorf("Cannot grow volume group! %v", err)
}
@@ -92,6 +96,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
Url: dn.Url(),
PublicUrl: dn.PublicUrl,
Count: count,
+ Auth: string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
}, nil
}
@@ -102,13 +107,13 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic
}
if req.Replication == "" {
- req.Replication = ms.defaultReplicaPlacement
+ req.Replication = ms.option.DefaultReplicaPlacement
}
- replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication)
+ replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication)
if err != nil {
return nil, err
}
- ttl, err := storage.ReadTTL(req.Ttl)
+ ttl, err := needle.ReadTTL(req.Ttl)
if err != nil {
return nil, err
}
@@ -116,11 +121,70 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic
volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl)
stats := volumeLayout.Stats()
+ totalSize := ms.Topo.GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024
+
resp := &master_pb.StatisticsResponse{
- TotalSize: stats.TotalSize,
+ TotalSize: uint64(totalSize),
UsedSize: stats.UsedSize,
FileCount: stats.FileCount,
}
return resp, nil
}
+
+func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) {
+
+ if !ms.Topo.IsLeader() {
+ return nil, raft.NotLeaderError
+ }
+
+ resp := &master_pb.VolumeListResponse{
+ TopologyInfo: ms.Topo.ToTopologyInfo(),
+ VolumeSizeLimitMb: uint64(ms.option.VolumeSizeLimitMB),
+ }
+
+ return resp, nil
+}
+
+func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.LookupEcVolumeRequest) (*master_pb.LookupEcVolumeResponse, error) {
+
+ if !ms.Topo.IsLeader() {
+ return nil, raft.NotLeaderError
+ }
+
+ resp := &master_pb.LookupEcVolumeResponse{}
+
+ ecLocations, found := ms.Topo.LookupEcShards(needle.VolumeId(req.VolumeId))
+
+ if !found {
+ return resp, fmt.Errorf("ec volume %d not found", req.VolumeId)
+ }
+
+ resp.VolumeId = req.VolumeId
+
+ for shardId, shardLocations := range ecLocations.Locations {
+ var locations []*master_pb.Location
+ for _, dn := range shardLocations {
+ locations = append(locations, &master_pb.Location{
+ Url: string(dn.Id()),
+ PublicUrl: dn.PublicUrl,
+ })
+ }
+ resp.ShardIdLocations = append(resp.ShardIdLocations, &master_pb.LookupEcVolumeResponse_EcShardIdLocation{
+ ShardId: uint32(shardId),
+ Locations: locations,
+ })
+ }
+
+ return resp, nil
+}
+
+func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {
+
+ resp := &master_pb.GetMasterConfigurationResponse{
+ MetricsAddress: ms.option.MetricsAddress,
+ MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
+ }
+
+ return resp, nil
+}
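
The growth path in Assign (context lines above) relies on a check, lock, re-check sequence around vgLock so that concurrent assigns trigger at most one grow. The same guard pattern, detached from the topology types:

```go
package main

import (
	"fmt"
	"sync"
)

// ensureWritable mirrors Assign's growth guard: a lock-free check,
// then the same check again under the mutex, so only one concurrent
// caller actually grows volumes.
func ensureWritable(mu *sync.Mutex, hasWritable func() bool, grow func() error) error {
	if hasWritable() {
		return nil
	}
	mu.Lock()
	defer mu.Unlock()
	if !hasWritable() { // re-check: another caller may have grown already
		if err := grow(); err != nil {
			return fmt.Errorf("cannot grow volume group: %v", err)
		}
	}
	return nil
}

func main() {
	var mu sync.Mutex
	grown := false
	if err := ensureWritable(&mu,
		func() bool { return grown },
		func() error { grown = true; return nil }); err != nil {
		fmt.Println("err:", err)
	}
	fmt.Println("grown:", grown)
}
```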
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index f22925e56..9a490bb1f 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -5,27 +5,52 @@ import (
"net/http"
"net/http/httputil"
"net/url"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
"sync"
+ "time"
"github.com/chrislusf/raft"
+ "github.com/gorilla/mux"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/sequence"
+ "github.com/chrislusf/seaweedfs/weed/shell"
"github.com/chrislusf/seaweedfs/weed/topology"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gorilla/mux"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
)
+const (
+ SequencerType = "master.sequencer.type"
+ SequencerEtcdUrls = "master.sequencer.sequencer_etcd_urls"
+)
+
+type MasterOption struct {
+ Host string
+ Port int
+ MetaFolder string
+ VolumeSizeLimitMB uint
+ VolumePreallocate bool
+ // PulseSeconds int
+ DefaultReplicaPlacement string
+ GarbageThreshold float64
+ WhiteList []string
+ DisableHttp bool
+ MetricsAddress string
+ MetricsIntervalSec int
+}
+
type MasterServer struct {
- port int
- metaFolder string
- volumeSizeLimitMB uint
- preallocate int64
- pulseSeconds int
- defaultReplicaPlacement string
- garbageThreshold float64
- guard *security.Guard
+ option *MasterOption
+ guard *security.Guard
+
+ preallocateSize int64
Topo *topology.Topology
vg *topology.VolumeGrowth
@@ -36,56 +61,77 @@ type MasterServer struct {
// notifying clients
clientChansLock sync.RWMutex
clientChans map[string]chan *master_pb.VolumeLocation
+
+ grpcDialOption grpc.DialOption
+
+ MasterClient *wdclient.MasterClient
+
+ adminLocks *AdminLocks
}
-func NewMasterServer(r *mux.Router, port int, metaFolder string,
- volumeSizeLimitMB uint,
- preallocate bool,
- pulseSeconds int,
- defaultReplicaPlacement string,
- garbageThreshold float64,
- whiteList []string,
- secureKey string,
-) *MasterServer {
+func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer {
+
+ v := util.GetViper()
+ signingKey := v.GetString("jwt.signing.key")
+ v.SetDefault("jwt.signing.expires_after_seconds", 10)
+ expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds")
+
+ readSigningKey := v.GetString("jwt.signing.read.key")
+ v.SetDefault("jwt.signing.read.expires_after_seconds", 60)
+ readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds")
+
+ v.SetDefault("master.replication.treat_replication_as_minimums", false)
+ replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums")
var preallocateSize int64
- if preallocate {
- preallocateSize = int64(volumeSizeLimitMB) * (1 << 20)
+ if option.VolumePreallocate {
+ preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20)
}
+
+ grpcDialOption := security.LoadClientTLS(v, "grpc.master")
ms := &MasterServer{
- port: port,
- volumeSizeLimitMB: volumeSizeLimitMB,
- preallocate: preallocateSize,
- pulseSeconds: pulseSeconds,
- defaultReplicaPlacement: defaultReplicaPlacement,
- garbageThreshold: garbageThreshold,
- clientChans: make(map[string]chan *master_pb.VolumeLocation),
+ option: option,
+ preallocateSize: preallocateSize,
+ clientChans: make(map[string]chan *master_pb.VolumeLocation),
+ grpcDialOption: grpcDialOption,
+ MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, peers),
+ adminLocks: NewAdminLocks(),
}
ms.bounedLeaderChan = make(chan int, 16)
- seq := sequence.NewMemorySequencer()
- ms.Topo = topology.NewTopology("topo", seq, uint64(volumeSizeLimitMB)*1024*1024, pulseSeconds)
+
+ seq := ms.createSequencer(option)
+	if seq == nil {
+		glog.Fatalf("failed to create sequencer")
+ }
+ ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, 5, replicationAsMin)
ms.vg = topology.NewDefaultVolumeGrowth()
- glog.V(0).Infoln("Volume Size Limit is", volumeSizeLimitMB, "MB")
-
- ms.guard = security.NewGuard(whiteList, secureKey)
-
- handleStaticResources2(r)
- r.HandleFunc("/", ms.uiStatusHandler)
- r.HandleFunc("/ui/index.html", ms.uiStatusHandler)
- r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler)))
- r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler)))
- r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler)))
- r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler)))
- r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler)))
- r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler)))
- r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler)))
- r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler))
- r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler))
- r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler))
- r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler))
- r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler))
-
- ms.Topo.StartRefreshWritableVolumes(garbageThreshold, ms.preallocate)
+ glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB")
+
+ ms.guard = security.NewGuard(ms.option.WhiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)
+
+ if !ms.option.DisableHttp {
+ handleStaticResources2(r)
+ r.HandleFunc("/", ms.proxyToLeader(ms.uiStatusHandler))
+ r.HandleFunc("/ui/index.html", ms.uiStatusHandler)
+ r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler)))
+ r.HandleFunc("/dir/lookup", ms.guard.WhiteList(ms.dirLookupHandler))
+ r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler)))
+ r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler)))
+ r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler)))
+ r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler)))
+ r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler)))
+ r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler))
+ /*
+ r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler))
+ r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler))
+ r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler))
+ */
+ r.HandleFunc("/{fileId}", ms.redirectHandler)
+ }
+
+ ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOption, ms.option.GarbageThreshold, ms.preallocateSize)
+
+ ms.startAdminScripts()
return ms
}
@@ -98,6 +144,9 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.")
}
})
+ ms.Topo.RaftServer.AddEventListener(raft.StateChangeEventType, func(e raft.Event) {
+ glog.V(0).Infof("state change: %+v", e)
+ })
if ms.Topo.IsLeader() {
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!")
} else {
@@ -107,7 +156,7 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
}
}
-func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
+func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if ms.Topo.IsLeader() {
f(w, r)
@@ -133,8 +182,107 @@ func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Requ
proxy.Transport = util.Transport
proxy.ServeHTTP(w, r)
} else {
- //drop it to the floor
- //writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader()))
+ // drop it to the floor
+ // writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader()))
+ }
+ }
+}
+
+func (ms *MasterServer) startAdminScripts() {
+ var err error
+
+ v := util.GetViper()
+ adminScripts := v.GetString("master.maintenance.scripts")
+ glog.V(0).Infof("adminScripts:\n%v", adminScripts)
+ if adminScripts == "" {
+ return
+ }
+
+ v.SetDefault("master.maintenance.sleep_minutes", 17)
+ sleepMinutes := v.GetInt("master.maintenance.sleep_minutes")
+
+ v.SetDefault("master.filer.default", "localhost:8888")
+ filerHostPort := v.GetString("master.filer.default")
+
+ scriptLines := strings.Split(adminScripts, "\n")
+ if !strings.Contains(adminScripts, "lock") {
+		scriptLines = append([]string{"lock"}, scriptLines...)
+ scriptLines = append(scriptLines, "unlock")
+ }
+
+ masterAddress := "localhost:" + strconv.Itoa(ms.option.Port)
+
+ var shellOptions shell.ShellOptions
+ shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")
+ shellOptions.Masters = &masterAddress
+
+ shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort)
+ shellOptions.Directory = "/"
+ if err != nil {
+ glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err)
+ return
+ }
+
+ commandEnv := shell.NewCommandEnv(shellOptions)
+
+ reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`)
+
+ go commandEnv.MasterClient.KeepConnectedToMaster()
+
+ go func() {
+ commandEnv.MasterClient.WaitUntilConnected()
+
+ c := time.Tick(time.Duration(sleepMinutes) * time.Minute)
+ for range c {
+ if ms.Topo.IsLeader() {
+ for _, line := range scriptLines {
+ for _, c := range strings.Split(line, ";") {
+ processEachCmd(reg, c, commandEnv)
+ }
+ }
+ }
+ }
+ }()
+}
+
+func processEachCmd(reg *regexp.Regexp, line string, commandEnv *shell.CommandEnv) {
+ cmds := reg.FindAllString(line, -1)
+ if len(cmds) == 0 {
+ return
+ }
+ args := make([]string, len(cmds[1:]))
+ for i := range args {
+		args[i] = strings.Trim(cmds[1+i], "\"'")
+ }
+ cmd := strings.ToLower(cmds[0])
+
+ for _, c := range shell.Commands {
+ if c.Name() == cmd {
+ glog.V(0).Infof("executing: %s %v", cmd, args)
+ if err := c.Do(args, commandEnv, os.Stdout); err != nil {
+ glog.V(0).Infof("error: %v", err)
+ }
+ }
+ }
+}
+
+func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer {
+ var seq sequence.Sequencer
+ v := util.GetViper()
+ seqType := strings.ToLower(v.GetString(SequencerType))
+ glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType)
+	switch seqType {
+ case "etcd":
+ var err error
+ urls := v.GetString(SequencerEtcdUrls)
+ glog.V(0).Infof("[%s] : [%s]", SequencerEtcdUrls, urls)
+ seq, err = sequence.NewEtcdSequencer(urls, option.MetaFolder)
+ if err != nil {
+ glog.Error(err)
+ seq = nil
}
+ default:
+ seq = sequence.NewMemorySequencer()
}
+ return seq
}
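
startAdminScripts wraps the configured maintenance script with lock/unlock when the script never takes the admin lock itself, then splits each line on ";" and runs the commands on a timer while this master is leader. A standalone sketch of just that normalization, with made-up example commands:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeScripts mirrors the wrapping logic above: if the script text never
// mentions "lock", surround the whole script with lock/unlock.
func normalizeScripts(adminScripts string) []string {
	scriptLines := strings.Split(adminScripts, "\n")
	if !strings.Contains(adminScripts, "lock") {
		scriptLines = append([]string{"lock"}, scriptLines...)
		scriptLines = append(scriptLines, "unlock")
	}
	return scriptLines
}

func main() {
	script := "ec.encode -fullPercent=95; ec.rebuild -force\nvolume.balance -force"
	for _, line := range normalizeScripts(script) {
		for _, c := range strings.Split(line, ";") {
			fmt.Println(strings.TrimSpace(c))
		}
	}
}
```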
diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go
index a797dddfc..ebcb7efd2 100644
--- a/weed/server/master_server_handlers.go
+++ b/weed/server/master_server_handlers.go
@@ -7,8 +7,9 @@ import (
"strings"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volumeLocations map[string]operation.LookupResult) {
@@ -21,43 +22,77 @@ func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volume
if _, ok := volumeLocations[vid]; ok {
continue
}
- volumeId, err := storage.NewVolumeId(vid)
- if err == nil {
- machines := ms.Topo.Lookup(collection, volumeId)
- if machines != nil {
- var ret []operation.Location
- for _, dn := range machines {
- ret = append(ret, operation.Location{Url: dn.Url(), PublicUrl: dn.PublicUrl})
- }
- volumeLocations[vid] = operation.LookupResult{VolumeId: vid, Locations: ret}
- } else {
- volumeLocations[vid] = operation.LookupResult{VolumeId: vid, Error: fmt.Sprintf("volumeId %s not found.", vid)}
- }
- } else {
- volumeLocations[vid] = operation.LookupResult{VolumeId: vid, Error: fmt.Sprintf("Unknown volumeId format: %s", vid)}
- }
+ volumeLocations[vid] = ms.findVolumeLocation(collection, vid)
}
return
}
-// Takes one volumeId only, can not do batch lookup
+// If "fileId" is provided, this returns the fileId location and a JWT to update or delete the file.
+// If "volumeId" is provided, this only returns the volumeId location
func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) {
vid := r.FormValue("volumeId")
- commaSep := strings.Index(vid, ",")
- if commaSep > 0 {
- vid = vid[0:commaSep]
+ if vid != "" {
+ // backward compatible
+ commaSep := strings.Index(vid, ",")
+ if commaSep > 0 {
+ vid = vid[0:commaSep]
+ }
}
- vids := []string{vid}
- collection := r.FormValue("collection") //optional, but can be faster if too many collections
- volumeLocations := ms.lookupVolumeId(vids, collection)
- location := volumeLocations[vid]
+ fileId := r.FormValue("fileId")
+ if fileId != "" {
+ commaSep := strings.Index(fileId, ",")
+ if commaSep > 0 {
+ vid = fileId[0:commaSep]
+ }
+ }
+ collection := r.FormValue("collection") // optional, but can be faster if too many collections
+ location := ms.findVolumeLocation(collection, vid)
httpStatus := http.StatusOK
- if location.Error != "" {
+ if location.Error != "" || location.Locations == nil {
httpStatus = http.StatusNotFound
+ } else {
+ forRead := r.FormValue("read")
+ isRead := forRead == "yes"
+ ms.maybeAddJwtAuthorization(w, fileId, !isRead)
}
writeJsonQuiet(w, r, httpStatus, location)
}
+// findVolumeLocation finds the volume location from master topo if it is leader,
+// or from master client if not leader
+func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.LookupResult {
+ var locations []operation.Location
+ var err error
+ if ms.Topo.IsLeader() {
+ volumeId, newVolumeIdErr := needle.NewVolumeId(vid)
+ if newVolumeIdErr != nil {
+ err = fmt.Errorf("Unknown volume id %s", vid)
+ } else {
+ machines := ms.Topo.Lookup(collection, volumeId)
+ for _, loc := range machines {
+ locations = append(locations, operation.Location{Url: loc.Url(), PublicUrl: loc.PublicUrl})
+ }
+ }
+ } else {
+ machines, getVidLocationsErr := ms.MasterClient.GetVidLocations(vid)
+ for _, loc := range machines {
+ locations = append(locations, operation.Location{Url: loc.Url, PublicUrl: loc.PublicUrl})
+ }
+ err = getVidLocationsErr
+ }
+ if len(locations) == 0 && err == nil {
+ err = fmt.Errorf("volume id %s not found", vid)
+ }
+ ret := operation.LookupResult{
+ VolumeId: vid,
+ Locations: locations,
+ }
+ if err != nil {
+ ret.Error = err.Error()
+ }
+ return ret
+}
+
func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) {
stats.AssignRequest()
requestedCount, e := strconv.ParseUint(r.FormValue("count"), 10, 64)
@@ -65,6 +100,11 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
requestedCount = 1
}
+ writableVolumeCount, e := strconv.Atoi(r.FormValue("writableVolumeCount"))
+ if e != nil {
+ writableVolumeCount = 0
+ }
+
option, err := ms.getVolumeGrowOption(r)
if err != nil {
writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()})
@@ -79,7 +119,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
ms.vgLock.Lock()
defer ms.vgLock.Unlock()
if !ms.Topo.HasWritableVolume(option) {
- if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil {
+ if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, writableVolumeCount); err != nil {
writeJsonError(w, r, http.StatusInternalServerError,
fmt.Errorf("Cannot grow volume group! %v", err))
return
@@ -88,8 +128,23 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
}
fid, count, dn, err := ms.Topo.PickForWrite(requestedCount, option)
if err == nil {
+ ms.maybeAddJwtAuthorization(w, fid, true)
writeJsonQuiet(w, r, http.StatusOK, operation.AssignResult{Fid: fid, Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count})
} else {
writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()})
}
}
+
+func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId string, isWrite bool) {
+ var encodedJwt security.EncodedJwt
+ if isWrite {
+ encodedJwt = security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId)
+ } else {
+ encodedJwt = security.GenJwt(ms.guard.ReadSigningKey, ms.guard.ReadExpiresAfterSec, fileId)
+ }
+ if encodedJwt == "" {
+ return
+ }
+
+ w.Header().Set("Authorization", "BEARER "+string(encodedJwt))
+}
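
With maybeAddJwtAuthorization in place, /dir/assign responses (and /dir/lookup when a fileId is given) carry a JWT in the Authorization header, prefixed with "BEARER " as above. A sketch of a caller capturing that token; the host and port are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:9333/dir/assign")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// the master attaches the write JWT as "BEARER <token>"
	jwt := strings.TrimPrefix(resp.Header.Get("Authorization"), "BEARER ")
	fmt.Println("jwt for the subsequent upload or delete:", jwt)
}
```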
diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go
index 3a2662908..7595c0171 100644
--- a/weed/server/master_server_handlers_admin.go
+++ b/weed/server/master_server_handlers_admin.go
@@ -2,33 +2,31 @@ package weed_server
import (
"context"
- "errors"
"fmt"
"math/rand"
"net/http"
"strconv"
- "time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/topology"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) {
- collection, ok := ms.Topo.FindCollection(r.FormValue("collection"))
+ collectionName := r.FormValue("collection")
+ collection, ok := ms.Topo.FindCollection(collectionName)
if !ok {
- writeJsonError(w, r, http.StatusBadRequest, fmt.Errorf("collection %s does not exist", r.FormValue("collection")))
+ writeJsonError(w, r, http.StatusBadRequest, fmt.Errorf("collection %s does not exist", collectionName))
return
}
for _, server := range collection.ListVolumeServers() {
- err := operation.WithVolumeServerClient(server.Url(), func(client volume_server_pb.VolumeServerClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
-
- _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{
+ err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
Collection: collection.Name,
})
return deleteErr
@@ -38,29 +36,33 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R
return
}
}
- ms.Topo.DeleteCollection(r.FormValue("collection"))
+ ms.Topo.DeleteCollection(collectionName)
+
+ w.WriteHeader(http.StatusNoContent)
+ return
}
func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Topology"] = ms.Topo.ToMap()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Request) {
gcString := r.FormValue("garbageThreshold")
- gcThreshold := ms.garbageThreshold
+ gcThreshold := ms.option.GarbageThreshold
if gcString != "" {
var err error
gcThreshold, err = strconv.ParseFloat(gcString, 32)
if err != nil {
glog.V(0).Infof("garbageThreshold %s is not a valid float number: %v", gcString, err)
+ writeJsonError(w, r, http.StatusNotAcceptable, fmt.Errorf("garbageThreshold %s is not a valid float number", gcString))
return
}
}
- glog.Infoln("garbageThreshold =", gcThreshold)
- ms.Topo.Vacuum(gcThreshold, ms.preallocate)
+ // glog.Infoln("garbageThreshold =", gcThreshold)
+ ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize)
ms.dirStatusHandler(w, r)
}
@@ -71,17 +73,17 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
writeJsonError(w, r, http.StatusNotAcceptable, err)
return
}
- if err == nil {
- if count, err = strconv.Atoi(r.FormValue("count")); err == nil {
- if ms.Topo.FreeSpace() < count*option.ReplicaPlacement.GetCopyCount() {
- err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*option.ReplicaPlacement.GetCopyCount()))
- } else {
- count, err = ms.vg.GrowByCountAndType(count, option, ms.Topo)
- }
+
+ if count, err = strconv.Atoi(r.FormValue("count")); err == nil {
+ if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) {
+ err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount())
} else {
- err = errors.New("parameter count is not found")
+ count, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo)
}
+ } else {
+		err = fmt.Errorf("cannot parse parameter count %s", r.FormValue("count"))
}
+
if err != nil {
writeJsonError(w, r, http.StatusNotAcceptable, err)
} else {
@@ -91,30 +93,26 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
func (ms *MasterServer) volumeStatusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Volumes"] = ms.Topo.ToVolumeMap()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request) {
vid, _, _, _, _ := parseURLPath(r.URL.Path)
- volumeId, err := storage.NewVolumeId(vid)
- if err != nil {
- debug("parsing error:", err, r.URL.Path)
- return
- }
collection := r.FormValue("collection")
- machines := ms.Topo.Lookup(collection, volumeId)
- if machines != nil && len(machines) > 0 {
+ location := ms.findVolumeLocation(collection, vid)
+ if location.Error == "" {
+ loc := location.Locations[rand.Intn(len(location.Locations))]
var url string
if r.URL.RawQuery != "" {
- url = util.NormalizeUrl(machines[rand.Intn(len(machines))].PublicUrl) + r.URL.Path + "?" + r.URL.RawQuery
+ url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path + "?" + r.URL.RawQuery
} else {
- url = util.NormalizeUrl(machines[rand.Intn(len(machines))].PublicUrl) + r.URL.Path
+ url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path
}
http.Redirect(w, r, url, http.StatusMovedPermanently)
} else {
- writeJsonError(w, r, http.StatusNotFound, fmt.Errorf("volume id %d or collection %s not found", volumeId, collection))
+ writeJsonError(w, r, http.StatusNotFound, fmt.Errorf("volume id %s not found: %s", vid, location.Error))
}
}
@@ -122,17 +120,17 @@ func (ms *MasterServer) selfUrl(r *http.Request) string {
if r.Host != "" {
return r.Host
}
- return "localhost:" + strconv.Itoa(ms.port)
+ return "localhost:" + strconv.Itoa(ms.option.Port)
}
func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) {
if ms.Topo.IsLeader() {
- submitForClientHandler(w, r, ms.selfUrl(r))
+ submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOption)
} else {
masterUrl, err := ms.Topo.Leader()
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else {
- submitForClientHandler(w, r, masterUrl)
+ submitForClientHandler(w, r, masterUrl, ms.grpcDialOption)
}
}
}
@@ -145,17 +143,22 @@ func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) boo
func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGrowOption, error) {
replicationString := r.FormValue("replication")
if replicationString == "" {
- replicationString = ms.defaultReplicaPlacement
+ replicationString = ms.option.DefaultReplicaPlacement
+ }
+ replicaPlacement, err := super_block.NewReplicaPlacementFromString(replicationString)
+ if err != nil {
+ return nil, err
}
- replicaPlacement, err := storage.NewReplicaPlacementFromString(replicationString)
+ ttl, err := needle.ReadTTL(r.FormValue("ttl"))
if err != nil {
return nil, err
}
- ttl, err := storage.ReadTTL(r.FormValue("ttl"))
+ memoryMapMaxSizeMb, err := memory_map.ReadMemoryMapMaxSizeMb(r.FormValue("memoryMapMaxSizeMb"))
if err != nil {
return nil, err
}
- preallocate := ms.preallocate
+
+ preallocate := ms.preallocateSize
if r.FormValue("preallocate") != "" {
preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 10, 64)
if err != nil {
@@ -163,13 +166,14 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr
}
}
volumeGrowOption := &topology.VolumeGrowOption{
- Collection: r.FormValue("collection"),
- ReplicaPlacement: replicaPlacement,
- Ttl: ttl,
- Prealloacte: preallocate,
- DataCenter: r.FormValue("dataCenter"),
- Rack: r.FormValue("rack"),
- DataNode: r.FormValue("dataNode"),
+ Collection: r.FormValue("collection"),
+ ReplicaPlacement: replicaPlacement,
+ Ttl: ttl,
+ Prealloacte: preallocate,
+ DataCenter: r.FormValue("dataCenter"),
+ Rack: r.FormValue("rack"),
+ DataNode: r.FormValue("dataNode"),
+ MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
}
return volumeGrowOption, nil
}
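
Since getVolumeGrowOption reads everything from form values, the grow handler can be driven with a plain query string. A sketch using the parameter names above, with illustrative values and port:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("count", "2")         // read by volumeGrowHandler
	params.Set("replication", "001") // parsed by NewReplicaPlacementFromString
	params.Set("ttl", "3d")          // parsed by needle.ReadTTL
	params.Set("dataCenter", "dc1")

	resp, err := http.Get("http://localhost:9333/vol/grow?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```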
diff --git a/weed/server/master_server_handlers_ui.go b/weed/server/master_server_handlers_ui.go
index f241df87f..9cd58158b 100644
--- a/weed/server/master_server_handlers_ui.go
+++ b/weed/server/master_server_handlers_ui.go
@@ -2,6 +2,7 @@ package weed_server
import (
"net/http"
+ "time"
"github.com/chrislusf/raft"
ui "github.com/chrislusf/seaweedfs/weed/server/master_ui"
@@ -11,7 +12,7 @@ import (
func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) {
infos := make(map[string]interface{})
- infos["Version"] = util.VERSION
+	infos["Up Time"] = time.Since(startTime).String()
args := struct {
Version string
Topology interface{}
@@ -19,7 +20,7 @@ func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
Stats map[string]interface{}
Counters *stats.ServerStats
}{
- util.VERSION,
+ util.Version(),
ms.Topo.ToMap(),
ms.Topo.RaftServer,
infos,
diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go
index f32e8e61b..7189064d0 100644
--- a/weed/server/master_ui/templates.go
+++ b/weed/server/master_ui/templates.go
@@ -41,7 +41,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`
Other Masters:
@@ -76,6 +76,8 @@ var StatusTpl = template.Must(template.New("status").Parse(`
Rack
RemoteAddr
#Volumes
+
Volume Ids
+
#ErasureCodingShards
Max
@@ -88,6 +90,8 @@ var StatusTpl = template.Must(template.New("status").Parse(`
{{ $rack.Id }}
{{ $dn.Url }}
{{ $dn.Volumes }}
+
{{ $dn.VolumeIds}}
+
{{ $dn.EcShards }}
{{ $dn.Max }}
{{ end }}
diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go
index 2cc8252b8..958680d2b 100644
--- a/weed/server/raft_server.go
+++ b/weed/server/raft_server.go
@@ -3,36 +3,37 @@ package weed_server
import (
"encoding/json"
"io/ioutil"
- "math/rand"
"os"
"path"
"reflect"
"sort"
- "strings"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+
"github.com/chrislusf/raft"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/topology"
- "github.com/gorilla/mux"
)
type RaftServer struct {
peers []string // initial peers to join with
raftServer raft.Server
dataDir string
- httpAddr string
- router *mux.Router
+ serverAddr string
topo *topology.Topology
+ *raft.GrpcServer
}
-func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
+func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
s := &RaftServer{
- peers: peers,
- httpAddr: httpAddr,
- dataDir: dataDir,
- router: r,
- topo: topo,
+ peers: peers,
+ serverAddr: serverAddr,
+ dataDir: dataDir,
+ topo: topo,
}
if glog.V(4) {
@@ -42,41 +43,40 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin
raft.RegisterCommand(&topology.MaxVolumeIdCommand{})
var err error
- transporter := raft.NewHTTPTransporter("/cluster", time.Second)
- transporter.Transport.MaxIdleConnsPerHost = 1024
- glog.V(0).Infof("Starting RaftServer with %v", httpAddr)
+ transporter := raft.NewGrpcTransporter(grpcDialOption)
+ glog.V(0).Infof("Starting RaftServer with %v", serverAddr)
+ // always clear previous metadata
+ os.RemoveAll(path.Join(s.dataDir, "conf"))
+ os.RemoveAll(path.Join(s.dataDir, "log"))
+ os.RemoveAll(path.Join(s.dataDir, "snapshot"))
// Clear old cluster configurations if peers are changed
- if oldPeers, changed := isPeersChanged(s.dataDir, httpAddr, s.peers); changed {
+ if oldPeers, changed := isPeersChanged(s.dataDir, serverAddr, s.peers); changed {
glog.V(0).Infof("Peers Change: %v => %v", oldPeers, s.peers)
- os.RemoveAll(path.Join(s.dataDir, "conf"))
- os.RemoveAll(path.Join(s.dataDir, "log"))
- os.RemoveAll(path.Join(s.dataDir, "snapshot"))
}
- s.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, topo, "")
+ s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, nil, topo, "")
if err != nil {
glog.V(0).Infoln(err)
return nil
}
- transporter.Install(s.raftServer, s)
s.raftServer.SetHeartbeatInterval(500 * time.Millisecond)
s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 500 * time.Millisecond)
s.raftServer.Start()
- s.router.HandleFunc("/cluster/status", s.statusHandler).Methods("GET")
-
for _, peer := range s.peers {
- s.raftServer.AddPeer(peer, "http://"+peer)
+ s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer))
}
- time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond)
- if s.raftServer.IsLogEmpty() {
+
+ s.GrpcServer = raft.NewGrpcServer(s.raftServer)
+
+ if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) {
// Initialize the server by joining itself.
glog.V(0).Infoln("Initializing new cluster")
_, err := s.raftServer.Do(&raft.DefaultJoinCommand{
Name: s.raftServer.Name(),
- ConnectionString: "http://" + s.httpAddr,
+ ConnectionString: pb.ServerToGrpcAddress(s.serverAddr),
})
if err != nil {
@@ -94,7 +94,7 @@ func (s *RaftServer) Peers() (members []string) {
peers := s.raftServer.Peers()
for _, p := range peers {
- members = append(members, strings.TrimPrefix(p.ConnectionString, "http://"))
+ members = append(members, p.Name)
}
return
@@ -113,7 +113,7 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string,
}
for _, p := range conf.Peers {
- oldPeers = append(oldPeers, strings.TrimPrefix(p.ConnectionString, "http://"))
+ oldPeers = append(oldPeers, p.Name)
}
oldPeers = append(oldPeers, self)
@@ -127,3 +127,11 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string,
return oldPeers, !reflect.DeepEqual(peers, oldPeers)
}
+
+func isTheFirstOne(self string, peers []string) bool {
+ sort.Strings(peers)
+	if len(peers) == 0 {
+ return true
+ }
+ return self == peers[0]
+}
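
isTheFirstOne is what keeps a brand-new cluster from initializing on every node at once: with an empty raft log, only the peer that sorts first self-joins. A runnable sketch with illustrative peer addresses:

```go
package main

import (
	"fmt"
	"sort"
)

// isTheFirstOne matches the function above: the lexicographically smallest
// peer address is the designated bootstrap node.
func isTheFirstOne(self string, peers []string) bool {
	sort.Strings(peers)
	if len(peers) == 0 {
		return true
	}
	return self == peers[0]
}

func main() {
	peers := []string{"master2:9333", "master1:9333", "master3:9333"}
	fmt.Println(isTheFirstOne("master1:9333", peers)) // true: it sorts first
	fmt.Println(isTheFirstOne("master2:9333", peers)) // false: waits to join
}
```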
diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go
index 627fe354e..fd38cb977 100644
--- a/weed/server/raft_server_handlers.go
+++ b/weed/server/raft_server_handlers.go
@@ -1,16 +1,17 @@
package weed_server
import (
- "github.com/chrislusf/seaweedfs/weed/operation"
"net/http"
)
-func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
- s.router.HandleFunc(pattern, handler)
+type ClusterStatusResult struct {
+ IsLeader bool `json:"IsLeader,omitempty"`
+ Leader string `json:"Leader,omitempty"`
+ Peers []string `json:"Peers,omitempty"`
}
-func (s *RaftServer) statusHandler(w http.ResponseWriter, r *http.Request) {
- ret := operation.ClusterStatusResult{
+func (s *RaftServer) StatusHandler(w http.ResponseWriter, r *http.Request) {
+ ret := ClusterStatusResult{
IsLeader: s.topo.IsLeader(),
Peers: s.Peers(),
}
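
The renamed StatusHandler serializes the local ClusterStatusResult type instead of the one that used to live in the operation package. A sketch of a consumer decoding it, assuming the handler is still mounted at /cluster/status:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// ClusterStatusResult mirrors the response struct above.
type ClusterStatusResult struct {
	IsLeader bool     `json:"IsLeader,omitempty"`
	Leader   string   `json:"Leader,omitempty"`
	Peers    []string `json:"Peers,omitempty"`
}

func main() {
	resp, err := http.Get("http://localhost:9333/cluster/status")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var status ClusterStatusResult
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leader=%s isLeader=%v peers=%v\n", status.Leader, status.IsLeader, status.Peers)
}
```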
diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go
index 429ca9b68..27b21ac09 100644
--- a/weed/server/volume_grpc_admin.go
+++ b/weed/server/volume_grpc_admin.go
@@ -2,10 +2,14 @@ package weed_server
import (
"context"
+ "fmt"
+ "path/filepath"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) {
@@ -24,17 +28,18 @@ func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server
}
-func (vs *VolumeServer) AssignVolume(ctx context.Context, req *volume_server_pb.AssignVolumeRequest) (*volume_server_pb.AssignVolumeResponse, error) {
+func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_pb.AllocateVolumeRequest) (*volume_server_pb.AllocateVolumeResponse, error) {
- resp := &volume_server_pb.AssignVolumeResponse{}
+ resp := &volume_server_pb.AllocateVolumeResponse{}
err := vs.store.AddVolume(
- storage.VolumeId(req.VolumdId),
+ needle.VolumeId(req.VolumeId),
req.Collection,
vs.needleMapKind,
req.Replication,
req.Ttl,
req.Preallocate,
+ req.MemoryMapMaxSizeMb,
)
if err != nil {
@@ -51,7 +56,7 @@ func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.V
resp := &volume_server_pb.VolumeMountResponse{}
- err := vs.store.MountVolume(storage.VolumeId(req.VolumdId))
+ err := vs.store.MountVolume(needle.VolumeId(req.VolumeId))
if err != nil {
glog.Errorf("volume mount %v: %v", req, err)
@@ -67,7 +72,7 @@ func (vs *VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb
resp := &volume_server_pb.VolumeUnmountResponse{}
- err := vs.store.UnmountVolume(storage.VolumeId(req.VolumdId))
+ err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
if err != nil {
glog.Errorf("volume unmount %v: %v", req, err)
@@ -83,7 +88,7 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb.
resp := &volume_server_pb.VolumeDeleteResponse{}
- err := vs.store.DeleteVolume(storage.VolumeId(req.VolumdId))
+ err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
if err != nil {
glog.Errorf("volume delete %v: %v", req, err)
@@ -94,3 +99,70 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb.
return resp, err
}
+
+func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) {
+
+ resp := &volume_server_pb.VolumeConfigureResponse{}
+
+ // check replication format
+ if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil {
+ resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err)
+ return resp, nil
+ }
+
+ // unmount
+ if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil {
+ glog.Errorf("volume configure unmount %v: %v", req, err)
+ resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err)
+ return resp, nil
+ }
+
+ // modify the volume info file
+ if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil {
+ glog.Errorf("volume configure %v: %v", req, err)
+ resp.Error = fmt.Sprintf("volume configure %v: %v", req, err)
+ return resp, nil
+ }
+
+ // mount
+ if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil {
+ glog.Errorf("volume configure mount %v: %v", req, err)
+ resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err)
+ return resp, nil
+ }
+
+ return resp, nil
+
+}
+
+func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) {
+
+ resp := &volume_server_pb.VolumeMarkReadonlyResponse{}
+
+ err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId))
+
+ if err != nil {
+ glog.Errorf("volume mark readonly %v: %v", req, err)
+ } else {
+ glog.V(2).Infof("volume mark readonly %v", req)
+ }
+
+ return resp, err
+
+}
+
+func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) {
+
+ resp := &volume_server_pb.VolumeServerStatusResponse{}
+
+ for _, loc := range vs.store.Locations {
+ if dir, e := filepath.Abs(loc.Directory); e == nil {
+ resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir))
+ }
+ }
+
+ resp.MemoryStatus = stats.MemStat()
+
+ return resp, nil
+
+}
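
Note that VolumeConfigure reports failures through resp.Error with a nil gRPC error, so callers must check both. A sketch, assuming the generated volume_server_pb.NewVolumeServerClient constructor and an illustrative address:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)
	resp, err := client.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    3,
		Replication: "001",
	})
	if err != nil {
		log.Fatal(err) // transport-level failure
	}
	if resp.Error != "" {
		log.Fatalf("configure failed: %s", resp.Error) // in-band failure
	}
	log.Println("volume 3 reconfigured to replication 001")
}
```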
diff --git a/weed/server/volume_grpc_batch_delete.go b/weed/server/volume_grpc_batch_delete.go
index 3554d97ae..501964191 100644
--- a/weed/server/volume_grpc_batch_delete.go
+++ b/weed/server/volume_grpc_batch_delete.go
@@ -7,7 +7,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) {
@@ -26,18 +27,36 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B
continue
}
- n := new(storage.Needle)
- volumeId, _ := storage.NewVolumeId(vid)
- n.ParsePath(id_cookie)
-
- cookie := n.Cookie
- if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
- resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
- FileId: fid,
- Status: http.StatusNotFound,
- Error: err.Error(),
- })
- continue
+ n := new(needle.Needle)
+ volumeId, _ := needle.NewVolumeId(vid)
+ if req.SkipCookieCheck {
+ n.Id, err = types.ParseNeedleId(id_cookie)
+ if err != nil {
+ resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
+ FileId: fid,
+ Status: http.StatusBadRequest,
+ Error: err.Error()})
+ continue
+ }
+ } else {
+ n.ParsePath(id_cookie)
+ cookie := n.Cookie
+ if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
+ resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
+ FileId: fid,
+ Status: http.StatusNotFound,
+ Error: err.Error(),
+ })
+ continue
+ }
+ if n.Cookie != cookie {
+ resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
+ FileId: fid,
+ Status: http.StatusBadRequest,
+ Error: "File Random Cookie does not match.",
+ })
+ break
+ }
}
if n.IsChunkedManifest() {
@@ -49,16 +68,8 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B
continue
}
- if n.Cookie != cookie {
- resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
- FileId: fid,
- Status: http.StatusBadRequest,
- Error: "File Random Cookie does not match.",
- })
- break
- }
n.LastModified = now
- if size, err := vs.store.Delete(volumeId, n); err != nil {
+ if size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil {
resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
FileId: fid,
Status: http.StatusInternalServerError,
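
BatchDelete splits each file id into a volume id and a needle id+cookie part before the (now skippable) cookie check. A sketch of that split; the helper is illustrative, not the parser the server actually uses:

```go
package main

import (
	"fmt"
	"strings"
)

// splitFid separates "3,0144b23d09" into the volume id ("3") and the
// needle id + cookie hex string ("0144b23d09").
func splitFid(fid string) (vid, idCookie string, ok bool) {
	commaIndex := strings.Index(fid, ",")
	if commaIndex <= 0 {
		return "", "", false
	}
	return fid[:commaIndex], fid[commaIndex+1:], true
}

func main() {
	vid, idCookie, ok := splitFid("3,0144b23d09")
	fmt.Println(vid, idCookie, ok) // 3 0144b23d09 true
}
```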
diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go
index bd3ffd7b3..7cb836344 100644
--- a/weed/server/volume_grpc_client_to_master.go
+++ b/weed/server/volume_grpc_client_to_master.go
@@ -2,13 +2,21 @@ package weed_server
import (
"fmt"
+ "net"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+
+ "golang.org/x/net/context"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
- "golang.org/x/net/context"
)
func (vs *VolumeServer) GetMaster() string {
@@ -16,34 +24,42 @@ func (vs *VolumeServer) GetMaster() string {
}
func (vs *VolumeServer) heartbeat() {
- glog.V(0).Infof("Volume server start with masters: %v", vs.MasterNodes)
+	glog.V(0).Infof("Volume server starts with seed master nodes: %v", vs.SeedMasterNodes)
vs.store.SetDataCenter(vs.dataCenter)
vs.store.SetRack(vs.rack)
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume")
+
var err error
var newLeader string
for {
- for _, master := range vs.MasterNodes {
+ for _, master := range vs.SeedMasterNodes {
if newLeader != "" {
+			// the new leader may actually be the same master;
+			// wait a bit before reconnecting to it
+ time.Sleep(3 * time.Second)
master = newLeader
}
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0)
+ masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master)
if parseErr != nil {
- glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress)
+ glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr)
continue
}
- newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, time.Duration(vs.pulseSeconds)*time.Second)
+ vs.store.MasterAddress = master
+ newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
+ newLeader = ""
+ vs.store.MasterAddress = ""
}
}
}
}
-func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepInterval time.Duration) (newLeader string, err error) {
+func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
- grpcConection, err := util.GrpcDial(masterGrpcAddress)
+ grpcConection, err := pb.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption)
if err != nil {
return "", fmt.Errorf("fail to dial %s : %v", masterNode, err)
}
@@ -58,9 +74,6 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI
glog.V(0).Infof("Heartbeat to: %v", masterNode)
vs.currentMaster = masterNode
- vs.store.Client = stream
- defer func() { vs.store.Client = nil }()
-
doneChan := make(chan error, 1)
go func() {
@@ -70,18 +83,27 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI
doneChan <- err
return
}
- if in.GetVolumeSizeLimit() != 0 {
- vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit()
- }
- if in.GetSecretKey() != "" {
- vs.guard.SecretKey = security.Secret(in.GetSecretKey())
+ if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() {
+ vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit())
+ if vs.store.MaybeAdjustVolumeMax() {
+ if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
+ glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ }
+ }
}
- if in.GetLeader() != "" && masterNode != in.GetLeader() {
+ if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) {
glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode)
newLeader = in.GetLeader()
doneChan <- nil
return
}
+ if in.GetMetricsAddress() != "" && vs.MetricsAddress != in.GetMetricsAddress() {
+ vs.MetricsAddress = in.GetMetricsAddress()
+ vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds())
+ }
+ if len(in.StorageBackends) > 0 {
+ backend.LoadFromPbStorageBackends(in.StorageBackends)
+ }
}
}()
@@ -90,33 +112,89 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI
return "", err
}
- tickChan := time.Tick(sleepInterval)
+ if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil {
+ glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ return "", err
+ }
+
+ volumeTickChan := time.Tick(sleepInterval)
+ ecShardTickChan := time.Tick(17 * sleepInterval)
for {
select {
- case vid := <-vs.store.NewVolumeIdChan:
+ case volumeMessage := <-vs.store.NewVolumesChan:
deltaBeat := &master_pb.Heartbeat{
- NewVids: []uint32{uint32(vid)},
+ NewVolumes: []*master_pb.VolumeShortInformationMessage{
+ &volumeMessage,
+ },
}
+ glog.V(1).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
if err = stream.Send(deltaBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
- case vid := <-vs.store.DeletedVolumeIdChan:
+ case ecShardMessage := <-vs.store.NewEcShardsChan:
deltaBeat := &master_pb.Heartbeat{
- DeletedVids: []uint32{uint32(vid)},
+ NewEcShards: []*master_pb.VolumeEcShardInformationMessage{
+ &ecShardMessage,
+ },
}
+ glog.V(1).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
+ erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds())
if err = stream.Send(deltaBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
- case <-tickChan:
+ case volumeMessage := <-vs.store.DeletedVolumesChan:
+ deltaBeat := &master_pb.Heartbeat{
+ DeletedVolumes: []*master_pb.VolumeShortInformationMessage{
+ &volumeMessage,
+ },
+ }
+ glog.V(1).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
+ if err = stream.Send(deltaBeat); err != nil {
+ glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ return "", err
+ }
+ case ecShardMessage := <-vs.store.DeletedEcShardsChan:
+ deltaBeat := &master_pb.Heartbeat{
+ DeletedEcShards: []*master_pb.VolumeEcShardInformationMessage{
+ &ecShardMessage,
+ },
+ }
+ glog.V(1).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
+ erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds())
+ if err = stream.Send(deltaBeat); err != nil {
+ glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
+ return "", err
+ }
+ case <-volumeTickChan:
+ glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port)
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
+ case <-ecShardTickChan:
+ glog.V(4).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port)
+ if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil {
+ glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ return "", err
+ }
case err = <-doneChan:
return
}
}
}
+
+func isSameIP(ip string, host string) bool {
+ ips, err := net.LookupIP(host)
+ if err != nil {
+ return false
+ }
+ for _, t := range ips {
+ if ip == t.String() {
+ return true
+ }
+ }
+ return false
+}
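
The heartbeat loop above multiplexes immediate delta beats (new or deleted volumes and ec shards, fed by channels) with two periodic full beats, the erasure-coding beat firing at 17x the volume interval. A stripped-down sketch of that select shape, with illustrative channels and shortened intervals:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	newVolumes := make(chan int) // stands in for vs.store.NewVolumesChan
	volumeTick := time.Tick(1 * time.Second)
	ecShardTick := time.Tick(17 * time.Second)

	go func() { newVolumes <- 42 }()

	for i := 0; i < 3; i++ {
		select {
		case vid := <-newVolumes:
			fmt.Println("delta beat: new volume", vid)
		case <-volumeTick:
			fmt.Println("full volume heartbeat")
		case <-ecShardTick:
			fmt.Println("erasure-coding heartbeat")
		}
	}
}
```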
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
new file mode 100644
index 000000000..5c7d5572c
--- /dev/null
+++ b/weed/server/volume_grpc_copy.go
@@ -0,0 +1,286 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const BufferSizeLimit = 1024 * 1024 * 2
+
+// VolumeCopy copies the .idx, .dat and .vif files, then mounts the volume
+func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) {
+
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v != nil {
+
+		glog.V(0).Infof("volume %d already exists. deleting it before copying...", req.VolumeId)
+
+ err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
+ if err != nil {
+			return nil, fmt.Errorf("failed to unmount existing volume %d: %v", req.VolumeId, err)
+ }
+
+ err = vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
+ if err != nil {
+ return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
+ }
+
+		glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId)
+ }
+
+ location := vs.store.FindFreeLocation()
+ if location == nil {
+ return nil, fmt.Errorf("no space left")
+ }
+
+ // the master will not start compaction for read-only volumes, so it is safe to just copy files directly
+ // copy .dat and .idx files
+ // read .idx .dat file size and timestamp
+ // send .idx file
+ // send .dat file
+ // confirm size and timestamp
+ var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse
+ var volumeFileName, idxFileName, datFileName string
+ err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ var err error
+ volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(),
+ &volume_server_pb.ReadVolumeFileStatusRequest{
+ VolumeId: req.VolumeId,
+ })
+		if err != nil {
+ return fmt.Errorf("read volume file status failed, %v", err)
+ }
+
+ volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
+
+ // println("source:", volFileInfoResp.String())
+	// copy .idx file
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
+ return err
+ }
+
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
+ return err
+ }
+
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
+ return err
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if volumeFileName == "" {
+		return nil, fmt.Errorf("volume %d file not found", req.VolumeId)
+ }
+
+ idxFileName = volumeFileName + ".idx"
+ datFileName = volumeFileName + ".dat"
+
+ defer func() {
+ if err != nil && volumeFileName != "" {
+ os.Remove(idxFileName)
+ os.Remove(datFileName)
+ os.Remove(volumeFileName + ".vif")
+ }
+ }()
+
+ if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16
+ return nil, err
+ }
+
+ // mount the volume
+ err = vs.store.MountVolume(needle.VolumeId(req.VolumeId))
+ if err != nil {
+ return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err)
+ }
+
+ return &volume_server_pb.VolumeCopyResponse{
+ LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second),
+ }, err
+}
+
+func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error {
+
+ copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+ VolumeId: vid,
+ Ext: ext,
+ CompactionRevision: compactRevision,
+ StopOffset: stopOffset,
+ Collection: collection,
+ IsEcVolume: isEcVolume,
+ IgnoreSourceFileNotFound: ignoreSourceFileNotFound,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err)
+ }
+
+ err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend)
+ if err != nil {
+ return fmt.Errorf("failed to copy %s file: %v", baseFileName+ext, err)
+ }
+
+ return nil
+
+}
+
+/*
+checkCopyFiles only verifies that the copied file sizes match the origin.
+TODO: maybe also check the received count and deleted count of the volume.
+*/
+func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error {
+ stat, err := os.Stat(idxFileName)
+ if err != nil {
+ return fmt.Errorf("stat idx file %s failed, %v", idxFileName, err)
+ }
+ if originFileInf.IdxFileSize != uint64(stat.Size()) {
+		return fmt.Errorf("idx file %s size [%v] does not match the origin file size [%v]",
+			idxFileName, stat.Size(), originFileInf.IdxFileSize)
+ }
+
+ stat, err = os.Stat(datFileName)
+ if err != nil {
+ return fmt.Errorf("get dat file info failed, %v", err)
+ }
+ if originFileInf.DatFileSize != uint64(stat.Size()) {
+		return fmt.Errorf("dat file size [%v] does not match the origin file size [%v]",
+			stat.Size(), originFileInf.DatFileSize)
+ }
+ return nil
+}
+
+func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) error {
+ glog.V(4).Infof("writing to %s", fileName)
+ flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+ if isAppend {
+ flags = os.O_WRONLY | os.O_CREATE
+ }
+ dst, err := os.OpenFile(fileName, flags, 0644)
+ if err != nil {
+		return err
+ }
+ defer dst.Close()
+
+ for {
+ resp, receiveErr := client.Recv()
+ if receiveErr == io.EOF {
+ break
+ }
+ if receiveErr != nil {
+ return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
+ }
+		if _, writeErr := dst.Write(resp.FileContent); writeErr != nil {
+			return fmt.Errorf("writing %s: %v", fileName, writeErr)
+		}
+ wt.MaybeSlowdown(int64(len(resp.FileContent)))
+ }
+ return nil
+}
+
+func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) {
+ resp := &volume_server_pb.ReadVolumeFileStatusResponse{}
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+		return nil, fmt.Errorf("volume id %d not found", req.VolumeId)
+ }
+
+ resp.VolumeId = req.VolumeId
+ datSize, idxSize, modTime := v.FileStat()
+ resp.DatFileSize = datSize
+ resp.IdxFileSize = idxSize
+ resp.DatFileTimestampSeconds = uint64(modTime.Unix())
+ resp.IdxFileTimestampSeconds = uint64(modTime.Unix())
+ resp.FileCount = v.FileCount()
+ resp.CompactionRevision = uint32(v.CompactionRevision)
+ resp.Collection = v.Collection
+ return resp, nil
+}
+
+// CopyFile lets a client pull a volume-related file from this source server.
+// If req.CompactionRevision != math.MaxUint32, it verifies the compaction revision is as expected.
+// Copying stops at req.StopOffset; set it to math.MaxUint64 to read all data.
+func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) error {
+
+ var fileName string
+ if !req.IsEcVolume {
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+			return fmt.Errorf("volume id %d not found", req.VolumeId)
+ }
+
+ if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 {
+ return fmt.Errorf("volume %d is compacted", req.VolumeId)
+ }
+ fileName = v.FileName() + req.Ext
+ } else {
+ baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext
+ for _, location := range vs.store.Locations {
+ tName := util.Join(location.Directory, baseFileName)
+ if util.FileExists(tName) {
+ fileName = tName
+ }
+ }
+ if fileName == "" {
+ if req.IgnoreSourceFileNotFound {
+ return nil
+ }
+			return fmt.Errorf("CopyFile: ec volume id %d not found", req.VolumeId)
+ }
+ }
+
+ bytesToRead := int64(req.StopOffset)
+
+ file, err := os.Open(fileName)
+ if err != nil {
+		if req.IgnoreSourceFileNotFound && os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer file.Close()
+
+ buffer := make([]byte, BufferSizeLimit)
+
+ for bytesToRead > 0 {
+ bytesread, err := file.Read(buffer)
+
+ // println(fileName, "read", bytesread, "bytes, with target", bytesToRead)
+
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ // println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error())
+ break
+ }
+
+ if int64(bytesread) > bytesToRead {
+ bytesread = int(bytesToRead)
+ }
+ err = stream.Send(&volume_server_pb.CopyFileResponse{
+ FileContent: buffer[:bytesread],
+ })
+ if err != nil {
+ // println("sending", bytesread, "bytes err", err.Error())
+ return err
+ }
+
+ bytesToRead -= int64(bytesread)
+
+ }
+
+ return nil
+}
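
CopyFile streams the file in fixed-size chunks and clamps the final chunk so the total never exceeds req.StopOffset. An isolated sketch of that bounded read loop against a plain local file; the path and offsets are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	const bufferSizeLimit = 2 * 1024 * 1024
	bytesToRead := int64(5 * 1024 * 1024) // plays the role of req.StopOffset

	file, err := os.Open("/tmp/sample.dat")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	buffer := make([]byte, bufferSizeLimit)
	for bytesToRead > 0 {
		n, readErr := file.Read(buffer)
		if readErr != nil {
			if readErr != io.EOF {
				log.Fatal(readErr)
			}
			break
		}
		if int64(n) > bytesToRead {
			n = int(bytesToRead) // clamp the last chunk at the stop offset
		}
		fmt.Println("would stream a chunk of", n, "bytes")
		bytesToRead -= int64(n)
	}
}
```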
diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go
new file mode 100644
index 000000000..6d6c3daa3
--- /dev/null
+++ b/weed/server/volume_grpc_copy_incremental.go
@@ -0,0 +1,66 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrementalCopyRequest, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error {
+
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+		return fmt.Errorf("volume id %d not found", req.VolumeId)
+ }
+
+ stopOffset, _, _ := v.FileStat()
+ foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(req.SinceNs)
+ if err != nil {
+ return fmt.Errorf("fail to locate by appendAtNs %d: %s", req.SinceNs, err)
+ }
+
+ if isLastOne {
+ return nil
+ }
+
+ startOffset := foundOffset.ToAcutalOffset()
+
+ buf := make([]byte, 1024*1024*2)
+ return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream)
+
+}
+
+func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) {
+
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+		return nil, fmt.Errorf("volume id %d not found", req.VolumeId)
+ }
+
+ resp := v.GetVolumeSyncStatus()
+
+ return resp, nil
+
+}
+
+func sendFileContent(datBackend backend.BackendStorageFile, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error {
+ var blockSizeLimit = int64(len(buf))
+ for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit {
+ n, readErr := datBackend.ReadAt(buf, startOffset+i)
+ if readErr == nil || readErr == io.EOF {
+ resp := &volume_server_pb.VolumeIncrementalCopyResponse{}
+ resp.FileContent = buf[:int64(n)]
+ sendErr := stream.Send(resp)
+ if sendErr != nil {
+ return sendErr
+ }
+ } else {
+ return readErr
+ }
+ }
+ return nil
+}
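
sendFileContent walks the region between the found offset and the current file size in fixed-size blocks via ReadAt. A sketch of that loop using a plain *os.File (which provides the same ReadAt shape as the backend abstraction); path and offsets are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	file, err := os.Open("/tmp/volume.dat")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	buf := make([]byte, 2*1024*1024)
	startOffset, stopOffset := int64(0), int64(8*1024*1024)
	blockSize := int64(len(buf))

	for i := int64(0); i < stopOffset-startOffset; i += blockSize {
		n, readErr := file.ReadAt(buf, startOffset+i)
		if readErr != nil && readErr != io.EOF {
			log.Fatal(readErr)
		}
		fmt.Println("would send", n, "bytes") // stream.Send in the real code
		if readErr == io.EOF {
			break
		}
	}
}
```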
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
new file mode 100644
index 000000000..79348c9d7
--- /dev/null
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -0,0 +1,391 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+/*
+
+Steps to apply erasure coding to the .dat and .idx files:
+0. ensure the volume is read-only
+1. the client calls VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files
+2. the client asks the master for servers to hold the ec shards, at least 4 servers
+3. the client calls VolumeEcShardsCopy on those target servers to copy the ec files from the source server
+4. the target servers report the new ec shards to the master
+5. the master stores vid -> [14]*DataNode
+6. the client checks the master; once all 14 shards are ready, it deletes the original .dat and .idx files
+
+*/
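As a sketch of steps 1 and 3, hypothetical client-side orchestration could reuse the operation.WithVolumeServerClient helper that appears later in this diff; `volumeId`, `collection`, `shardIds`, `sourceServer`, `targetServer`, and `grpcDialOption` are assumed inputs, while the request field names match the handlers below:

	// step 1: generate .ecx and .ec00 ~ .ec13 on the source server
	err := operation.WithVolumeServerClient(sourceServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, genErr := client.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
			VolumeId:   uint32(volumeId),
			Collection: collection,
		})
		return genErr
	})
	if err != nil {
		return err
	}
	// step 3: ask a target server to pull its shards (plus the .ecx file) from the source
	err = operation.WithVolumeServerClient(targetServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, copyErr := client.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
			VolumeId:       uint32(volumeId),
			Collection:     collection,
			ShardIds:       shardIds,
			CopyEcxFile:    true,
			SourceDataNode: sourceServer,
		})
		return copyErr
	})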
+
+// VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files
+func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) {
+
+ glog.V(0).Infof("VolumeEcShardsGenerate: %v", req)
+
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+ return nil, fmt.Errorf("volume %d not found", req.VolumeId)
+ }
+ baseFileName := v.FileName()
+
+ if v.Collection != req.Collection {
+ return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
+ }
+
+ // write .ec00 ~ .ec13 files
+ if err := erasure_coding.WriteEcFiles(baseFileName); err != nil {
+ return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
+ }
+
+ // write .ecx file
+ if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil {
+ return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err)
+ }
+
+	// write .vif file
+	if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil {
+		return nil, fmt.Errorf("SaveVolumeInfo %s.vif: %v", baseFileName, err)
+	}
+
+ return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil
+}
+
+// VolumeEcShardsRebuild rebuilds any missing .ec00 ~ .ec13 shard files
+func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) {
+
+ glog.V(0).Infof("VolumeEcShardsRebuild: %v", req)
+
+ baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
+
+ var rebuiltShardIds []uint32
+
+ for _, location := range vs.store.Locations {
+ if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) {
+ // write .ec00 ~ .ec13 files
+ baseFileName = path.Join(location.Directory, baseFileName)
+			generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName)
+			if err != nil {
+				return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err)
+			}
+			rebuiltShardIds = generatedShardIds
+
+ if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil {
+ return nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err)
+ }
+
+ break
+ }
+ }
+
+ return &volume_server_pb.VolumeEcShardsRebuildResponse{
+ RebuiltShardIds: rebuiltShardIds,
+ }, nil
+}
+
+// VolumeEcShardsCopy copies the .ecx file and the requested ec shards
+func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) {
+
+ glog.V(0).Infof("VolumeEcShardsCopy: %v", req)
+
+ location := vs.store.FindFreeLocation()
+ if location == nil {
+ return nil, fmt.Errorf("no space left")
+ }
+
+ baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))
+
+ err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+
+ // copy ec data slices
+ for _, shardId := range req.ShardIds {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
+ return err
+ }
+ }
+
+ if req.CopyEcxFile {
+
+ // copy ecx file
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if req.CopyEcjFile {
+ // copy ecj file
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
+ return err
+ }
+ }
+
+ if req.CopyVifFile {
+ // copy vif file
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err)
+ }
+
+ return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil
+}
+
+// VolumeEcShardsDelete deletes the local .ecx file and the requested ec shards if they are no longer needed.
+// The shards should not be mounted before calling this.
+func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) {
+
+ baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
+
+ glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds)
+
+ found := false
+ for _, location := range vs.store.Locations {
+ if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) {
+ found = true
+ baseFilename = path.Join(location.Directory, baseFilename)
+ for _, shardId := range req.ShardIds {
+ os.Remove(baseFilename + erasure_coding.ToExt(int(shardId)))
+ }
+ break
+ }
+ }
+
+ if !found {
+ return nil, nil
+ }
+
+ // check whether to delete the .ecx and .ecj file also
+ hasEcxFile := false
+ hasIdxFile := false
+ existingShardCount := 0
+
+ bName := filepath.Base(baseFilename)
+ for _, location := range vs.store.Locations {
+ fileInfos, err := ioutil.ReadDir(location.Directory)
+ if err != nil {
+ continue
+ }
+ for _, fileInfo := range fileInfos {
+ if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" {
+ hasEcxFile = true
+ continue
+ }
+ if fileInfo.Name() == bName+".idx" {
+ hasIdxFile = true
+ continue
+ }
+ if strings.HasPrefix(fileInfo.Name(), bName+".ec") {
+ existingShardCount++
+ }
+ }
+ }
+
+ if hasEcxFile && existingShardCount == 0 {
+ if err := os.Remove(baseFilename + ".ecx"); err != nil {
+ return nil, err
+ }
+ os.Remove(baseFilename + ".ecj")
+ }
+ if !hasIdxFile {
+ // .vif is used for ec volumes and normal volumes
+ os.Remove(baseFilename + ".vif")
+ }
+
+ return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil
+}
+
+func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) {
+
+ glog.V(0).Infof("VolumeEcShardsMount: %v", req)
+
+	for _, shardId := range req.ShardIds {
+		err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
+		if err != nil {
+			glog.Errorf("ec shard mount %v: %v", req, err)
+			return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err)
+		}
+		glog.V(2).Infof("ec shard mount %v", req)
+	}
+
+ return &volume_server_pb.VolumeEcShardsMountResponse{}, nil
+}
+
+func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) {
+
+ glog.V(0).Infof("VolumeEcShardsUnmount: %v", req)
+
+	for _, shardId := range req.ShardIds {
+		err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
+		if err != nil {
+			glog.Errorf("ec shard unmount %v: %v", req, err)
+			return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err)
+		}
+		glog.V(2).Infof("ec shard unmount %v", req)
+	}
+
+ return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil
+}
+
+func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardReadRequest, stream volume_server_pb.VolumeServer_VolumeEcShardReadServer) error {
+
+ ecVolume, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
+ if !found {
+ return fmt.Errorf("VolumeEcShardRead not found ec volume id %d", req.VolumeId)
+ }
+ ecShard, found := ecVolume.FindEcVolumeShard(erasure_coding.ShardId(req.ShardId))
+ if !found {
+ return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId)
+ }
+
+ if req.FileKey != 0 {
+ _, size, _ := ecVolume.FindNeedleFromEcx(types.Uint64ToNeedleId(req.FileKey))
+ if size == types.TombstoneFileSize {
+ return stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
+ IsDeleted: true,
+ })
+ }
+ }
+
+ bufSize := req.Size
+ if bufSize > BufferSizeLimit {
+ bufSize = BufferSizeLimit
+ }
+ buffer := make([]byte, bufSize)
+
+ startOffset, bytesToRead := req.Offset, req.Size
+
+ for bytesToRead > 0 {
+ // min of bytesToRead and bufSize
+ bufferSize := bufSize
+ if bufferSize > bytesToRead {
+ bufferSize = bytesToRead
+ }
+ bytesread, err := ecShard.ReadAt(buffer[0:bufferSize], startOffset)
+
+ // println("read", ecShard.FileName(), "startOffset", startOffset, bytesread, "bytes, with target", bufferSize)
+ if bytesread > 0 {
+
+ if int64(bytesread) > bytesToRead {
+ bytesread = int(bytesToRead)
+ }
+ err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
+ Data: buffer[:bytesread],
+ })
+ if err != nil {
+ // println("sending", bytesread, "bytes err", err.Error())
+ return err
+ }
+
+ startOffset += int64(bytesread)
+ bytesToRead -= int64(bytesread)
+
+ }
+
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ return nil
+ }
+
+	}
+
+	return nil
+}
+
+func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) {
+
+ glog.V(0).Infof("VolumeEcBlobDelete: %v", req)
+
+ resp := &volume_server_pb.VolumeEcBlobDeleteResponse{}
+
+ for _, location := range vs.store.Locations {
+ if localEcVolume, found := location.FindEcVolume(needle.VolumeId(req.VolumeId)); found {
+
+ _, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version))
+ if err != nil {
+ return nil, fmt.Errorf("locate in local ec volume: %v", err)
+ }
+ if size == types.TombstoneFileSize {
+ return resp, nil
+ }
+
+ err = localEcVolume.DeleteNeedleFromEcx(types.NeedleId(req.FileKey))
+ if err != nil {
+ return nil, err
+ }
+
+ break
+ }
+ }
+
+ return resp, nil
+}
+
+// VolumeEcShardsToVolume generates the .idx and .dat files from the .ecx, .ecj and .ec00 ~ .ec13 files
+func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) {
+
+ glog.V(0).Infof("VolumeEcShardsToVolume: %v", req)
+
+ v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
+ if !found {
+ return nil, fmt.Errorf("ec volume %d not found", req.VolumeId)
+ }
+ baseFileName := v.FileName()
+
+ if v.Collection != req.Collection {
+ return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
+ }
+
+ // calculate .dat file size
+ datFileSize, err := erasure_coding.FindDatFileSize(baseFileName)
+ if err != nil {
+ return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err)
+ }
+
+ // write .dat file from .ec00 ~ .ec09 files
+ if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil {
+		return nil, fmt.Errorf("WriteDatFile %s: %v", baseFileName, err)
+ }
+
+ // write .idx file from .ecx and .ecj files
+ if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil {
+ return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err)
+ }
+
+ return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil
+}
diff --git a/weed/server/volume_grpc_query.go b/weed/server/volume_grpc_query.go
new file mode 100644
index 000000000..767e28e7b
--- /dev/null
+++ b/weed/server/volume_grpc_query.go
@@ -0,0 +1,69 @@
+package weed_server
+
+import (
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/query/json"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/tidwall/gjson"
+)
+
+func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_server_pb.VolumeServer_QueryServer) error {
+
+ for _, fid := range req.FromFileIds {
+
+		vid, idCookie, err := operation.ParseFileId(fid)
+ if err != nil {
+ glog.V(0).Infof("volume query failed to parse fid %s: %v", fid, err)
+ return err
+ }
+
+ n := new(needle.Needle)
+ volumeId, _ := needle.NewVolumeId(vid)
+		n.ParsePath(idCookie)
+
+ cookie := n.Cookie
+ if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
+ glog.V(0).Infof("volume query failed to read fid %s: %v", fid, err)
+ return err
+ }
+
+		if n.Cookie != cookie {
+			glog.V(0).Infof("volume query found mismatched cookie on fid %s", fid)
+			return fmt.Errorf("mismatched cookie on fid %s", fid)
+		}
+
+ if req.InputSerialization.CsvInput != nil {
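+			// TODO: CSV input is not handled yet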
+
+ }
+
+ if req.InputSerialization.JsonInput != nil {
+
+ stripe := &volume_server_pb.QueriedStripe{
+ Records: nil,
+ }
+
+ filter := json.Query{
+ Field: req.Filter.Field,
+ Op: req.Filter.Operand,
+ Value: req.Filter.Value,
+ }
+ gjson.ForEachLine(string(n.Data), func(line gjson.Result) bool {
+ passedFilter, values := json.QueryJson(line.Raw, req.Selections, filter)
+ if !passedFilter {
+ return true
+ }
+ stripe.Records = json.ToJson(stripe.Records, req.Selections, values)
+ return true
+ })
+ err = stream.Send(stripe)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
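The filter evaluation itself is delegated to the weed/query/json helpers used above. A standalone sketch of that flow, with a hypothetical record and selections, and the operator string format being an assumption:

	// evaluate a filter against one JSON line and project the selected fields
	filter := json.Query{Field: "status", Op: "=", Value: "active"}
	passed, values := json.QueryJson(`{"status":"active","size":42}`, []string{"status", "size"}, filter)
	if passed {
		// accumulate the projection, as the handler does per stripe
		records := json.ToJson(nil, []string{"status", "size"}, values)
		fmt.Println(string(records))
	}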
diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go
deleted file mode 100644
index 5f56ec17d..000000000
--- a/weed/server/volume_grpc_sync.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package weed_server
-
-import (
- "context"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
- "github.com/chrislusf/seaweedfs/weed/storage/types"
-)
-
-func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) {
-
- v := vs.store.GetVolume(storage.VolumeId(req.VolumdId))
- if v == nil {
- return nil, fmt.Errorf("Not Found Volume Id %d", req.VolumdId)
- }
-
- resp := v.GetVolumeSyncStatus()
-
- glog.V(2).Infof("volume sync status %d", req.VolumdId)
-
- return resp, nil
-
-}
-
-func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexRequest, stream volume_server_pb.VolumeServer_VolumeSyncIndexServer) error {
-
- v := vs.store.GetVolume(storage.VolumeId(req.VolumdId))
- if v == nil {
- return fmt.Errorf("Not Found Volume Id %d", req.VolumdId)
- }
-
- content, err := v.IndexFileContent()
-
- if err != nil {
- glog.Errorf("sync volume %d index: %v", req.VolumdId, err)
- } else {
- glog.V(2).Infof("sync volume %d index", req.VolumdId)
- }
-
- const blockSizeLimit = 1024 * 1024 * 2
- for i := 0; i < len(content); i += blockSizeLimit {
- blockSize := len(content) - i
- if blockSize > blockSizeLimit {
- blockSize = blockSizeLimit
- }
- resp := &volume_server_pb.VolumeSyncIndexResponse{}
- resp.IndexFileContent = content[i : i+blockSize]
- stream.Send(resp)
- }
-
- return nil
-
-}
-
-func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataRequest, stream volume_server_pb.VolumeServer_VolumeSyncDataServer) error {
-
- v := vs.store.GetVolume(storage.VolumeId(req.VolumdId))
- if v == nil {
- return fmt.Errorf("Not Found Volume Id %d", req.VolumdId)
- }
-
- if uint32(v.SuperBlock.CompactRevision) != req.Revision {
- return fmt.Errorf("Requested Volume Revision is %d, but current revision is %d", req.Revision, v.SuperBlock.CompactRevision)
- }
-
- content, err := storage.ReadNeedleBlob(v.DataFile(), int64(req.Offset)*types.NeedlePaddingSize, req.Size, v.Version())
- if err != nil {
- return fmt.Errorf("read offset:%d size:%d", req.Offset, req.Size)
- }
-
- id, err := types.ParseNeedleId(req.NeedleId)
- if err != nil {
- return fmt.Errorf("parsing needle id %s: %v", req.NeedleId, err)
- }
- n := new(storage.Needle)
- n.ParseNeedleHeader(content)
- if id != n.Id {
- return fmt.Errorf("Expected file entry id %d, but found %d", id, n.Id)
- }
-
- if err != nil {
- glog.Errorf("sync volume %d data: %v", req.VolumdId, err)
- }
-
- const blockSizeLimit = 1024 * 1024 * 2
- for i := 0; i < len(content); i += blockSizeLimit {
- blockSize := len(content) - i
- if blockSize > blockSizeLimit {
- blockSize = blockSizeLimit
- }
- resp := &volume_server_pb.VolumeSyncDataResponse{}
- resp.FileContent = content[i : i+blockSize]
- stream.Send(resp)
- }
-
- return nil
-
-}
diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go
new file mode 100644
index 000000000..2dde5b69c
--- /dev/null
+++ b/weed/server/volume_grpc_tail.go
@@ -0,0 +1,136 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderRequest, stream volume_server_pb.VolumeServer_VolumeTailSenderServer) error {
+
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+ return fmt.Errorf("not found volume id %d", req.VolumeId)
+ }
+
+ defer glog.V(1).Infof("tailing volume %d finished", v.Id)
+
+ lastTimestampNs := req.SinceNs
+ drainingSeconds := req.IdleTimeoutSeconds
+
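+	// IdleTimeoutSeconds == 0 means follow the volume forever; otherwise, once
+	// no new entries arrive, drainingSeconds counts down and the stream ends at zero.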
+ for {
+ lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs)
+ if err != nil {
+			glog.Infof("sendNeedlesSince: %v", err)
+			return fmt.Errorf("sendNeedlesSince: %v", err)
+ }
+ time.Sleep(2 * time.Second)
+
+ if req.IdleTimeoutSeconds == 0 {
+ lastTimestampNs = lastProcessedTimestampNs
+ continue
+ }
+ if lastProcessedTimestampNs == lastTimestampNs {
+ drainingSeconds--
+ if drainingSeconds <= 0 {
+ return nil
+ }
+ glog.V(1).Infof("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds)
+ } else {
+ lastTimestampNs = lastProcessedTimestampNs
+ drainingSeconds = req.IdleTimeoutSeconds
+ glog.V(1).Infof("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds)
+ }
+
+ }
+
+}
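A follower consumes this stream through operation.TailVolumeFromSource, exactly as VolumeTailReceiver does below. A minimal sketch with hypothetical inputs (`sourceVolumeServer`, `grpcDialOption`, `volumeId`, `sinceNs`), matching the call shape used in VolumeTailReceiver:

	// follow the volume, draining for up to 10 idle seconds before returning
	err := operation.TailVolumeFromSource(sourceVolumeServer, grpcDialOption, volumeId, sinceNs, 10, func(n *needle.Needle) error {
		// invoked once per needle appended since sinceNs
		glog.V(1).Infof("tailed needle %v, %d bytes", n.Id, len(n.Data))
		return nil
	})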
+
+func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServer, v *storage.Volume, lastTimestampNs uint64) (lastProcessedTimestampNs uint64, err error) {
+
+ foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(lastTimestampNs)
+ if err != nil {
+ return 0, fmt.Errorf("fail to locate by appendAtNs %d: %s", lastTimestampNs, err)
+ }
+
+ // log.Printf("reading ts %d offset %d isLast %v", lastTimestampNs, foundOffset, isLastOne)
+
+ if isLastOne {
+		// heartbeat the client to keep the connection healthy
+ sendErr := stream.Send(&volume_server_pb.VolumeTailSenderResponse{IsLastChunk: true})
+ return lastTimestampNs, sendErr
+ }
+
+ scanner := &VolumeFileScanner4Tailing{
+ stream: stream,
+ }
+
+ err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToAcutalOffset(), scanner)
+
+ return scanner.lastProcessedTimestampNs, err
+
+}
+
+func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_server_pb.VolumeTailReceiverRequest) (*volume_server_pb.VolumeTailReceiverResponse, error) {
+
+ resp := &volume_server_pb.VolumeTailReceiverResponse{}
+
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+ return resp, fmt.Errorf("receiver not found volume id %d", req.VolumeId)
+ }
+
+ defer glog.V(1).Infof("receive tailing volume %d finished", v.Id)
+
+ return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error {
+ _, err := vs.store.WriteVolumeNeedle(v.Id, n, false)
+ return err
+ })
+
+}
+
+// VolumeFileScanner4Tailing scans a volume file from an offset and streams each needle to the tailing client
+type VolumeFileScanner4Tailing struct {
+ stream volume_server_pb.VolumeServer_VolumeTailSenderServer
+ lastProcessedTimestampNs uint64
+}
+
+func (scanner *VolumeFileScanner4Tailing) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+	return nil
+}
+func (scanner *VolumeFileScanner4Tailing) ReadNeedleBody() bool {
+ return true
+}
+
+func (scanner *VolumeFileScanner4Tailing) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+ isLastChunk := false
+
+ // need to send body by chunks
+ for i := 0; i < len(needleBody); i += BufferSizeLimit {
+ stopOffset := i + BufferSizeLimit
+ if stopOffset >= len(needleBody) {
+ isLastChunk = true
+ stopOffset = len(needleBody)
+ }
+
+ sendErr := scanner.stream.Send(&volume_server_pb.VolumeTailSenderResponse{
+ NeedleHeader: needleHeader,
+ NeedleBody: needleBody[i:stopOffset],
+ IsLastChunk: isLastChunk,
+ })
+ if sendErr != nil {
+ return sendErr
+ }
+ }
+
+ scanner.lastProcessedTimestampNs = n.AppendAtNs
+ return nil
+}
diff --git a/weed/server/volume_grpc_tier_download.go b/weed/server/volume_grpc_tier_download.go
new file mode 100644
index 000000000..7b3982e40
--- /dev/null
+++ b/weed/server/volume_grpc_tier_download.go
@@ -0,0 +1,85 @@
+package weed_server
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+// VolumeTierMoveDatFromRemote copies the .dat file from a remote tier back to the local volume server
+func (vs *VolumeServer) VolumeTierMoveDatFromRemote(req *volume_server_pb.VolumeTierMoveDatFromRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatFromRemoteServer) error {
+
+ // find existing volume
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+ return fmt.Errorf("volume %d not found", req.VolumeId)
+ }
+
+ // verify the collection
+ if v.Collection != req.Collection {
+ return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
+ }
+
+ // locate the disk file
+ storageName, storageKey := v.RemoteStorageNameKey()
+ if storageName == "" || storageKey == "" {
+ return fmt.Errorf("volume %d is already on local disk", req.VolumeId)
+ }
+
+	// check whether the local .dat already exists
+	if _, ok := v.DataBackend.(*backend.DiskFile); ok {
+		return fmt.Errorf("volume %d is already on local disk", req.VolumeId)
+	}
+
+ // check valid storage backend type
+ backendStorage, found := backend.BackendStorages[storageName]
+ if !found {
+ var keys []string
+ for key := range backend.BackendStorages {
+ keys = append(keys, key)
+ }
+		return fmt.Errorf("remote storage %s not found, supported: %v", storageName, keys)
+ }
+
+ startTime := time.Now()
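+	// rate-limit progress reports to at most one per second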
+ fn := func(progressed int64, percentage float32) error {
+ now := time.Now()
+ if now.Sub(startTime) < time.Second {
+ return nil
+ }
+ startTime = now
+ return stream.Send(&volume_server_pb.VolumeTierMoveDatFromRemoteResponse{
+ Processed: progressed,
+ ProcessedPercentage: percentage,
+ })
+ }
+ // copy the data file
+ _, err := backendStorage.DownloadFile(v.FileName()+".dat", storageKey, fn)
+ if err != nil {
+ return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName()+".dat", err)
+ }
+
+ if req.KeepRemoteDatFile {
+ return nil
+ }
+
+ // remove remote file
+ if err := backendStorage.DeleteFile(storageKey); err != nil {
+ return fmt.Errorf("volume %d fail to delete remote file %s: %v", v.Id, storageKey, err)
+ }
+
+ // forget remote file
+ v.GetVolumeInfo().Files = v.GetVolumeInfo().Files[1:]
+ if err := v.SaveVolumeInfo(); err != nil {
+ return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err)
+ }
+
+ v.DataBackend.Close()
+ v.DataBackend = nil
+
+ return nil
+}
diff --git a/weed/server/volume_grpc_tier_upload.go b/weed/server/volume_grpc_tier_upload.go
new file mode 100644
index 000000000..c9694df59
--- /dev/null
+++ b/weed/server/volume_grpc_tier_upload.go
@@ -0,0 +1,100 @@
+package weed_server
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+// VolumeTierMoveDatToRemote copies the .dat file to a remote tier
+func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTierMoveDatToRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatToRemoteServer) error {
+
+ // find existing volume
+ v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
+ if v == nil {
+ return fmt.Errorf("volume %d not found", req.VolumeId)
+ }
+
+ // verify the collection
+ if v.Collection != req.Collection {
+ return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
+ }
+
+ // locate the disk file
+ diskFile, ok := v.DataBackend.(*backend.DiskFile)
+ if !ok {
+ return fmt.Errorf("volume %d is not on local disk", req.VolumeId)
+ }
+
+ // check valid storage backend type
+ backendStorage, found := backend.BackendStorages[req.DestinationBackendName]
+ if !found {
+ var keys []string
+ for key := range backend.BackendStorages {
+ keys = append(keys, key)
+ }
+		return fmt.Errorf("destination %s not found, supported: %v", req.DestinationBackendName, keys)
+ }
+
+ // check whether the existing backend storage is the same as requested
+ // if same, skip
+ backendType, backendId := backend.BackendNameToTypeId(req.DestinationBackendName)
+ for _, remoteFile := range v.GetVolumeInfo().GetFiles() {
+ if remoteFile.BackendType == backendType && remoteFile.BackendId == backendId {
+ return fmt.Errorf("destination %s already exists", req.DestinationBackendName)
+ }
+ }
+
+ startTime := time.Now()
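+	// rate-limit progress reports to at most one per second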
+ fn := func(progressed int64, percentage float32) error {
+ now := time.Now()
+ if now.Sub(startTime) < time.Second {
+ return nil
+ }
+ startTime = now
+ return stream.Send(&volume_server_pb.VolumeTierMoveDatToRemoteResponse{
+ Processed: progressed,
+ ProcessedPercentage: percentage,
+ })
+ }
+
+	// remember the file's original source
+ attributes := make(map[string]string)
+ attributes["volumeId"] = v.Id.String()
+ attributes["collection"] = v.Collection
+ attributes["ext"] = ".dat"
+ // copy the data file
+ key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn)
+ if err != nil {
+ return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
+ }
+
+ // save the remote file to volume tier info
+ v.GetVolumeInfo().Files = append(v.GetVolumeInfo().GetFiles(), &volume_server_pb.RemoteFile{
+ BackendType: backendType,
+ BackendId: backendId,
+ Key: key,
+ Offset: 0,
+ FileSize: uint64(size),
+ ModifiedTime: uint64(time.Now().Unix()),
+ Extension: ".dat",
+ })
+
+ if err := v.SaveVolumeInfo(); err != nil {
+ return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err)
+ }
+
+ if err := v.LoadRemoteFile(); err != nil {
+ return fmt.Errorf("volume %d fail to load remote file: %v", v.Id, err)
+ }
+
+ if !req.KeepLocalDatFile {
+ os.Remove(v.FileName() + ".dat")
+ }
+
+ return nil
+}
diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go
index f0c87b582..b87de4b5b 100644
--- a/weed/server/volume_grpc_vacuum.go
+++ b/weed/server/volume_grpc_vacuum.go
@@ -5,19 +5,19 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_server_pb.VacuumVolumeCheckRequest) (*volume_server_pb.VacuumVolumeCheckResponse, error) {
resp := &volume_server_pb.VacuumVolumeCheckResponse{}
- garbageRatio, err := vs.store.CheckCompactVolume(storage.VolumeId(req.VolumdId))
+ garbageRatio, err := vs.store.CheckCompactVolume(needle.VolumeId(req.VolumeId))
resp.GarbageRatio = garbageRatio
if err != nil {
- glog.V(3).Infof("check volume %d: %v", req.VolumdId, err)
+ glog.V(3).Infof("check volume %d: %v", req.VolumeId, err)
}
return resp, err
@@ -28,12 +28,12 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser
resp := &volume_server_pb.VacuumVolumeCompactResponse{}
- err := vs.store.CompactVolume(storage.VolumeId(req.VolumdId), req.Preallocate)
+ err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond)
if err != nil {
- glog.Errorf("compact volume %d: %v", req.VolumdId, err)
+ glog.Errorf("compact volume %d: %v", req.VolumeId, err)
} else {
- glog.V(1).Infof("compact volume %d", req.VolumdId)
+ glog.V(1).Infof("compact volume %d", req.VolumeId)
}
return resp, err
@@ -44,12 +44,17 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv
resp := &volume_server_pb.VacuumVolumeCommitResponse{}
- err := vs.store.CommitCompactVolume(storage.VolumeId(req.VolumdId))
+ err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("commit volume %d: %v", req.VolumdId, err)
+ glog.Errorf("commit volume %d: %v", req.VolumeId, err)
} else {
- glog.V(1).Infof("commit volume %d", req.VolumdId)
+ glog.V(1).Infof("commit volume %d", req.VolumeId)
+ }
+ if err == nil {
+ if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() {
+ resp.IsReadOnly = true
+ }
}
return resp, err
@@ -60,12 +65,12 @@ func (vs *VolumeServer) VacuumVolumeCleanup(ctx context.Context, req *volume_ser
resp := &volume_server_pb.VacuumVolumeCleanupResponse{}
- err := vs.store.CommitCleanupVolume(storage.VolumeId(req.VolumdId))
+ err := vs.store.CommitCleanupVolume(needle.VolumeId(req.VolumeId))
if err != nil {
- glog.Errorf("cleanup volume %d: %v", req.VolumdId, err)
+ glog.Errorf("cleanup volume %d: %v", req.VolumeId, err)
} else {
- glog.V(1).Infof("cleanup volume %d", req.VolumdId)
+ glog.V(1).Infof("cleanup volume %d", req.VolumeId)
}
return resp, err
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 0914e81b0..b7ed81be0 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -1,55 +1,84 @@
package weed_server
import (
+ "fmt"
"net/http"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
)
type VolumeServer struct {
- MasterNodes []string
- currentMaster string
- pulseSeconds int
- dataCenter string
- rack string
- store *storage.Store
- guard *security.Guard
+ SeedMasterNodes []string
+ currentMaster string
+ pulseSeconds int
+ dataCenter string
+ rack string
+ store *storage.Store
+ guard *security.Guard
+ grpcDialOption grpc.DialOption
- needleMapKind storage.NeedleMapType
- FixJpgOrientation bool
- ReadRedirect bool
+ needleMapKind storage.NeedleMapType
+ ReadRedirect bool
+ compactionBytePerSecond int64
+ MetricsAddress string
+ MetricsIntervalSec int
+ fileSizeLimitBytes int64
}
func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
- folders []string, maxCounts []int,
+ folders []string, maxCounts []int, minFreeSpacePercents []float32,
needleMapKind storage.NeedleMapType,
masterNodes []string, pulseSeconds int,
dataCenter string, rack string,
whiteList []string,
- fixJpgOrientation bool,
- readRedirect bool) *VolumeServer {
+ readRedirect bool,
+ compactionMBPerSecond int,
+ fileSizeLimitMB int,
+) *VolumeServer {
+
+ v := util.GetViper()
+ signingKey := v.GetString("jwt.signing.key")
+ v.SetDefault("jwt.signing.expires_after_seconds", 10)
+ expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds")
+ enableUiAccess := v.GetBool("access.ui")
+
+ readSigningKey := v.GetString("jwt.signing.read.key")
+ v.SetDefault("jwt.signing.read.expires_after_seconds", 60)
+ readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds")
+
vs := &VolumeServer{
- pulseSeconds: pulseSeconds,
- dataCenter: dataCenter,
- rack: rack,
- needleMapKind: needleMapKind,
- FixJpgOrientation: fixJpgOrientation,
- ReadRedirect: readRedirect,
+ pulseSeconds: pulseSeconds,
+ dataCenter: dataCenter,
+ rack: rack,
+ needleMapKind: needleMapKind,
+ ReadRedirect: readRedirect,
+ grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"),
+ compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024,
+ fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024,
}
- vs.MasterNodes = masterNodes
- vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
-
- vs.guard = security.NewGuard(whiteList, "")
+ vs.SeedMasterNodes = masterNodes
+ vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, vs.needleMapKind)
+ vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)
handleStaticResources(adminMux)
- adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler)
- adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler))
- adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler))
- adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler))
- adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler))
+ if signingKey == "" || enableUiAccess {
+ // only expose the volume server details for safe environments
+ adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler)
+ adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler))
+ /*
+ adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler))
+ adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler))
+ adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler))
+ */
+ }
adminMux.HandleFunc("/", vs.privateStoreHandler)
if publicMux != adminMux {
// separated admin and public port
@@ -58,6 +87,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
}
go vs.heartbeat()
+ hostAddress := fmt.Sprintf("%s:%d", ip, port)
+ go stats.LoopPushingMetric("volumeServer", hostAddress, stats.VolumeServerGather,
+ func() (addr string, intervalSeconds int) {
+ return vs.MetricsAddress, vs.MetricsIntervalSec
+ })
return vs
}
@@ -67,7 +101,3 @@ func (vs *VolumeServer) Shutdown() {
vs.store.Close()
glog.V(0).Infoln("Shut down successfully!")
}
-
-func (vs *VolumeServer) jwt(fileId string) security.EncodedJwt {
- return security.GenJwt(vs.guard.SecretKey, fileId)
-}
diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go
index 77b1274fd..14ad27d42 100644
--- a/weed/server/volume_server_handlers.go
+++ b/weed/server/volume_server_handlers.go
@@ -2,7 +2,10 @@ package weed_server
import (
"net/http"
+ "strings"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
)
@@ -45,3 +48,47 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req
vs.GetOrHeadHandler(w, r)
}
}
+
+func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid string, isWrite bool) bool {
+
+ var signingKey security.SigningKey
+
+ if isWrite {
+ if len(vs.guard.SigningKey) == 0 {
+ return true
+ } else {
+ signingKey = vs.guard.SigningKey
+ }
+ } else {
+ if len(vs.guard.ReadSigningKey) == 0 {
+ return true
+ } else {
+ signingKey = vs.guard.ReadSigningKey
+ }
+ }
+
+ tokenStr := security.GetJwt(r)
+ if tokenStr == "" {
+ glog.V(1).Infof("missing jwt from %s", r.RemoteAddr)
+ return false
+ }
+
+ token, err := security.DecodeJwt(signingKey, tokenStr)
+ if err != nil {
+ glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err)
+ return false
+ }
+ if !token.Valid {
+ glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr)
+ return false
+ }
+
+ if sc, ok := token.Claims.(*security.SeaweedFileIdClaims); ok {
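+		// the fid may carry an appended suffix (e.g. "01637037d6_1"); the JWT claim
+		// is signed against the bare fid, so strip the suffix before comparing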
+ if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 {
+ fid = fid[:sepIndex]
+ }
+ return sc.Fid == vid+","+fid
+ }
+ glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr)
+ return false
+}
diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go
index 25b6582f7..34655d833 100644
--- a/weed/server/volume_server_handlers_admin.go
+++ b/weed/server/volume_server_handlers_admin.go
@@ -11,14 +11,21 @@ import (
func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
- m["Volumes"] = vs.store.Status()
+ m["Version"] = util.Version()
+ var ds []*volume_server_pb.DiskStatus
+ for _, loc := range vs.store.Locations {
+ if dir, e := filepath.Abs(loc.Directory); e == nil {
+ ds = append(ds, stats.NewDiskStatus(dir))
+ }
+ }
+ m["DiskStatuses"] = ds
+ m["Volumes"] = vs.store.VolumeInfos()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
var ds []*volume_server_pb.DiskStatus
for _, loc := range vs.store.Locations {
if dir, e := filepath.Abs(loc.Directory); e == nil {
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index 92c728141..7ef1170b3 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -2,45 +2,59 @@ package weed_server
import (
"bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
"io"
"mime"
- "mime/multipart"
"net/http"
"net/url"
- "path"
+ "path/filepath"
"strconv"
"strings"
"time"
- "encoding/json"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
)
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {
- n := new(storage.Needle)
+
+ stats.VolumeServerRequestCounter.WithLabelValues("get").Inc()
+ start := time.Now()
+ defer func() { stats.VolumeServerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) }()
+
+ n := new(needle.Needle)
vid, fid, filename, ext, _ := parseURLPath(r.URL.Path)
- volumeId, err := storage.NewVolumeId(vid)
+
+ if !vs.maybeCheckJwtAuthorization(r, vid, fid, false) {
+ writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt"))
+ return
+ }
+
+ volumeId, err := needle.NewVolumeId(vid)
if err != nil {
- glog.V(2).Infoln("parsing error:", err, r.URL.Path)
+ glog.V(2).Infof("parsing vid %s: %v", r.URL.Path, err)
w.WriteHeader(http.StatusBadRequest)
return
}
err = n.ParsePath(fid)
if err != nil {
- glog.V(2).Infoln("parsing fid error:", err, r.URL.Path)
+ glog.V(2).Infof("parsing fid %s: %v", r.URL.Path, err)
w.WriteHeader(http.StatusBadRequest)
return
}
- glog.V(4).Infoln("volume", volumeId, "reading", n)
- if !vs.store.HasVolume(volumeId) {
+ // glog.V(4).Infoln("volume", volumeId, "reading", n)
+ hasVolume := vs.store.HasVolume(volumeId)
+ _, hasEcVolume := vs.store.FindEcVolume(volumeId)
+ if !hasVolume && !hasEcVolume {
if !vs.ReadRedirect {
glog.V(2).Infoln("volume is not local:", err, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
@@ -50,7 +64,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
if err == nil && len(lookupResult.Locations) > 0 {
u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))
- u.Path = r.URL.Path
+ u.Path = fmt.Sprintf("%s/%s,%s", u.Path, vid, fid)
arg := url.Values{}
if c := r.FormValue("collection"); c != "" {
arg.Set("collection", c)
@@ -65,10 +79,15 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
return
}
cookie := n.Cookie
- count, e := vs.store.ReadVolumeNeedle(volumeId, n)
- glog.V(4).Infoln("read bytes", count, "error", e)
- if e != nil || count < 0 {
- glog.V(0).Infof("read %s error: %v", r.URL.Path, e)
+ var count int
+ if hasVolume {
+ count, err = vs.store.ReadVolumeNeedle(volumeId, n)
+ } else if hasEcVolume {
+ count, err = vs.store.ReadEcShardNeedle(volumeId, n)
+ }
+ // glog.V(4).Infoln("read bytes", count, "error", err)
+ if err != nil || count < 0 {
+ glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err)
w.WriteHeader(http.StatusNotFound)
return
}
@@ -92,11 +111,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusNotModified)
return
}
- if r.Header.Get("ETag-MD5") == "True" {
- setEtag(w, n.MD5())
- } else {
- setEtag(w, n.Etag())
- }
+ setEtag(w, n.Etag())
if n.HasPairs() {
pairMap := make(map[string]string)
@@ -109,14 +124,14 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
- if vs.tryHandleChunkedFile(n, filename, w, r) {
+ if vs.tryHandleChunkedFile(n, filename, ext, w, r) {
return
}
if n.NameSize > 0 && filename == "" {
filename = string(n.Name)
if ext == "" {
- ext = path.Ext(filename)
+ ext = filepath.Ext(filename)
}
}
mtype := ""
@@ -127,13 +142,19 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
- if ext != ".gz" {
- if n.IsGzipped() {
- if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ if ext != ".gz" && ext != ".zst" {
+ if n.IsCompressed() {
+ if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize {
+ if n.Data, err = util.DecompressData(n.Data); err != nil {
+ glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
+ }
+ } else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) {
+ w.Header().Set("Content-Encoding", "zstd")
+ } else if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && util.IsGzippedContent(n.Data) {
w.Header().Set("Content-Encoding", "gzip")
} else {
- if n.Data, err = operation.UnGzipData(n.Data); err != nil {
- glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
+ if n.Data, err = util.DecompressData(n.Data); err != nil {
+ glog.V(0).Infoln("uncompress error:", err, r.URL.Path)
}
}
}
@@ -146,12 +167,12 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
-func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
+func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, ext string, w http.ResponseWriter, r *http.Request) (processed bool) {
if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" {
return false
}
- chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+ chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed())
if e != nil {
glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e)
return false
@@ -160,7 +181,9 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string,
fileName = chunkManifest.Name
}
- ext := path.Ext(fileName)
+ if ext == "" {
+ ext = filepath.Ext(fileName)
+ }
mType := ""
if chunkManifest.Mime != "" {
@@ -172,10 +195,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string,
w.Header().Set("X-File-Store", "chunked")
- chunkedFileReader := &operation.ChunkedFileReader{
- Manifest: chunkManifest,
- Master: vs.GetMaster(),
- }
+ chunkedFileReader := operation.NewChunkedFileReader(chunkManifest.Chunks, vs.GetMaster())
defer chunkedFileReader.Close()
rs := conditionallyResizeImages(chunkedFileReader, ext, r)
@@ -188,132 +208,56 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string,
func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {
rs := originalDataReaderSeeker
+
+ width, height, mode, shouldResize := shouldResizeImages(ext, r)
+ if shouldResize {
+ rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, mode)
+ }
+ return rs
+}
+
+func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) {
if len(ext) > 0 {
ext = strings.ToLower(ext)
}
if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" {
- width, height := 0, 0
if r.FormValue("width") != "" {
width, _ = strconv.Atoi(r.FormValue("width"))
}
if r.FormValue("height") != "" {
height, _ = strconv.Atoi(r.FormValue("height"))
}
- rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode"))
}
- return rs
+ mode = r.FormValue("mode")
+ shouldResize = width > 0 || height > 0
+ return
}
func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
totalSize, e := rs.Seek(0, 2)
if mimeType == "" {
- if ext := path.Ext(filename); ext != "" {
+ if ext := filepath.Ext(filename); ext != "" {
mimeType = mime.TypeByExtension(ext)
}
}
if mimeType != "" {
w.Header().Set("Content-Type", mimeType)
}
- if filename != "" {
- contentDisposition := "inline"
- if r.FormValue("dl") != "" {
- if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
- contentDisposition = "attachment"
- }
- }
- w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
- }
w.Header().Set("Accept-Ranges", "bytes")
+
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
return nil
}
- rangeReq := r.Header.Get("Range")
- if rangeReq == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
- if _, e = rs.Seek(0, 0); e != nil {
- return e
- }
- _, e = io.Copy(w, rs)
- return e
- }
- //the rest is dealing with partial content request
- //mostly copy from src/pkg/net/http/fs.go
- ranges, err := parseRange(rangeReq, totalSize)
- if err != nil {
- http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return nil
- }
- if sumRangesSize(ranges) > totalSize {
- // The total number of bytes in all the ranges
- // is larger than the size of the file by
- // itself, so this is probably an attack, or a
- // dumb client. Ignore the range request.
- return nil
- }
- if len(ranges) == 0 {
- return nil
- }
- if len(ranges) == 1 {
- // RFC 2616, Section 14.16:
- // "When an HTTP message includes the content of a single
- // range (for example, a response to a request for a
- // single range, or to a request for a set of ranges
- // that overlap without any holes), this content is
- // transmitted with a Content-Range header, and a
- // Content-Length header showing the number of bytes
- // actually transferred.
- // ...
- // A response to a request for a single range MUST NOT
- // be sent using the multipart/byteranges media type."
- ra := ranges[0]
- w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
- w.Header().Set("Content-Range", ra.contentRange(totalSize))
- w.WriteHeader(http.StatusPartialContent)
- if _, e = rs.Seek(ra.start, 0); e != nil {
+ adjustHeadersAfterHEAD(w, r, filename)
+
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ if _, e = rs.Seek(offset, 0); e != nil {
return e
}
-
- _, e = io.CopyN(w, rs, ra.length)
+ _, e = io.CopyN(writer, rs, size)
return e
- }
- // process multiple ranges
- for _, ra := range ranges {
- if ra.start > totalSize {
- http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
- return nil
- }
- }
- sendSize := rangesMIMESize(ranges, mimeType, totalSize)
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent := pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
- if e != nil {
- pw.CloseWithError(e)
- return
- }
- if _, e = rs.Seek(ra.start, 0); e != nil {
- pw.CloseWithError(e)
- return
- }
- if _, e = io.CopyN(part, rs, ra.length); e != nil {
- pw.CloseWithError(e)
- return
- }
- }
- mw.Close()
- pw.Close()
- }()
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- w.WriteHeader(http.StatusPartialContent)
- _, e = io.CopyN(w, sendContent, sendSize)
- return e
+ })
+ return nil
}
diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go
index b3d9a21fd..8b2027e7b 100644
--- a/weed/server/volume_server_handlers_ui.go
+++ b/weed/server/volume_server_handlers_ui.go
@@ -8,6 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
ui "github.com/chrislusf/seaweedfs/weed/server/volume_server_ui"
"github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -20,17 +21,30 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
ds = append(ds, stats.NewDiskStatus(dir))
}
}
+ volumeInfos := vs.store.VolumeInfos()
+ var normalVolumeInfos, remoteVolumeInfos []*storage.VolumeInfo
+ for _, vinfo := range volumeInfos {
+ if vinfo.IsRemote() {
+ remoteVolumeInfos = append(remoteVolumeInfos, vinfo)
+ } else {
+ normalVolumeInfos = append(normalVolumeInfos, vinfo)
+ }
+ }
args := struct {
- Version string
- Masters []string
- Volumes interface{}
- DiskStatuses interface{}
- Stats interface{}
- Counters *stats.ServerStats
+ Version string
+ Masters []string
+ Volumes interface{}
+ EcVolumes interface{}
+ RemoteVolumes interface{}
+ DiskStatuses interface{}
+ Stats interface{}
+ Counters *stats.ServerStats
}{
- util.VERSION,
- vs.MasterNodes,
- vs.store.Status(),
+ util.Version(),
+ vs.SeedMasterNodes,
+ normalVolumeInfos,
+ vs.store.EcVolumes(),
+ remoteVolumeInfos,
ds,
infos,
serverStats,
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index fd93142e1..5ece46ed0 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -10,56 +10,99 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/topology"
)
func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
+
+ stats.VolumeServerRequestCounter.WithLabelValues("post").Inc()
+ start := time.Now()
+ defer func() {
+ stats.VolumeServerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds())
+ }()
+
if e := r.ParseForm(); e != nil {
glog.V(0).Infoln("form parse error:", e)
writeJsonError(w, r, http.StatusBadRequest, e)
return
}
- vid, _, _, _, _ := parseURLPath(r.URL.Path)
- volumeId, ve := storage.NewVolumeId(vid)
+
+ vid, fid, _, _, _ := parseURLPath(r.URL.Path)
+ volumeId, ve := needle.NewVolumeId(vid)
if ve != nil {
glog.V(0).Infoln("NewVolumeId error:", ve)
writeJsonError(w, r, http.StatusBadRequest, ve)
return
}
- needle, originalSize, ne := storage.CreateNeedleFromRequest(r, vs.FixJpgOrientation)
+
+ if !vs.maybeCheckJwtAuthorization(r, vid, fid, true) {
+ writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt"))
+ return
+ }
+
+ reqNeedle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.fileSizeLimitBytes)
if ne != nil {
writeJsonError(w, r, http.StatusBadRequest, ne)
return
}
ret := operation.UploadResult{}
- _, errorStatus := topology.ReplicatedWrite(vs.GetMaster(),
- vs.store, volumeId, needle, r)
+ isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, reqNeedle, r)
+
+ // http 204 status code does not allow body
+ if writeError == nil && isUnchanged {
+ setEtag(w, reqNeedle.Etag())
+ w.WriteHeader(http.StatusNoContent)
+ return
+ }
+
httpStatus := http.StatusCreated
- if errorStatus != "" {
+ if writeError != nil {
httpStatus = http.StatusInternalServerError
- ret.Error = errorStatus
+ ret.Error = writeError.Error()
}
- if needle.HasName() {
- ret.Name = string(needle.Name)
+ if reqNeedle.HasName() {
+ ret.Name = string(reqNeedle.Name)
}
ret.Size = uint32(originalSize)
- ret.ETag = needle.Etag()
+ ret.ETag = reqNeedle.Etag()
+ ret.Mime = string(reqNeedle.Mime)
setEtag(w, ret.ETag)
writeJsonQuiet(w, r, httpStatus, ret)
}
func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
- n := new(storage.Needle)
+
+ stats.VolumeServerRequestCounter.WithLabelValues("delete").Inc()
+ start := time.Now()
+ defer func() {
+ stats.VolumeServerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds())
+ }()
+
+ n := new(needle.Needle)
vid, fid, _, _, _ := parseURLPath(r.URL.Path)
- volumeId, _ := storage.NewVolumeId(vid)
+ volumeId, _ := needle.NewVolumeId(vid)
n.ParsePath(fid)
+ if !vs.maybeCheckJwtAuthorization(r, vid, fid, true) {
+ writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt"))
+ return
+ }
+
// glog.V(2).Infof("volume %s deleting %s", vid, n)
cookie := n.Cookie
+ ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId)
+
+ if hasEcVolume {
+ count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie)
+ writeDeleteResult(err, count, w, r)
+ return
+ }
+
_, ok := vs.store.ReadVolumeNeedle(volumeId, n)
if ok != nil {
m := make(map[string]uint32)
@@ -77,13 +120,13 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
count := int64(n.Size)
if n.IsChunkedManifest() {
- chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+ chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed())
if e != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Load chunks manifest error: %v", e))
return
}
// make sure all chunks had deleted before delete manifest
- if e := chunkManifest.DeleteChunks(vs.GetMaster()); e != nil {
+ if e := chunkManifest.DeleteChunks(vs.GetMaster(), false, vs.grpcDialOption); e != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e))
return
}
@@ -100,6 +143,11 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
_, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r)
+ writeDeleteResult(err, count, w, r)
+
+}
+
+func writeDeleteResult(err error, count int64, w http.ResponseWriter, r *http.Request) {
if err == nil {
m := make(map[string]int64)
m["size"] = count
@@ -107,7 +155,6 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
} else {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %v", err))
}
-
}
func setEtag(w http.ResponseWriter, etag string) {
@@ -119,3 +166,11 @@ func setEtag(w http.ResponseWriter, etag string) {
}
}
}
+
+func getEtag(resp *http.Response) (etag string) {
+ etag = resp.Header.Get("ETag")
+ if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
+ return etag[1 : len(etag)-1]
+ }
+ return
+}
diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go
index b9740510f..8705bc088 100644
--- a/weed/server/volume_server_ui/templates.go
+++ b/weed/server/volume_server_ui/templates.go
@@ -1,11 +1,17 @@
package master_ui
import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"html/template"
"strconv"
"strings"
)
+func percentFrom(total uint64, partOf uint64) string {
+	return fmt.Sprintf("%.2f", (float64(partOf)/float64(total))*100)
+}
+
func join(data []int64) string {
var ret []string
for _, d := range data {
@@ -15,7 +21,9 @@ func join(data []int64) string {
}
var funcMap = template.FuncMap{
- "join": join,
+ "join": join,
+ "bytesToHumanReadable": util.BytesToHumanReadable,
+ "percentFrom": percentFrom,
}
var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
@@ -57,13 +65,25 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
<h2>Disk Stats</h2>
- <table class="table table-condensed table-striped">
+ <table class="table table-striped">
+ <thead>
+ <tr>
+ <th>Path</th>
+ <th>Total</th>
+ <th>Free</th>
+ <th>Usage</th>
+ </tr>
+ </thead>
+ <tbody>
{{ range .DiskStatuses }}
<tr>
- <td>{{ .Dir }}</td>
- <td>{{ .Free }} Bytes Free</td>
+ <td>{{ .Dir }}</td>
+ <td>{{ bytesToHumanReadable .All }}</td>
+ <td>{{ bytesToHumanReadable .Free }}</td>
+ <td>{{ percentFrom .All .Used}}%</td>
</tr>
{{ end }}
+ </tbody>
</table>
@@ -107,10 +127,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
<th>Id</th>
<th>Collection</th>
- <th>Size</th>
+ <th>Data Size</th>
<th>Files</th>
<th>Trash</th>
<th>TTL</th>
+ <th>ReadOnly</th>
@@ -118,10 +139,67 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
<td>{{ .Id }}</td>
<td>{{ .Collection }}</td>
- <td>{{ .Size }} Bytes</td>
+ <td>{{ bytesToHumanReadable .Size }}</td>
<td>{{ .FileCount }}</td>
- <td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td>
+ <td>{{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}</td>
<td>{{ .Ttl }}</td>
+ <td>{{ .ReadOnly }}</td>
+ </tr>
+ {{ end }}
+ </tbody>
+ </table>
+ </div>
+
+ <div class="row">
+ <h2>Remote Volumes</h2>
+ <table class="table table-striped">
+ <thead>
+ <tr>
+ <th>Id</th>
+ <th>Collection</th>
+ <th>Size</th>
+ <th>Files</th>
+ <th>Trash</th>
+ <th>Remote</th>
+ <th>Key</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{ range .RemoteVolumes }}
+ <tr>
+ <td>{{ .Id }}</td>
+ <td>{{ .Collection }}</td>
+ <td>{{ bytesToHumanReadable .Size }}</td>
+ <td>{{ .FileCount }}</td>
+ <td>{{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}</td>
+ <td>{{ .RemoteStorageName }}</td>
+ <td>{{ .RemoteStorageKey }}</td>
+ </tr>
+ {{ end }}
+ </tbody>
+ </table>
+ </div>
+
+ <div class="row">
+ <h2>Erasure Coding Shards</h2>
+ <table class="table table-striped">
+ <thead>
+ <tr>
+ <th>Id</th>
+ <th>Collection</th>
+ <th>Shard Size</th>
+ <th>Shards</th>
+ <th>CreatedAt</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{ range .EcVolumes }}
+ <tr>
+ <td>{{ .VolumeId }}</td>
+ <td>{{ .Collection }}</td>
+ <td>{{ bytesToHumanReadable .ShardSize }}</td>
+ <td>{{ .ShardIdList }}</td>
+ <td>{{ .CreatedAt.Format "02 Jan 06 15:04 -0700" }}</td>
+ </tr>
{{ end }}
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
new file mode 100644
index 000000000..e8bedd352
--- /dev/null
+++ b/weed/server/webdav_server.go
@@ -0,0 +1,573 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+ "golang.org/x/net/webdav"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/security"
+)
+
+type WebDavOption struct {
+ Filer string
+ FilerGrpcAddress string
+ DomainName string
+ BucketsPath string
+ GrpcDialOption grpc.DialOption
+ Collection string
+ Uid uint32
+ Gid uint32
+ Cipher bool
+ CacheDir string
+ CacheSizeMB int64
+}
+
+type WebDavServer struct {
+ option *WebDavOption
+ secret security.SigningKey
+ filer *filer2.Filer
+ grpcDialOption grpc.DialOption
+ Handler *webdav.Handler
+}
+
+func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) {
+
+ fs, _ := NewWebDavFileSystem(option)
+
+ ws = &WebDavServer{
+ option: option,
+ grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),
+ Handler: &webdav.Handler{
+ FileSystem: fs,
+ LockSystem: webdav.NewMemLS(),
+ },
+ }
+
+ return ws, nil
+}
+
+// adapted from https://github.com/mattn/davfs/blob/master/plugin/mysql/mysql.go
+
+type WebDavFileSystem struct {
+ option *WebDavOption
+ secret security.SigningKey
+ filer *filer2.Filer
+ grpcDialOption grpc.DialOption
+ chunkCache *chunk_cache.ChunkCache
+}
+
+type FileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modifiedTime time.Time
+ isDirectory bool
+}
+
+func (fi *FileInfo) Name() string { return fi.name }
+func (fi *FileInfo) Size() int64 { return fi.size }
+func (fi *FileInfo) Mode() os.FileMode { return fi.mode }
+func (fi *FileInfo) ModTime() time.Time { return fi.modifiedTime }
+func (fi *FileInfo) IsDir() bool { return fi.isDirectory }
+func (fi *FileInfo) Sys() interface{} { return nil }
+
+type WebDavFile struct {
+ fs *WebDavFileSystem
+ name string
+ isDirectory bool
+ off int64
+ entry *filer_pb.Entry
+ entryViewCache []filer2.VisibleInterval
+ reader io.ReaderAt
+}
+
+func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
+
+ chunkCache := chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB)
+ grace.OnInterrupt(func() {
+ chunkCache.Shutdown()
+ })
+ return &WebDavFileSystem{
+ option: option,
+ chunkCache: chunkCache,
+ }, nil
+}
+
+var _ = filer_pb.FilerClient(&WebDavFileSystem{})
+
+func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption)
+
+}
+func (fs *WebDavFileSystem) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
+
+func clearName(name string) (string, error) {
+ slashed := strings.HasSuffix(name, "/")
+ name = path.Clean(name)
+ if !strings.HasSuffix(name, "/") && slashed {
+ name += "/"
+ }
+ if !strings.HasPrefix(name, "/") {
+ return "", os.ErrInvalid
+ }
+ return name, nil
+}
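+
+// Assumed behavior, shown for illustration only:
+// clearName("/a/b/../c/") returns "/a/c/" (cleaned, trailing slash kept),
+// while clearName("relative/path") fails with os.ErrInvalid because the
+// WebDAV paths handled here must be absolute.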
+
+func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm os.FileMode) error {
+
+ glog.V(2).Infof("WebDavFileSystem.Mkdir %v", fullDirPath)
+
+ if !strings.HasSuffix(fullDirPath, "/") {
+ fullDirPath += "/"
+ }
+
+ var err error
+ if fullDirPath, err = clearName(fullDirPath); err != nil {
+ return err
+ }
+
+ _, err = fs.stat(ctx, fullDirPath)
+ if err == nil {
+ return os.ErrExist
+ }
+
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ dir, name := util.FullPath(fullDirPath).DirAndName()
+ request := &filer_pb.CreateEntryRequest{
+ Directory: dir,
+ Entry: &filer_pb.Entry{
+ Name: name,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(perm | os.ModeDir),
+ Uid: fs.option.Uid,
+ Gid: fs.option.Gid,
+ },
+ },
+ }
+
+ glog.V(1).Infof("mkdir: %v", request)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ return fmt.Errorf("mkdir %s/%s: %v", dir, name, err)
+ }
+
+ return nil
+ })
+}
+
+func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) {
+
+ glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag)
+
+ var err error
+ if fullFilePath, err = clearName(fullFilePath); err != nil {
+ return nil, err
+ }
+
+ if flag&os.O_CREATE != 0 {
+ // file should not have / suffix.
+ if strings.HasSuffix(fullFilePath, "/") {
+ return nil, os.ErrInvalid
+ }
+ _, err = fs.stat(ctx, fullFilePath)
+ if err == nil {
+ if flag&os.O_EXCL != 0 {
+ return nil, os.ErrExist
+ }
+ fs.removeAll(ctx, fullFilePath)
+ }
+
+ dir, name := util.FullPath(fullFilePath).DirAndName()
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+ Directory: dir,
+ Entry: &filer_pb.Entry{
+ Name: name,
+ IsDirectory: perm&os.ModeDir > 0,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(perm),
+ Uid: fs.option.Uid,
+ Gid: fs.option.Gid,
+ Collection: fs.option.Collection,
+ Replication: "000",
+ TtlSec: 0,
+ },
+ },
+ }); err != nil {
+ return fmt.Errorf("create %s: %v", fullFilePath, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &WebDavFile{
+ fs: fs,
+ name: fullFilePath,
+ isDirectory: false,
+ }, nil
+ }
+
+ fi, err := fs.stat(ctx, fullFilePath)
+ if err != nil {
+ return nil, os.ErrNotExist
+ }
+ if !strings.HasSuffix(fullFilePath, "/") && fi.IsDir() {
+ fullFilePath += "/"
+ }
+
+ return &WebDavFile{
+ fs: fs,
+ name: fullFilePath,
+ isDirectory: false,
+ }, nil
+
+}
+
+func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) error {
+ var err error
+ if fullFilePath, err = clearName(fullFilePath); err != nil {
+ return err
+ }
+
+ dir, name := util.FullPath(fullFilePath).DirAndName()
+
+ return filer_pb.Remove(fs, dir, name, true, false, false, false)
+
+}
+
+func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error {
+
+ glog.V(2).Infof("WebDavFileSystem.RemoveAll %v", name)
+
+ return fs.removeAll(ctx, name)
+}
+
+func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) error {
+
+ glog.V(2).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName)
+
+ var err error
+ if oldName, err = clearName(oldName); err != nil {
+ return err
+ }
+ if newName, err = clearName(newName); err != nil {
+ return err
+ }
+
+ of, err := fs.stat(ctx, oldName)
+ if err != nil {
+ return os.ErrNotExist
+ }
+ if of.IsDir() {
+ if strings.HasSuffix(oldName, "/") {
+ oldName = strings.TrimRight(oldName, "/")
+ }
+ if strings.HasSuffix(newName, "/") {
+ newName = strings.TrimRight(newName, "/")
+ }
+ }
+
+ _, err = fs.stat(ctx, newName)
+ if err == nil {
+ return os.ErrExist
+ }
+
+ oldDir, oldBaseName := util.FullPath(oldName).DirAndName()
+ newDir, newBaseName := util.FullPath(newName).DirAndName()
+
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AtomicRenameEntryRequest{
+ OldDirectory: oldDir,
+ OldName: oldBaseName,
+ NewDirectory: newDir,
+ NewName: newBaseName,
+ }
+
+ _, err := client.AtomicRenameEntry(ctx, request)
+ if err != nil {
+ return fmt.Errorf("renaming %s/%s => %s/%s: %v", oldDir, oldBaseName, newDir, newBaseName, err)
+ }
+
+ return nil
+
+ })
+}
+
+func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.FileInfo, error) {
+ var err error
+ if fullFilePath, err = clearName(fullFilePath); err != nil {
+ return nil, err
+ }
+
+ fullpath := util.FullPath(fullFilePath)
+
+ var fi FileInfo
+ entry, err := filer_pb.GetEntry(fs, fullpath)
+ if entry == nil {
+ return nil, os.ErrNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ fi.size = int64(filer2.TotalSize(entry.GetChunks()))
+ fi.name = string(fullpath)
+ fi.mode = os.FileMode(entry.Attributes.FileMode)
+ fi.modifiedTime = time.Unix(entry.Attributes.Mtime, 0)
+ fi.isDirectory = entry.IsDirectory
+
+ if fi.name == "/" {
+ fi.modifiedTime = time.Now()
+ fi.isDirectory = true
+ }
+ return &fi, nil
+}
+
+func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+
+ glog.V(2).Infof("WebDavFileSystem.Stat %v", name)
+
+ return fs.stat(ctx, name)
+}
+
+func (f *WebDavFile) Write(buf []byte) (int, error) {
+
+ glog.V(2).Infof("WebDavFileSystem.Write %v", f.name)
+
+ dir, _ := util.FullPath(f.name).DirAndName()
+
+ var err error
+ ctx := context.Background()
+ if f.entry == nil {
+ f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
+ }
+
+ if f.entry == nil {
+ return 0, err
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ var fileId, host string
+ var auth security.EncodedJwt
+ var collection, replication string
+
+ if err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: "",
+ Collection: f.fs.option.Collection,
+ ParentPath: dir,
+ }
+
+ resp, err := client.AssignVolume(ctx, request)
+ if err != nil {
+ glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ return err
+ }
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
+
+ fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
+ collection, replication = resp.Collection, resp.Replication
+
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("filerGrpcAddress assign volume: %v", err)
+ }
+
+ fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+ uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth)
+ if err != nil {
+ glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err)
+ return 0, fmt.Errorf("upload data: %v", err)
+ }
+ if uploadResult.Error != "" {
+ glog.V(0).Infof("upload failure %v to %s: %v", f.name, fileUrl, err)
+ return 0, fmt.Errorf("upload result: %v", uploadResult.Error)
+ }
+
+ f.entry.Chunks = append(f.entry.Chunks, uploadResult.ToPbFileChunk(fileId, f.off))
+
+ err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ f.entry.Attributes.Mtime = time.Now().Unix()
+ f.entry.Attributes.Collection = collection
+ f.entry.Attributes.Replication = replication
+
+ request := &filer_pb.UpdateEntryRequest{
+ Directory: dir,
+ Entry: f.entry,
+ }
+
+ if _, err := client.UpdateEntry(ctx, request); err != nil {
+ return fmt.Errorf("update %s: %v", f.name, err)
+ }
+
+ return nil
+ })
+
+ if err == nil {
+ glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf)))
+ f.off += int64(len(buf))
+ }
+
+ return len(buf), err
+}
+
+func (f *WebDavFile) Close() error {
+
+ glog.V(2).Infof("WebDavFileSystem.Close %v", f.name)
+
+ if f.entry != nil {
+ f.entry = nil
+ f.entryViewCache = nil
+ }
+
+ return nil
+}
+
+func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
+
+ glog.V(2).Infof("WebDavFileSystem.Read %v", f.name)
+
+ if f.entry == nil {
+ f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
+ }
+ if f.entry == nil {
+ return 0, err
+ }
+ if err != nil {
+ return 0, err
+ }
+ if len(f.entry.Chunks) == 0 {
+ return 0, io.EOF
+ }
+ if f.entryViewCache == nil {
+ f.entryViewCache = filer2.NonOverlappingVisibleIntervals(f.entry.Chunks)
+ f.reader = nil
+ }
+ if f.reader == nil {
+ chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
+ f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache)
+ }
+
+ readSize, err = f.reader.ReadAt(p, f.off)
+
+ glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize))
+ f.off += int64(readSize)
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ if err != nil {
+ glog.Errorf("file read %s: %v", f.name, err)
+ }
+
+ return
+
+}
+
+func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
+
+ glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count)
+
+ dir, _ := util.FullPath(f.name).DirAndName()
+
+ err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error {
+ fi := FileInfo{
+ size: int64(filer2.TotalSize(entry.GetChunks())),
+ name: entry.Name,
+ mode: os.FileMode(entry.Attributes.FileMode),
+ modifiedTime: time.Unix(entry.Attributes.Mtime, 0),
+ isDirectory: entry.IsDirectory,
+ }
+
+ if !strings.HasSuffix(fi.name, "/") && fi.IsDir() {
+ fi.name += "/"
+ }
+ glog.V(4).Infof("entry: %v", fi.name)
+ ret = append(ret, &fi)
+ return nil
+ })
+
+ old := f.off
+ if old >= int64(len(ret)) {
+ if count > 0 {
+ return nil, io.EOF
+ }
+ return nil, nil
+ }
+ if count > 0 {
+ f.off += int64(count)
+ if f.off > int64(len(ret)) {
+ f.off = int64(len(ret))
+ }
+ } else {
+ f.off = int64(len(ret))
+ old = 0
+ }
+
+ return ret[old:f.off], nil
+}
+
+func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) {
+
+ glog.V(2).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence)
+
+ ctx := context.Background()
+
+ var err error
+ switch whence {
+ case 0:
+ f.off = 0
+ case 2:
+ if fi, err := f.fs.stat(ctx, f.name); err != nil {
+ return 0, err
+ } else {
+ f.off = fi.Size()
+ }
+ }
+ f.off += offset
+ return f.off, err
+}
+
+func (f *WebDavFile) Stat() (os.FileInfo, error) {
+
+ glog.V(2).Infof("WebDavFile.Stat %v", f.name)
+
+ ctx := context.Background()
+
+ return f.fs.stat(ctx, f.name)
+}
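+
+// Minimal usage sketch (assumed wiring, not part of this file): the
+// `weed webdav` command constructs the option struct and serves the
+// handler over plain HTTP, roughly:
+//
+//	ws, _ := NewWebDavServer(&WebDavOption{
+//		Filer:            "localhost:8888",
+//		FilerGrpcAddress: "localhost:18888",
+//		Uid:              uint32(os.Getuid()),
+//		Gid:              uint32(os.Getgid()),
+//	})
+//	_ = http.ListenAndServe(":7333", ws.Handler)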
diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go
new file mode 100644
index 000000000..52d96e4c3
--- /dev/null
+++ b/weed/shell/command_bucket_create.go
@@ -0,0 +1,83 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandBucketCreate{})
+}
+
+type commandBucketCreate struct {
+}
+
+func (c *commandBucketCreate) Name() string {
+ return "bucket.create"
+}
+
+func (c *commandBucketCreate) Help() string {
+ return `create a bucket with a given name
+
+ Example:
+ bucket.create -name <bucket_name> -replication 001
+`
+}
+
+func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ bucketName := bucketCommand.String("name", "", "bucket name")
+ replication := bucketCommand.String("replication", "", "replication setting for the bucket")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *bucketName == "" {
+ return fmt.Errorf("empty bucket name")
+ }
+
+ err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer configuration: %v", err)
+ }
+ filerBucketsPath := resp.DirBuckets
+
+ println("create bucket under", filerBucketsPath)
+
+ entry := &filer_pb.Entry{
+ Name: *bucketName,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Collection: *bucketName,
+ Replication: *replication,
+ },
+ }
+
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+ Directory: filerBucketsPath,
+ Entry: entry,
+ }); err != nil {
+ return err
+ }
+
+ println("created bucket", *bucketName)
+
+ return nil
+
+ })
+
+ return err
+
+}
diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go
new file mode 100644
index 000000000..8f5f63b46
--- /dev/null
+++ b/weed/shell/command_bucket_delete.go
@@ -0,0 +1,54 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandBucketDelete{})
+}
+
+type commandBucketDelete struct {
+}
+
+func (c *commandBucketDelete) Name() string {
+ return "bucket.delete"
+}
+
+func (c *commandBucketDelete) Help() string {
+ return `delete a bucket by a given name
+
+ bucket.delete -name <bucket_name>
+`
+}
+
+func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ bucketName := bucketCommand.String("name", "", "bucket name")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *bucketName == "" {
+ return fmt.Errorf("empty bucket name")
+ }
+
+ _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true, false)
+
+}
diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go
new file mode 100644
index 000000000..2e446b6b2
--- /dev/null
+++ b/weed/shell/command_bucket_list.go
@@ -0,0 +1,78 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandBucketList{})
+}
+
+type commandBucketList struct {
+}
+
+func (c *commandBucketList) Name() string {
+ return "bucket.list"
+}
+
+func (c *commandBucketList) Help() string {
+ return `list all buckets
+
+`
+}
+
+func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
+ if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" {
+ fmt.Fprintf(writer, " %s\n", entry.Name)
+ } else {
+ fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", entry.Name, entry.Attributes.Replication)
+ }
+ return nil
+ }, "", false, math.MaxUint32)
+ if err != nil {
+ return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ }
+
+ return err
+
+}
+
+func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath string, err error) {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer configuration: %v", err)
+ }
+ filerBucketsPath = resp.DirBuckets
+
+ return nil
+
+ })
+
+ return filerBucketsPath, err
+}
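+
+// Example `weed shell` session tying the bucket commands together
+// (bucket name and replication value are illustrative only):
+//   > bucket.create -name test-bucket -replication 001
+//   > bucket.list
+//   > bucket.delete -name test-bucket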
diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go
new file mode 100644
index 000000000..4b3d7f0be
--- /dev/null
+++ b/weed/shell/command_collection_delete.go
@@ -0,0 +1,50 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandCollectionDelete{})
+}
+
+type commandCollectionDelete struct {
+}
+
+func (c *commandCollectionDelete) Name() string {
+ return "collection.delete"
+}
+
+func (c *commandCollectionDelete) Help() string {
+ return `delete specified collection
+
+ collection.delete <collection_name>
+
+`
+}
+
+func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ collectionName := args[0]
+
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: collectionName,
+ })
+ return err
+ })
+ if err != nil {
+ return
+ }
+
+ fmt.Fprintf(writer, "collection %s is deleted.\n", collectionName)
+
+ return nil
+}
diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go
new file mode 100644
index 000000000..2a114e61b
--- /dev/null
+++ b/weed/shell/command_collection_list.go
@@ -0,0 +1,58 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandCollectionList{})
+}
+
+type commandCollectionList struct {
+}
+
+func (c *commandCollectionList) Name() string {
+ return "collection.list"
+}
+
+func (c *commandCollectionList) Help() string {
+ return `list all collections`
+}
+
+func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ collections, err := ListCollectionNames(commandEnv, true, true)
+
+ if err != nil {
+ return err
+ }
+
+ for _, c := range collections {
+ fmt.Fprintf(writer, "collection:\"%s\"\n", c)
+ }
+
+ fmt.Fprintf(writer, "Total %d collections.\n", len(collections))
+
+ return nil
+}
+
+func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) {
+ var resp *master_pb.CollectionListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{
+ IncludeNormalVolumes: includeNormalVolumes,
+ IncludeEcVolumes: includeEcVolumes,
+ })
+ return err
+ })
+ if err != nil {
+ return
+ }
+ for _, c := range resp.Collections {
+ collections = append(collections, c.Name)
+ }
+ return
+}
diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go
new file mode 100644
index 000000000..1ddb6a490
--- /dev/null
+++ b/weed/shell/command_ec_balance.go
@@ -0,0 +1,519 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandEcBalance{})
+}
+
+type commandEcBalance struct {
+}
+
+func (c *commandEcBalance) Name() string {
+ return "ec.balance"
+}
+
+func (c *commandEcBalance) Help() string {
+ return `balance all ec shards among all racks and volume servers
+
+ ec.balance [-c EACH_COLLECTION|<collection_name>] [-force] [-dataCenter <data_center>]
+
+ Algorithm:
+
+ For each type of volume server (different max volume count limit){
+ for each collection:
+ balanceEcVolumes(collectionName)
+ for each rack:
+ balanceEcRack(rack)
+ }
+
+ func balanceEcVolumes(collectionName){
+ for each volume:
+ doDeduplicateEcShards(volumeId)
+
+ tracks rack~shardCount mapping
+ for each volume:
+ doBalanceEcShardsAcrossRacks(volumeId)
+
+ for each volume:
+ doBalanceEcShardsWithinRacks(volumeId)
+ }
+
+ // spread ec shards into more racks
+ func doBalanceEcShardsAcrossRacks(volumeId){
+ tracks rack~volumeIdShardCount mapping
+ averageShardsPerEcRack = totalShardNumber / numRacks // totalShardNumber is 14 for now, later could vary for each dc
+ ecShardsToMove = select overflowing ec shards from racks with ec shard counts > averageShardsPerEcRack
+ for each ecShardsToMove {
+ destRack = pickOneRack(rack~shardCount, rack~volumeIdShardCount, averageShardsPerEcRack)
+ destVolumeServers = volume servers on the destRack
+ pickOneEcNodeAndMoveOneShard(destVolumeServers)
+ }
+ }
+
+ func doBalanceEcShardsWithinRacks(volumeId){
+ racks = collect all racks that the volume id is on
+ for rack, shards := range racks
+ doBalanceEcShardsWithinOneRack(volumeId, shards, rack)
+ }
+
+ // move ec shards
+ func doBalanceEcShardsWithinOneRack(volumeId, shards, rackId){
+ tracks volumeServer~volumeIdShardCount mapping
+ averageShardCount = len(shards) / numVolumeServers
+ volumeServersOverAverage = volume servers with volumeId's ec shard counts > averageShardsPerEcRack
+ ecShardsToMove = select overflowing ec shards from volumeServersOverAverage
+ for each ecShardsToMove {
+ destVolumeServer = pickOneVolumeServer(volumeServer~shardCount, volumeServer~volumeIdShardCount, averageShardCount)
+ pickOneEcNodeAndMoveOneShard(destVolumeServers)
+ }
+ }
+
+ // move ec shards while keeping shard distribution for the same volume unchanged or more even
+ func balanceEcRack(rack){
+ averageShardCount = total shards / numVolumeServers
+ for hasMovedOneEcShard {
+ sort all volume servers ordered by the number of local ec shards
+ pick the volume server A with the lowest number of ec shards x
+ pick the volume server B with the highest number of ec shards y
+ if y > averageShardCount and x +1 <= averageShardCount {
+ if B has an ec shard with volume id v that A does not have {
+ move one ec shard v from B to A
+ hasMovedOneEcShard = true
+ }
+ }
+ }
+ }
+
+`
+}
+
+func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
+ dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
+ applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan")
+ if err = balanceCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ // collect all ec nodes
+ allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc)
+ if err != nil {
+ return err
+ }
+ if totalFreeEcSlots < 1 {
+ return fmt.Errorf("no free ec shard slots. only %d left", totalFreeEcSlots)
+ }
+
+ racks := collectRacks(allEcNodes)
+
+ if *collection == "EACH_COLLECTION" {
+ collections, err := ListCollectionNames(commandEnv, false, true)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("balanceEcVolumes collections %+v\n", len(collections))
+ for _, c := range collections {
+ fmt.Printf("balanceEcVolumes collection %+v\n", c)
+ if err = balanceEcVolumes(commandEnv, c, allEcNodes, racks, *applyBalancing); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err = balanceEcVolumes(commandEnv, *collection, allEcNodes, racks, *applyBalancing); err != nil {
+ return err
+ }
+ }
+
+ if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil {
+ return fmt.Errorf("balance ec racks: %v", err)
+ }
+
+ return nil
+}
+
+func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack {
+ // collect racks info
+ racks := make(map[RackId]*EcRack)
+ for _, ecNode := range allEcNodes {
+ if racks[ecNode.rack] == nil {
+ racks[ecNode.rack] = &EcRack{
+ ecNodes: make(map[EcNodeId]*EcNode),
+ }
+ }
+ racks[ecNode.rack].ecNodes[EcNodeId(ecNode.info.Id)] = ecNode
+ racks[ecNode.rack].freeEcSlot += ecNode.freeEcSlot
+ }
+ return racks
+}
+
+func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
+
+ fmt.Printf("balanceEcVolumes %s\n", collection)
+
+ if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil {
+ return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err)
+ }
+
+ if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
+ return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err)
+ }
+
+ if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
+ return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err)
+ }
+
+ return nil
+}
+
+func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
+ // vid => []ecNode
+ vidLocations := collectVolumeIdToEcNodes(allEcNodes)
+ // deduplicate ec shards
+ for vid, locations := range vidLocations {
+ if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {
+
+ // check whether this volume has ecNodes that are over average
+ shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount)
+ for _, ecNode := range locations {
+ shardBits := findEcVolumeShards(ecNode, vid)
+ for _, shardId := range shardBits.ShardIds() {
+ shardToLocations[shardId] = append(shardToLocations[shardId], ecNode)
+ }
+ }
+ for shardId, ecNodes := range shardToLocations {
+ if len(ecNodes) <= 1 {
+ continue
+ }
+ sortEcNodesByFreeslotsAscending(ecNodes)
+ fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id)
+ if !applyBalancing {
+ continue
+ }
+
+ duplicatedShardIds := []uint32{uint32(shardId)}
+ for _, ecNode := range ecNodes[1:] {
+ if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+ return err
+ }
+ if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+ return err
+ }
+ ecNode.deleteEcVolumeShards(vid, duplicatedShardIds)
+ }
+ }
+ return nil
+}
+
+func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
+ // collect vid => []ecNode, since previous steps can change the locations
+ vidLocations := collectVolumeIdToEcNodes(allEcNodes)
+ // spread the ec shards evenly
+ for vid, locations := range vidLocations {
+ if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
+
+ // calculate average number of shards an ec rack should have for one volume
+ averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks))
+
+ // count how many racks hold this volume's shards, and how many shards each rack has
+ rackToShardCount := groupByCount(locations, func(ecNode *EcNode) (id string, count int) {
+ shardBits := findEcVolumeShards(ecNode, vid)
+ return string(ecNode.rack), shardBits.ShardIdCount()
+ })
+ rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string {
+ return string(ecNode.rack)
+ })
+
+ // ecShardsToMove = select overflowing ec shards from racks with ec shard counts > averageShardsPerEcRack
+ ecShardsToMove := make(map[erasure_coding.ShardId]*EcNode)
+ for rackId, count := range rackToShardCount {
+ if count > averageShardsPerEcRack {
+ possibleEcNodes := rackEcNodesWithVid[rackId]
+ for shardId, ecNode := range pickNEcShardsToMoveFrom(possibleEcNodes, vid, count-averageShardsPerEcRack) {
+ ecShardsToMove[shardId] = ecNode
+ }
+ }
+ }
+
+ for shardId, ecNode := range ecShardsToMove {
+ rackId := pickOneRack(racks, rackToShardCount, averageShardsPerEcRack)
+ if rackId == "" {
+ fmt.Printf("ec shard %d.%d at %s can not find a destination rack\n", vid, shardId, ecNode.info.Id)
+ continue
+ }
+ var possibleDestinationEcNodes []*EcNode
+ for _, n := range racks[rackId].ecNodes {
+ possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
+ }
+ err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
+ if err != nil {
+ return err
+ }
+ rackToShardCount[string(rackId)] += 1
+ rackToShardCount[string(ecNode.rack)] -= 1
+ racks[rackId].freeEcSlot -= 1
+ racks[ecNode.rack].freeEcSlot += 1
+ }
+
+ return nil
+}
+
+func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]int, averageShardsPerEcRack int) RackId {
+
+ // TODO later may need to add some randomness
+
+ for rackId, rack := range rackToEcNodes {
+ if rackToShardCount[string(rackId)] >= averageShardsPerEcRack {
+ continue
+ }
+
+ if rack.freeEcSlot <= 0 {
+ continue
+ }
+
+ return rackId
+ }
+
+ return ""
+}
+
+func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
+ // collect vid => []ecNode, since previous steps can change the locations
+ vidLocations := collectVolumeIdToEcNodes(allEcNodes)
+
+ // spread the ec shards evenly
+ for vid, locations := range vidLocations {
+
+ // count how many racks hold this volume's shards, and how many shards each rack has
+ rackToShardCount := groupByCount(locations, func(ecNode *EcNode) (id string, count int) {
+ shardBits := findEcVolumeShards(ecNode, vid)
+ return string(ecNode.rack), shardBits.ShardIdCount()
+ })
+ rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string {
+ return string(ecNode.rack)
+ })
+
+ for rackId := range rackToShardCount {
+
+ var possibleDestinationEcNodes []*EcNode
+ for _, n := range racks[RackId(rackId)].ecNodes {
+ possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
+ }
+ sourceEcNodes := rackEcNodesWithVid[rackId]
+ averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes))
+ if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
+
+ for _, ecNode := range existingLocations {
+
+ shardBits := findEcVolumeShards(ecNode, vid)
+ overLimitCount := shardBits.ShardIdCount() - averageShardsPerEcNode
+
+ for _, shardId := range shardBits.ShardIds() {
+
+ if overLimitCount <= 0 {
+ break
+ }
+
+ fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId)
+
+ err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
+ if err != nil {
+ return err
+ }
+
+ overLimitCount--
+ }
+ }
+
+ return nil
+}
+
+func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error {
+
+ // balance one rack for all ec shards
+ for _, ecRack := range racks {
+ if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error {
+
+ if len(ecRack.ecNodes) <= 1 {
+ return nil
+ }
+
+ var rackEcNodes []*EcNode
+ for _, node := range ecRack.ecNodes {
+ rackEcNodes = append(rackEcNodes, node)
+ }
+
+ ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) {
+ for _, ecShardInfo := range node.info.EcShardInfos {
+ count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount()
+ }
+ return node.info.Id, count
+ })
+
+ var totalShardCount int
+ for _, count := range ecNodeIdToShardCount {
+ totalShardCount += count
+ }
+
+ averageShardCount := ceilDivide(totalShardCount, len(rackEcNodes))
+
+ hasMove := true
+ for hasMove {
+ hasMove = false
+ sort.Slice(rackEcNodes, func(i, j int) bool {
+ return rackEcNodes[i].freeEcSlot > rackEcNodes[j].freeEcSlot
+ })
+ emptyNode, fullNode := rackEcNodes[0], rackEcNodes[len(rackEcNodes)-1]
+ emptyNodeShardCount, fullNodeShardCount := ecNodeIdToShardCount[emptyNode.info.Id], ecNodeIdToShardCount[fullNode.info.Id]
+ if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount {
+
+ emptyNodeIds := make(map[uint32]bool)
+ for _, shards := range emptyNode.info.EcShardInfos {
+ emptyNodeIds[shards.Id] = true
+ }
+ for _, shards := range fullNode.info.EcShardInfos {
+ if _, found := emptyNodeIds[shards.Id]; !found {
+ for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
+
+ fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
+
+ err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
+ if err != nil {
+ return err
+ }
+
+ ecNodeIdToShardCount[emptyNode.info.Id]++
+ ecNodeIdToShardCount[fullNode.info.Id]--
+ hasMove = true
+ break
+ }
+ break
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
+
+ sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes)
+
+ for _, destEcNode := range possibleDestinationEcNodes {
+ if destEcNode.info.Id == existingLocation.info.Id {
+ continue
+ }
+
+ if destEcNode.freeEcSlot <= 0 {
+ continue
+ }
+ if findEcVolumeShards(destEcNode, vid).ShardIdCount() >= averageShardsPerEcNode {
+ continue
+ }
+
+ fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id)
+
+ err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ return nil
+}
+
+func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[erasure_coding.ShardId]*EcNode {
+ picked := make(map[erasure_coding.ShardId]*EcNode)
+ var candidateEcNodes []*CandidateEcNode
+ for _, ecNode := range ecNodes {
+ shardBits := findEcVolumeShards(ecNode, vid)
+ if shardBits.ShardIdCount() > 0 {
+ candidateEcNodes = append(candidateEcNodes, &CandidateEcNode{
+ ecNode: ecNode,
+ shardCount: shardBits.ShardIdCount(),
+ })
+ }
+ }
+ sort.Slice(candidateEcNodes, func(i, j int) bool {
+ return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount
+ })
+ for i := 0; i < n; i++ {
+ selectedEcNodeIndex := -1
+ for i, candidateEcNode := range candidateEcNodes {
+ shardBits := findEcVolumeShards(candidateEcNode.ecNode, vid)
+ if shardBits > 0 {
+ selectedEcNodeIndex = i
+ for _, shardId := range shardBits.ShardIds() {
+ candidateEcNode.shardCount--
+ picked[shardId] = candidateEcNode.ecNode
+ candidateEcNode.ecNode.deleteEcVolumeShards(vid, []uint32{uint32(shardId)})
+ break
+ }
+ break
+ }
+ }
+ if selectedEcNodeIndex >= 0 {
+ ensureSortedEcNodes(candidateEcNodes, selectedEcNodeIndex, func(i, j int) bool {
+ return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount
+ })
+ }
+
+ }
+ return picked
+}
+
+func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode {
+ vidLocations := make(map[needle.VolumeId][]*EcNode)
+ for _, ecNode := range allEcNodes {
+ for _, shardInfo := range ecNode.info.EcShardInfos {
+ vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode)
+ }
+ }
+ return vidLocations
+}
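+
+// Typical invocation from `weed shell` (flags as defined in Do above);
+// without -force the command only prints the planned shard moves:
+//   > ec.balance -collection EACH_COLLECTION -dataCenter dc1 -force
+// (dc1 is an illustrative data center name)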
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
new file mode 100644
index 000000000..0db119d3c
--- /dev/null
+++ b/weed/shell/command_ec_common.go
@@ -0,0 +1,337 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
+)
+
+func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {
+
+ copiedShardIds := []uint32{uint32(shardId)}
+
+ if applyBalancing {
+
+ // ask destination node to copy shard and the ecx file from source node, and mount it
+ copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id)
+ if err != nil {
+ return err
+ }
+
+ // unmount the to be deleted shards
+ err = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds)
+ if err != nil {
+ return err
+ }
+
+ // ask source node to delete the shard, and maybe the ecx file
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("moved ec shard %d.%d %s => %s\n", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id)
+
+ }
+
+ destinationEcNode.addEcVolumeShards(vid, collection, copiedShardIds)
+ existingLocation.deleteEcVolumeShards(vid, copiedShardIds)
+
+ return nil
+
+}
+
+func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption,
+ targetServer *EcNode, shardIdsToCopy []uint32,
+ volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) {
+
+ fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)
+
+ err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+ if targetServer.info.Id != existingLocation {
+
+ fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ ShardIds: shardIdsToCopy,
+ CopyEcxFile: true,
+ CopyEcjFile: true,
+ CopyVifFile: true,
+ SourceDataNode: existingLocation,
+ })
+ if copyErr != nil {
+ return fmt.Errorf("copy %d.%v %s => %s : %v\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id, copyErr)
+ }
+ }
+
+ fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id)
+ _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ ShardIds: shardIdsToCopy,
+ })
+ if mountErr != nil {
+ return fmt.Errorf("mount %d.%v on %s : %v\n", volumeId, shardIdsToCopy, targetServer.info.Id, mountErr)
+ }
+
+ if targetServer.info.Id != existingLocation {
+ copiedShardIds = shardIdsToCopy
+ glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, dn *master_pb.DataNodeInfo)) {
+ for _, dc := range topo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, dn := range rack.DataNodeInfos {
+ fn(dc.Id, RackId(rack.Id), dn)
+ }
+ }
+ }
+}
+
+func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) {
+ sort.Slice(ecNodes, func(i, j int) bool {
+ return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot
+ })
+}
+
+func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
+ sort.Slice(ecNodes, func(i, j int) bool {
+ return ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot
+ })
+}
+
+type CandidateEcNode struct {
+ ecNode *EcNode
+ shardCount int
+}
+
+// after the node at the given index changes its shard count, shift it up or down so the slice stays sorted
+func ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) {
+ for i := index - 1; i >= 0; i-- {
+ if lessThan(i+1, i) {
+ swap(data, i, i+1)
+ } else {
+ break
+ }
+ }
+ for i := index + 1; i < len(data); i++ {
+ if lessThan(i, i-1) {
+ swap(data, i, i-1)
+ } else {
+ break
+ }
+ }
+}
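+
+// Example (counts assumed): for shardCount values [5 4 2 1] sorted
+// descending, lowering index 1 from 4 to 1 makes ensureSortedEcNodes
+// bubble that node down one position, restoring [5 2 1 1].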
+
+func swap(data []*CandidateEcNode, i, j int) {
+ t := data[i]
+ data[i] = data[j]
+ data[j] = t
+}
+
+func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) {
+ for _, ecShardInfo := range ecShardInfos {
+ shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+ count += shardBits.ShardIdCount()
+ }
+ return
+}
+
+func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) {
+ return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos)
+}
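+
+// Illustrative arithmetic (numbers assumed): a node with MaxVolumeCount=7
+// and ActiveVolumeCount=5 has (7-5)*10 = 20 raw ec shard slots; if it
+// already hosts 6 ec shards, countFreeShardSlots reports 20-6 = 14.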
+
+type RackId string
+type EcNodeId string
+
+type EcNode struct {
+ info *master_pb.DataNodeInfo
+ dc string
+ rack RackId
+ freeEcSlot int
+}
+
+type EcRack struct {
+ ecNodes map[EcNodeId]*EcNode
+ freeEcSlot int
+}
+
+func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {
+
+ // list all possible locations
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // find all volume servers with at least one free ec shard slot
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ if selectedDataCenter != "" && selectedDataCenter != dc {
+ return
+ }
+
+ freeEcSlots := countFreeShardSlots(dn)
+ ecNodes = append(ecNodes, &EcNode{
+ info: dn,
+ dc: dc,
+ rack: rack,
+ freeEcSlot: int(freeEcSlots),
+ })
+ totalFreeEcSlots += freeEcSlots
+ })
+
+ sortEcNodesByFreeslotsDecending(ecNodes)
+
+ return
+}
+
+func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error {
+
+ fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation)
+
+ return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ ShardIds: toBeDeletedShardIds,
+ })
+ return deleteErr
+ })
+
+}
+
+func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedShardIds []uint32) error {
+
+ fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation)
+
+ return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{
+ VolumeId: uint32(volumeId),
+ ShardIds: toBeUnmountedShardIds,
+ })
+ return deleteErr
+ })
+}
+
+func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedShardIds []uint32) error {
+
+ fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation)
+
+ return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ ShardIds: toBeMountedShardIds,
+ })
+ return mountErr
+ })
+}
+
+func ceilDivide(total, n int) int {
+ return int(math.Ceil(float64(total) / float64(n)))
+}
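+
+// For example, ceilDivide(14, 4) == 4 while ceilDivide(12, 4) == 3, so
+// spreading the 14 ec shards of one volume across 4 racks targets at
+// most 4 shards per rack.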
+
+func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {
+
+ for _, shardInfo := range ecNode.info.EcShardInfos {
+ if needle.VolumeId(shardInfo.Id) == vid {
+ return erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ }
+ }
+
+ return 0
+}
+
+func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {
+
+ foundVolume := false
+ for _, shardInfo := range ecNode.info.EcShardInfos {
+ if needle.VolumeId(shardInfo.Id) == vid {
+ oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ newShardBits := oldShardBits
+ for _, shardId := range shardIds {
+ newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
+ }
+ shardInfo.EcIndexBits = uint32(newShardBits)
+ ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
+ foundVolume = true
+ break
+ }
+ }
+
+ if !foundVolume {
+ var newShardBits erasure_coding.ShardBits
+ for _, shardId := range shardIds {
+ newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
+ }
+ ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
+ Id: uint32(vid),
+ Collection: collection,
+ EcIndexBits: uint32(newShardBits),
+ })
+ ecNode.freeEcSlot -= len(shardIds)
+ }
+
+ return ecNode
+}
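+
+// ShardBits is a bitmap keyed by shard id, so (illustratively) a node
+// holding shards 0 and 3 has EcIndexBits 0b1001; addEcVolumeShards with
+// shardIds [1] moves it to 0b1011 and lowers freeEcSlot by one.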
+
+func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {
+
+ for _, shardInfo := range ecNode.info.EcShardInfos {
+ if needle.VolumeId(shardInfo.Id) == vid {
+ oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
+ newShardBits := oldShardBits
+ for _, shardId := range shardIds {
+ newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
+ }
+ shardInfo.EcIndexBits = uint32(newShardBits)
+ ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
+ }
+ }
+
+ return ecNode
+}
+
+func groupByCount(data []*EcNode, identifierFn func(*EcNode) (id string, count int)) map[string]int {
+ countMap := make(map[string]int)
+ for _, d := range data {
+ id, count := identifierFn(d)
+ countMap[id] += count
+ }
+ return countMap
+}
+
+func groupBy(data []*EcNode, identifierFn func(*EcNode) (id string)) map[string][]*EcNode {
+ groupMap := make(map[string][]*EcNode)
+ for _, d := range data {
+ id := identifierFn(d)
+ groupMap[id] = append(groupMap[id], d)
+ }
+ return groupMap
+}
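+
+// Example (values assumed): three nodes in racks rackA, rackA, rackB
+// contributing shard counts 3, 2, 4 give groupByCount a result of
+// map[rackA:5 rackB:4], while groupBy returns the nodes themselves
+// bucketed per rack id.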
diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go
new file mode 100644
index 000000000..5f03df58c
--- /dev/null
+++ b/weed/shell/command_ec_decode.go
@@ -0,0 +1,268 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandEcDecode{})
+}
+
+type commandEcDecode struct {
+}
+
+func (c *commandEcDecode) Name() string {
+ return "ec.decode"
+}
+
+func (c *commandEcDecode) Help() string {
+ return `decode an erasure-coded volume into a normal volume
+
+ ec.decode [-collection=""] [-volumeId=]
+
+`
+}
+
+func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
+ collection := encodeCommand.String("collection", "", "the collection name")
+ if err = encodeCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ vid := needle.VolumeId(*volumeId)
+
+ // collect topology information
+ topologyInfo, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return err
+ }
+
+ // volumeId is provided
+ if vid != 0 {
+ return doEcDecode(commandEnv, topologyInfo, *collection, vid)
+ }
+
+ // apply to all volumes in the collection
+ volumeIds := collectEcShardIds(topologyInfo, *collection)
+ fmt.Printf("ec encode volumes: %v\n", volumeIds)
+ for _, vid := range volumeIds {
+ if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
+ // find volume location
+ nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)
+
+ fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)
+
+ // collect ec shards to the server with most space
+ targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
+ if err != nil {
+ return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
+ }
+
+ // generate a normal volume
+ err = generateNormalVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation)
+ if err != nil {
+ return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
+ }
+
+ // delete the previous ec shards
+ err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
+ if err != nil {
+ return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
+ }
+
+ return nil
+}
+
+func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {
+
+ // mount volume
+ if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
+ VolumeId: uint32(vid),
+ })
+ return mountErr
+ }); err != nil {
+ return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
+ }
+
+ // unmount ec shards
+ for location, ecIndexBits := range nodeToEcIndexBits {
+ fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
+ err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
+ if err != nil {
+ return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
+ }
+ }
+ // delete ec shards
+ for location, ecIndexBits := range nodeToEcIndexBits {
+ fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
+ err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
+ if err != nil {
+ return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
+ }
+ }
+
+ return nil
+}
+
+func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error {
+
+ fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)
+
+ err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
+ VolumeId: uint32(vid),
+ Collection: collection,
+ })
+ return genErr
+ })
+
+ return err
+
+}
+
+func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) {
+
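+ // pick the node that already holds the most data (non-parity) shards as
+ // the collect target, so the fewest shard copies are needed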
+ maxShardCount := 0
+ var existingEcIndexBits erasure_coding.ShardBits
+ for loc, ecIndexBits := range nodeToEcIndexBits {
+ toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()
+ if toBeCopiedShardCount > maxShardCount {
+ maxShardCount = toBeCopiedShardCount
+ targetNodeLocation = loc
+ existingEcIndexBits = ecIndexBits
+ }
+ }
+
+ fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits)
+
+ var copiedEcIndexBits erasure_coding.ShardBits
+ for loc, ecIndexBits := range nodeToEcIndexBits {
+ if loc == targetNodeLocation {
+ continue
+ }
+
+ needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).MinusParityShards()
+ if needToCopyEcIndexBits.ShardIdCount() == 0 {
+ continue
+ }
+
+ err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+ fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)
+
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
+ VolumeId: uint32(vid),
+ Collection: collection,
+ ShardIds: needToCopyEcIndexBits.ToUint32Slice(),
+ CopyEcxFile: false,
+ CopyEcjFile: true,
+ CopyVifFile: true,
+ SourceDataNode: loc,
+ })
+ if copyErr != nil {
+ return fmt.Errorf("copy %d.%v %s => %s : %v\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ break
+ }
+
+ copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)
+
+ }
+
+ nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits)
+
+ return targetNodeLocation, err
+
+}
+
+func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) {
+
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return
+ }
+
+ return resp.TopologyInfo, nil
+
+}
+
+func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {
+
+ eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, v := range dn.EcShardInfos {
+ if v.Collection == selectedCollection && v.Id == uint32(vid) {
+ ecShardInfos = append(ecShardInfos, v)
+ }
+ }
+ })
+
+ return
+}
+
+func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {
+
+ vidMap := make(map[uint32]bool)
+ eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, v := range dn.EcShardInfos {
+ if v.Collection == selectedCollection {
+ vidMap[v.Id] = true
+ }
+ }
+ })
+
+ for vid := range vidMap {
+ vids = append(vids, needle.VolumeId(vid))
+ }
+
+ return
+}
+
+func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[string]erasure_coding.ShardBits {
+
+ nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits)
+ eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, v := range dn.EcShardInfos {
+ if v.Id == uint32(vid) {
+ nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
+ }
+ }
+ })
+
+ return nodeToEcIndexBits
+}
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
new file mode 100644
index 000000000..5a8146954
--- /dev/null
+++ b/weed/shell/command_ec_encode.go
@@ -0,0 +1,298 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+func init() {
+ Commands = append(Commands, &commandEcEncode{})
+}
+
+type commandEcEncode struct {
+}
+
+func (c *commandEcEncode) Name() string {
+ return "ec.encode"
+}
+
+func (c *commandEcEncode) Help() string {
+ return `apply erasure coding to a volume
+
+ ec.encode [-collection=""] [-fullPercent=95] [-quietFor=1h]
+ ec.encode [-collection=""] [-volumeId=]
+
+ This command will:
+ 1. freeze one volume
+ 2. apply erasure coding to the volume
+ 3. move the encoded shards to multiple volume servers
+
+ The erasure coding scheme is 10.4, i.e. 10 data shards plus 4 parity shards. So ideally you
+ have more than 14 volume servers, and you can afford to lose any 4 of them.
+
+ If the number of volume servers is low, the worst case is that you only have 4 volume servers,
+ and the shards are spread as 4,4,3,3, respectively. You can then afford to lose one volume server.
+
+ If you have fewer than 4 volume servers, with erasure coding you can at least afford to
+ have 4 corrupted shard files.
+
+`
+}
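+
+// exampleShardSpread is an illustrative sketch only (the function is not
+// part of the command and its name is made up for this example): it shows
+// how the 10+4 = 14 shards land on N servers in the worst case with a plain
+// round-robin, e.g. 4 servers give [4 4 3 3], matching the spread quoted in
+// the help text above.
+func exampleShardSpread(servers int) []int {
+ counts := make([]int, servers)
+ for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ counts[i%servers]++
+ }
+ return counts
+}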
+
+func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
+ collection := encodeCommand.String("collection", "", "the collection name")
+ fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
+ quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes with no writes for this period")
+ if err = encodeCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ vid := needle.VolumeId(*volumeId)
+
+ // volumeId is provided
+ if vid != 0 {
+ return doEcEncode(commandEnv, *collection, vid)
+ }
+
+ // apply to all volumes in the collection
+ volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("ec encode volumes: %v\n", volumeIds)
+ for _, vid := range volumeIds {
+ if err = doEcEncode(commandEnv, *collection, vid); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
+ // find volume location
+ locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
+ if !found {
+ return fmt.Errorf("volume %d not found", vid)
+ }
+
+ // fmt.Printf("found ec %d shards on %v\n", vid, locations)
+
+ // mark the volume as readonly
+ err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+ if err != nil {
+ return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
+ }
+
+ // generate ec shards
+ err = generateEcShards(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url)
+ if err != nil {
+ return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err)
+ }
+
+ // balance the ec shards to current cluster
+ err = spreadEcShards(commandEnv, vid, collection, locations)
+ if err != nil {
+ return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
+ }
+
+ return nil
+}
+
+func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error {
+
+ for _, location := range locations {
+
+ fmt.Printf("markVolumeReadonly %d on %s ...\n", volumeId, location.Url)
+
+ err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return markErr
+ })
+
+ if err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
+
+ fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer)
+
+ err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ })
+ return genErr
+ })
+
+ return err
+
+}
+
+func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
+
+ allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
+ if err != nil {
+ return err
+ }
+
+ if totalFreeEcSlots < erasure_coding.TotalShardsCount {
+ return fmt.Errorf("not enough free ec shard slots. only %d left", totalFreeEcSlots)
+ }
+ allocatedDataNodes := allEcNodes
+ if len(allocatedDataNodes) > erasure_coding.TotalShardsCount {
+ allocatedDataNodes = allocatedDataNodes[:erasure_coding.TotalShardsCount]
+ }
+
+ // calculate how many shards to allocate for these servers
+ allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
+
+ // ask the data nodes to copy from the source volume server
+ copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
+ if err != nil {
+ return err
+ }
+
+ // unmount the to be deleted shards
+ err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds)
+ if err != nil {
+ return err
+ }
+
+ // ask the source volume server to clean up copied ec shards
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds)
+ if err != nil {
+ return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err)
+ }
+
+ // ask the source volume server to delete the original volume
+ for _, location := range existingLocations {
+ fmt.Printf("delete volume %d from %s\n", volumeId, location.Url)
+ err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url)
+ if err != nil {
+ return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err)
+ }
+ }
+
+ return err
+
+}
+
+func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
+
+ fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
+
+ // parallelize
+ shardIdChan := make(chan []uint32, len(targetServers))
+ var wg sync.WaitGroup
+ var errLock sync.Mutex
+ for i, server := range targetServers {
+ if len(allocatedEcIds[i]) <= 0 {
+ continue
+ }
+
+ wg.Add(1)
+ go func(server *EcNode, allocatedEcShardIds []uint32) {
+ defer wg.Done()
+ copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
+ allocatedEcShardIds, volumeId, collection, existingLocation.Url)
+ if copyErr != nil {
+ // serialize writes to the shared err; the last copy error wins
+ errLock.Lock()
+ err = copyErr
+ errLock.Unlock()
+ } else {
+ shardIdChan <- copiedShardIds
+ server.addEcVolumeShards(volumeId, collection, copiedShardIds)
+ }
+ }(server, allocatedEcIds[i])
+ }
+ wg.Wait()
+ close(shardIdChan)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for shardIds := range shardIdChan {
+ actuallyCopied = append(actuallyCopied, shardIds...)
+ }
+
+ return
+}
+
+func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
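+ // round-robin the 14 shard ids across the servers, skipping servers that
+ // report no free ec shard slots, so each server gets an even share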
+ allocated = make([][]uint32, len(servers))
+ allocatedShardIdIndex := uint32(0)
+ serverIndex := 0
+ for allocatedShardIdIndex < erasure_coding.TotalShardsCount {
+ if servers[serverIndex].freeEcSlot > 0 {
+ allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex)
+ allocatedShardIdIndex++
+ }
+ serverIndex++
+ if serverIndex >= len(servers) {
+ serverIndex = 0
+ }
+ }
+
+ return allocated
+}
+
+func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
+
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return
+ }
+
+ quietSeconds := int64(quietPeriod / time.Second)
+ nowUnixSeconds := time.Now().Unix()
+
+ fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds)
+
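+ // e.g. with a 30000 MB volume size limit and -fullPercent=95, a volume
+ // qualifies once its size exceeds 0.95 * 30000 MB = 28500 MB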
+ vidMap := make(map[uint32]bool)
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, v := range dn.VolumeInfos {
+ if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
+ if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
+ vidMap[v.Id] = true
+ }
+ }
+ }
+ })
+
+ for vid := range vidMap {
+ vids = append(vids, needle.VolumeId(vid))
+ }
+
+ return
+}
diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go
new file mode 100644
index 000000000..df28681fe
--- /dev/null
+++ b/weed/shell/command_ec_rebuild.go
@@ -0,0 +1,271 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
+)
+
+func init() {
+ Commands = append(Commands, &commandEcRebuild{})
+}
+
+type commandEcRebuild struct {
+}
+
+func (c *commandEcRebuild) Name() string {
+ return "ec.rebuild"
+}
+
+func (c *commandEcRebuild) Help() string {
+ return `find and rebuild missing ec shards among volume servers
+
+ ec.rebuild [-c EACH_COLLECTION|<collection_name>] [-force]
+
+ Algorithm:
+
+ For each collection {
+ rebuildEcVolumes()
+ }
+
+ func rebuildEcVolumes(){
+ for each ec volume with missing shards {
+ if fewer than 10 shards remain {
+ fail: the volume is unrepairable
+ }
+ pick the ec node with the most free ec shard slots as the rebuilder
+ copy the existing shards to the rebuilder
+ rebuild the missing shards locally on the rebuilder
+ mount the rebuilt shards
+ }
+ }
+
+`
+}
+
+func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
+ applyChanges := fixCommand.Bool("force", false, "apply the changes")
+ if err = fixCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ // collect all ec nodes
+ allEcNodes, _, err := collectEcNodes(commandEnv, "")
+ if err != nil {
+ return err
+ }
+
+ if *collection == "EACH_COLLECTION" {
+ collections, err := ListCollectionNames(commandEnv, false, true)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("rebuildEcVolumes collections %+v\n", len(collections))
+ for _, c := range collections {
+ fmt.Printf("rebuildEcVolumes collection %+v\n", c)
+ if err = rebuildEcVolumes(commandEnv, allEcNodes, c, writer, *applyChanges); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err = rebuildEcVolumes(commandEnv, allEcNodes, *collection, writer, *applyChanges); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error {
+
+ fmt.Printf("rebuildEcVolumes %s\n", collection)
+
+ // collect vid => each shard locations, similar to ecShardMap in topology.go
+ ecShardMap := make(EcShardMap)
+ for _, ecNode := range allEcNodes {
+ ecShardMap.registerEcNode(ecNode, collection)
+ }
+
+ for vid, locations := range ecShardMap {
+ shardCount := locations.shardCount()
+ if shardCount == erasure_coding.TotalShardsCount {
+ continue
+ }
+ if shardCount < erasure_coding.DataShardsCount {
+ return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
+ }
+
+ sortEcNodesByFreeslotsDecending(allEcNodes)
+
+ if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
+ return fmt.Errorf("disk space is not enough")
+ }
+
+ if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
+
+ fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId)
+
+ // collect shard files to rebuilder local disk
+ var generatedShardIds []uint32
+ copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // clean up working files
+
+ // ask the rebuilder to delete the copied shards
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds)
+ if err != nil {
+ fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
+ }
+
+ }()
+
+ if !applyChanges {
+ return nil
+ }
+
+ // generate ec shards, and maybe ecx file
+ generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id)
+ if err != nil {
+ return err
+ }
+
+ // mount the generated shards
+ err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds)
+ if err != nil {
+ return err
+ }
+
+ rebuilder.addEcVolumeShards(volumeId, collection, generatedShardIds)
+
+ return nil
+}
+
+func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) {
+
+ err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, rebuildErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ })
+ if rebuildErr == nil {
+ rebuiltShardIds = resp.RebuiltShardIds
+ }
+ return rebuildErr
+ })
+ return
+}
+
+func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
+
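+ // the .ecx/.ecj/.vif metadata files only need to be copied once, and only
+ // when the rebuilder holds no shards of this volume yet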
+ needEcxFile := true
+ var localShardBits erasure_coding.ShardBits
+ for _, ecShardInfo := range rebuilder.info.EcShardInfos {
+ if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
+ needEcxFile = false
+ localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+ }
+ }
+
+ for shardId, ecNodes := range locations {
+
+ if len(ecNodes) == 0 {
+ fmt.Fprintf(writer, "missing shard %d.%d\n", volumeId, shardId)
+ continue
+ }
+
+ if localShardBits.HasShardId(erasure_coding.ShardId(shardId)) {
+ localShardIds = append(localShardIds, uint32(shardId))
+ fmt.Fprintf(writer, "use existing shard %d.%d\n", volumeId, shardId)
+ continue
+ }
+
+ var copyErr error
+ if applyBalancing {
+ copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ ShardIds: []uint32{uint32(shardId)},
+ CopyEcxFile: needEcxFile,
+ CopyEcjFile: needEcxFile,
+ CopyVifFile: needEcxFile,
+ SourceDataNode: ecNodes[0].info.Id,
+ })
+ return copyErr
+ })
+ if copyErr == nil && needEcxFile {
+ needEcxFile = false
+ }
+ }
+ if copyErr != nil {
+ fmt.Fprintf(writer, "%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr)
+ } else {
+ fmt.Fprintf(writer, "%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id)
+ copiedShardIds = append(copiedShardIds, uint32(shardId))
+ }
+
+ }
+
+ if len(copiedShardIds)+len(localShardIds) >= erasure_coding.DataShardsCount {
+ return copiedShardIds, localShardIds, nil
+ }
+
+ return nil, nil, fmt.Errorf("%d shards are not enough to recover volume %d", len(copiedShardIds)+len(localShardIds), volumeId)
+
+}
+
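+// EcShardMap maps an ec volume id to its per-shard node locations;
+// EcShardLocations is indexed by shard id, each slot listing the ec
+// nodes currently holding a copy of that shard.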
+type EcShardMap map[needle.VolumeId]EcShardLocations
+type EcShardLocations [][]*EcNode
+
+func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) {
+ for _, shardInfo := range ecNode.info.EcShardInfos {
+ if shardInfo.Collection == collection {
+ existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
+ if !found {
+ existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
+ ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
+ }
+ for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
+ existing[shardId] = append(existing[shardId], ecNode)
+ }
+ }
+ }
+}
+
+func (ecShardLocations EcShardLocations) shardCount() (count int) {
+ for _, locations := range ecShardLocations {
+ if len(locations) > 0 {
+ count++
+ }
+ }
+ return
+}
diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go
new file mode 100644
index 000000000..4fddcbea5
--- /dev/null
+++ b/weed/shell/command_ec_test.go
@@ -0,0 +1,139 @@
+package shell
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func TestCommandEcDistribution(t *testing.T) {
+
+ allEcNodes := []*EcNode{
+ newEcNode("dc1", "rack1", "dn1", 100),
+ newEcNode("dc1", "rack2", "dn2", 100),
+ }
+
+ allocated := balancedEcDistribution(allEcNodes)
+
+ fmt.Printf("allocated: %+v", allocated)
+}
+
+func TestCommandEcBalanceSmall(t *testing.T) {
+
+ allEcNodes := []*EcNode{
+ newEcNode("dc1", "rack1", "dn1", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}),
+ newEcNode("dc1", "rack2", "dn2", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}),
+ }
+
+ racks := collectRacks(allEcNodes)
+ balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
+}
+
+func TestCommandEcBalanceNothingToMove(t *testing.T) {
+
+ allEcNodes := []*EcNode{
+ newEcNode("dc1", "rack1", "dn1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}),
+ newEcNode("dc1", "rack1", "dn2", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}),
+ }
+
+ racks := collectRacks(allEcNodes)
+ balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
+}
+
+func TestCommandEcBalanceAddNewServers(t *testing.T) {
+
+ allEcNodes := []*EcNode{
+ newEcNode("dc1", "rack1", "dn1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}),
+ newEcNode("dc1", "rack1", "dn2", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}),
+ newEcNode("dc1", "rack1", "dn3", 100),
+ newEcNode("dc1", "rack1", "dn4", 100),
+ }
+
+ racks := collectRacks(allEcNodes)
+ balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
+}
+
+func TestCommandEcBalanceAddNewRacks(t *testing.T) {
+
+ allEcNodes := []*EcNode{
+ newEcNode("dc1", "rack1", "dn1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}),
+ newEcNode("dc1", "rack1", "dn2", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}),
+ newEcNode("dc1", "rack2", "dn3", 100),
+ newEcNode("dc1", "rack2", "dn4", 100),
+ }
+
+ racks := collectRacks(allEcNodes)
+ balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
+}
+
+func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) {
+
+ allEcNodes := []*EcNode{
+ newEcNode("dc1", "rack1", "dn_shared", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0}).
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{0}),
+
+ newEcNode("dc1", "rack1", "dn_a1", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{1}),
+ newEcNode("dc1", "rack1", "dn_a2", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{2}),
+ newEcNode("dc1", "rack1", "dn_a3", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{3}),
+ newEcNode("dc1", "rack1", "dn_a4", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{4}),
+ newEcNode("dc1", "rack1", "dn_a5", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{5}),
+ newEcNode("dc1", "rack1", "dn_a6", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{6}),
+ newEcNode("dc1", "rack1", "dn_a7", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{7}),
+ newEcNode("dc1", "rack1", "dn_a8", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{8}),
+ newEcNode("dc1", "rack1", "dn_a9", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{9}),
+ newEcNode("dc1", "rack1", "dn_a10", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{10}),
+ newEcNode("dc1", "rack1", "dn_a11", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{11}),
+ newEcNode("dc1", "rack1", "dn_a12", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{12}),
+ newEcNode("dc1", "rack1", "dn_a13", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{13}),
+
+ newEcNode("dc1", "rack1", "dn_b1", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{1}),
+ newEcNode("dc1", "rack1", "dn_b2", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{2}),
+ newEcNode("dc1", "rack1", "dn_b3", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{3}),
+ newEcNode("dc1", "rack1", "dn_b4", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{4}),
+ newEcNode("dc1", "rack1", "dn_b5", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{5}),
+ newEcNode("dc1", "rack1", "dn_b6", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{6}),
+ newEcNode("dc1", "rack1", "dn_b7", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{7}),
+ newEcNode("dc1", "rack1", "dn_b8", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{8}),
+ newEcNode("dc1", "rack1", "dn_b9", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{9}),
+ newEcNode("dc1", "rack1", "dn_b10", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{10}),
+ newEcNode("dc1", "rack1", "dn_b11", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{11}),
+ newEcNode("dc1", "rack1", "dn_b12", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{12}),
+ newEcNode("dc1", "rack1", "dn_b13", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{13}),
+
+ newEcNode("dc1", "rack1", "dn3", 100),
+ }
+
+ racks := collectRacks(allEcNodes)
+ balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
+ balanceEcRacks(nil, racks, false)
+}
+
+func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode {
+ return &EcNode{
+ info: &master_pb.DataNodeInfo{
+ Id: dataNodeId,
+ },
+ dc: dc,
+ rack: RackId(rack),
+ freeEcSlot: freeEcSlot,
+ }
+}
+
+func (ecNode *EcNode) addEcVolumeAndShardsForTest(vid uint32, collection string, shardIds []uint32) *EcNode {
+ return ecNode.addEcVolumeShards(needle.VolumeId(vid), collection, shardIds)
+}
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
new file mode 100644
index 000000000..7177d8ac3
--- /dev/null
+++ b/weed/shell/command_fs_cat.go
@@ -0,0 +1,59 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsCat{})
+}
+
+type commandFsCat struct {
+}
+
+func (c *commandFsCat) Name() string {
+ return "fs.cat"
+}
+
+func (c *commandFsCat) Help() string {
+ return `stream the file content to the screen
+
+ fs.cat /dir/file_name
+`
+}
+
+func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ if commandEnv.isDirectory(path) {
+ return fmt.Errorf("%s is a directory", path)
+ }
+
+ dir, name := util.FullPath(path).DirAndName()
+
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Name: name,
+ Directory: dir,
+ }
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
+ if err != nil {
+ return err
+ }
+
+ return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
+
+ })
+
+}
diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go
new file mode 100644
index 000000000..2cc28f7a2
--- /dev/null
+++ b/weed/shell/command_fs_cd.go
@@ -0,0 +1,50 @@
+package shell
+
+import (
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsCd{})
+}
+
+type commandFsCd struct {
+}
+
+func (c *commandFsCd) Name() string {
+ return "fs.cd"
+}
+
+func (c *commandFsCd) Help() string {
+ return `change directory to a directory /path/to/dir
+
+ The full path can be too long to type. For example,
+ fs.ls /some/path/to/file_name
+
+ can be simplified as
+
+ fs.cd /some/path
+ fs.ls to/file_name
+`
+}
+
+func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ if path == "/" {
+ commandEnv.option.Directory = "/"
+ return nil
+ }
+
+ err = commandEnv.checkDirectory(path)
+
+ if err == nil {
+ commandEnv.option.Directory = path
+ }
+
+ return err
+}
diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go
new file mode 100644
index 000000000..96551dd5a
--- /dev/null
+++ b/weed/shell/command_fs_du.go
@@ -0,0 +1,84 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsDu{})
+}
+
+type commandFsDu struct {
+}
+
+func (c *commandFsDu) Name() string {
+ return "fs.du"
+}
+
+func (c *commandFsDu) Help() string {
+ return `show disk usage
+
+ fs.du /dir
+ fs.du /dir/file_name
+ fs.du /dir/file_prefix
+`
+}
+
+func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ if commandEnv.isDirectory(path) {
+ path = path + "/"
+ }
+
+ var blockCount, byteCount uint64
+ dir, name := util.FullPath(path).DirAndName()
+ blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv, dir, name)
+
+ if name == "" && err == nil {
+ fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir)
+ }
+
+ return
+
+}
+
+func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) {
+
+ err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error {
+
+ var fileBlockCount, fileByteCount uint64
+
+ if entry.IsDirectory {
+ subDir := fmt.Sprintf("%s/%s", dir, entry.Name)
+ if dir == "/" {
+ subDir = "/" + entry.Name
+ }
+ numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "")
+ if err == nil {
+ blockCount += numBlock
+ byteCount += numByte
+ }
+ } else {
+ fileBlockCount = uint64(len(entry.Chunks))
+ fileByteCount = filer2.TotalSize(entry.Chunks)
+ blockCount += fileBlockCount
+ byteCount += fileByteCount
+ }
+
+ if name != "" && !entry.IsDirectory {
+ fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", fileBlockCount, fileByteCount, dir, entry.Name)
+ }
+ return nil
+ })
+ return
+}
diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go
new file mode 100644
index 000000000..8a6e8f71b
--- /dev/null
+++ b/weed/shell/command_fs_lock_unlock.go
@@ -0,0 +1,54 @@
+package shell
+
+import (
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandUnlock{})
+ Commands = append(Commands, &commandLock{})
+}
+
+// =========== Lock ==============
+type commandLock struct {
+}
+
+func (c *commandLock) Name() string {
+ return "lock"
+}
+
+func (c *commandLock) Help() string {
+ return `lock in order to exclusively manage the cluster
+
+ This is a blocking operation if there is already another lock.
+`
+}
+
+func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ commandEnv.locker.RequestLock()
+
+ return nil
+}
+
+// =========== Unlock ==============
+
+type commandUnlock struct {
+}
+
+func (c *commandUnlock) Name() string {
+ return "unlock"
+}
+
+func (c *commandUnlock) Help() string {
+ return `unlock the cluster-wide lock
+
+`
+}
+
+func (c *commandUnlock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ commandEnv.locker.ReleaseLock()
+
+ return nil
+}
diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go
new file mode 100644
index 000000000..36133992f
--- /dev/null
+++ b/weed/shell/command_fs_ls.go
@@ -0,0 +1,111 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/user"
+ "strconv"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsLs{})
+}
+
+type commandFsLs struct {
+}
+
+func (c *commandFsLs) Name() string {
+ return "fs.ls"
+}
+
+func (c *commandFsLs) Help() string {
+ return `list all files under a directory
+
+ fs.ls [-l] [-a] /dir/
+ fs.ls [-l] [-a] /dir/file_name
+ fs.ls [-l] [-a] /dir/file_prefix
+`
+}
+
+func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ var isLongFormat, showHidden bool
+ for _, arg := range args {
+ if !strings.HasPrefix(arg, "-") {
+ break
+ }
+ for _, t := range arg {
+ switch t {
+ case 'a':
+ showHidden = true
+ case 'l':
+ isLongFormat = true
+ }
+ }
+ }
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ if commandEnv.isDirectory(path) {
+ path = path + "/"
+ }
+
+ dir, name := util.FullPath(path).DirAndName()
+ entryCount := 0
+
+ err = filer_pb.ReadDirAllEntries(commandEnv, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error {
+
+ if !showHidden && strings.HasPrefix(entry.Name, ".") {
+ return nil
+ }
+
+ entryCount++
+
+ if isLongFormat {
+ fileMode := os.FileMode(entry.Attributes.FileMode)
+ userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName
+ if userName == "" {
+ if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil {
+ userName = user.Username
+ }
+ }
+ groupName := ""
+ if len(groupNames) > 0 {
+ groupName = groupNames[0]
+ }
+ if groupName == "" {
+ if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil {
+ groupName = group.Name
+ }
+ }
+
+ if strings.HasSuffix(dir, "/") {
+ // just for printing
+ dir = dir[:len(dir)-1]
+ }
+ fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
+ fileMode, len(entry.Chunks),
+ userName, groupName,
+ filer2.TotalSize(entry.Chunks), dir, entry.Name)
+ } else {
+ fmt.Fprintf(writer, "%s\n", entry.Name)
+ }
+
+ return nil
+ })
+
+ if isLongFormat && err == nil {
+ fmt.Fprintf(writer, "total %d\n", entryCount)
+ }
+
+ return
+}
diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go
new file mode 100644
index 000000000..0679ec075
--- /dev/null
+++ b/weed/shell/command_fs_meta_cat.go
@@ -0,0 +1,68 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/jsonpb"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsMetaCat{})
+}
+
+type commandFsMetaCat struct {
+}
+
+func (c *commandFsMetaCat) Name() string {
+ return "fs.meta.cat"
+}
+
+func (c *commandFsMetaCat) Help() string {
+ return `print out the meta data content for a file or directory
+
+ fs.meta.cat /dir/
+ fs.meta.cat /dir/file_name
+`
+}
+
+func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ dir, name := util.FullPath(path).DirAndName()
+
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Name: name,
+ Directory: dir,
+ }
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
+ if err != nil {
+ return err
+ }
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+ text, marshalErr := m.MarshalToString(respLookupEntry.Entry)
+ if marshalErr != nil {
+ return fmt.Errorf("marshal meta: %v", marshalErr)
+ }
+
+ fmt.Fprintf(writer, "%s\n", text)
+
+ return nil
+
+ })
+
+}
diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go
new file mode 100644
index 000000000..69ae9454c
--- /dev/null
+++ b/weed/shell/command_fs_meta_load.go
@@ -0,0 +1,100 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsMetaLoad{})
+}
+
+type commandFsMetaLoad struct {
+}
+
+func (c *commandFsMetaLoad) Name() string {
+ return "fs.meta.load"
+}
+
+func (c *commandFsMetaLoad) Help() string {
+ return `load saved filer meta data to restore the directory and file structure
+
+ fs.meta.load <filer_host>-<port>-<time>.meta
+
+`
+}
+
+func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if len(args) == 0 {
+ fmt.Fprintf(writer, "missing a metadata file\n")
+ return nil
+ }
+
+ fileName := args[len(args)-1]
+
+ src, err := os.Open(fileName)
+ if err != nil {
+ return err
+ }
+ defer src.Close()
+
+ var dirCount, fileCount uint64
+
+ err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ sizeBuf := make([]byte, 4)
+
+ // each record is length-prefixed: 4 bytes of size, then the marshaled filer_pb.FullEntry
+ for {
+ if _, err := io.ReadFull(src, sizeBuf); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ size := util.BytesToUint32(sizeBuf)
+
+ data := make([]byte, int(size))
+
+ // io.ReadFull avoids silent short reads on large entries
+ if _, err := io.ReadFull(src, data); err != nil {
+ return err
+ }
+
+ fullEntry := &filer_pb.FullEntry{}
+ if err = proto.Unmarshal(data, fullEntry); err != nil {
+ return err
+ }
+
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+ Directory: fullEntry.Dir,
+ Entry: fullEntry.Entry,
+ }); err != nil {
+ return err
+ }
+
+ fmt.Fprintf(writer, "load %s\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
+
+ if fullEntry.Entry.IsDirectory {
+ dirCount++
+ } else {
+ fileCount++
+ }
+
+ }
+
+ })
+
+ if err == nil {
+ fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount)
+ fmt.Fprintf(writer, "\n%s is loaded.\n", fileName)
+ }
+
+ return err
+}
diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go
new file mode 100644
index 000000000..4342fa81d
--- /dev/null
+++ b/weed/shell/command_fs_meta_notify.go
@@ -0,0 +1,73 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/notification"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsMetaNotify{})
+}
+
+type commandFsMetaNotify struct {
+}
+
+func (c *commandFsMetaNotify) Name() string {
+ return "fs.meta.notify"
+}
+
+func (c *commandFsMetaNotify) Help() string {
+ return `recursively send directory and file meta data to the notification message queue
+
+ fs.meta.notify # send meta data from current directory to notification message queue
+
+ The message queue will use it to trigger replication from this filer.
+
+`
+}
+
+func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ util.LoadConfiguration("notification", true)
+ v := util.GetViper()
+ notification.LoadConfiguration(v, "notification.")
+
+ var dirCount, fileCount uint64
+
+ err = filer_pb.TraverseBfs(commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
+
+ if entry.IsDirectory {
+ dirCount++
+ } else {
+ fileCount++
+ }
+
+ notifyErr := notification.Queue.SendMessage(
+ string(parentPath.Child(entry.Name)),
+ &filer_pb.EventNotification{
+ NewEntry: entry,
+ },
+ )
+
+ if notifyErr != nil {
+ fmt.Fprintf(writer, "fail to notify new entry event for %s: %v\n", parentPath.Child(entry.Name), notifyErr)
+ }
+
+ })
+
+ if err == nil {
+ fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount)
+ }
+
+ return err
+
+}
diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go
new file mode 100644
index 000000000..ed19e3d01
--- /dev/null
+++ b/weed/shell/command_fs_meta_save.go
@@ -0,0 +1,143 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsMetaSave{})
+}
+
+type commandFsMetaSave struct {
+}
+
+func (c *commandFsMetaSave) Name() string {
+ return "fs.meta.save"
+}
+
+func (c *commandFsMetaSave) Help() string {
+ return `save all directory and file meta data to a local file for metadata backup.
+
+ fs.meta.save / # save from the root
+ fs.meta.save -v -o t.meta / # save from the root, output to t.meta file.
+ fs.meta.save /path/to/save # save from the directory /path/to/save
+ fs.meta.save . # save from current directory
+ fs.meta.save # save from current directory
+
+ The meta data will be saved into a local <filer_host>-<port>-<time>.meta file.
+ These meta data can be later loaded by fs.meta.load command,
+
+`
+}
+
+func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files")
+ outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file")
+ // chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file")
+ if err = fsMetaSaveCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ fileName := *outputFileName
+ if fileName == "" {
+ t := time.Now()
+ fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta",
+ commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
+ }
+
+ dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("failed to create file %s: %v", fileName, openErr)
+ }
+ defer dst.Close()
+
+ err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan interface{}) {
+ sizeBuf := make([]byte, 4)
+ for item := range outputChan {
+ b := item.([]byte)
+ util.Uint32toBytes(sizeBuf, uint32(len(b)))
+ dst.Write(sizeBuf)
+ dst.Write(b)
+ }
+ }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+ bytes, err := proto.Marshal(entry)
+ if err != nil {
+ fmt.Fprintf(writer, "marshall error: %v\n", err)
+ return
+ }
+
+ outputChan <- bytes
+ return nil
+ })
+
+ if err == nil {
+ fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", commandEnv.option.FilerHost, commandEnv.option.FilerPort, path, fileName)
+ }
+
+ return err
+
+}
+
+func doTraverseBfsAndSaving(filerClient filer_pb.FilerClient, writer io.Writer, path string, verbose bool, saveFn func(outputChan chan interface{}), genFn func(entry *filer_pb.FullEntry, outputChan chan interface{}) error) error {
+
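+ // producer/consumer: genFn marshals entries onto outputChan from inside the
+ // traversal, while a single goroutine drains the channel in saveFn so the
+ // output file is written sequentially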
+ var wg sync.WaitGroup
+ wg.Add(1)
+ outputChan := make(chan interface{}, 1024)
+ go func() {
+ saveFn(outputChan)
+ wg.Done()
+ }()
+
+ var dirCount, fileCount uint64
+
+ err := filer_pb.TraverseBfs(filerClient, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
+
+ protoMessage := &filer_pb.FullEntry{
+ Dir: string(parentPath),
+ Entry: entry,
+ }
+
+ if err := genFn(protoMessage, outputChan); err != nil {
+ fmt.Fprintf(writer, "marshall error: %v\n", err)
+ return
+ }
+
+ if entry.IsDirectory {
+ atomic.AddUint64(&dirCount, 1)
+ } else {
+ atomic.AddUint64(&fileCount, 1)
+ }
+
+ if verbose {
+ println(parentPath.Child(entry.Name))
+ }
+
+ })
+
+ close(outputChan)
+
+ wg.Wait()
+
+ if err == nil && writer != nil {
+ fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount)
+ }
+ return err
+}
diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go
new file mode 100644
index 000000000..0a7eed02d
--- /dev/null
+++ b/weed/shell/command_fs_mv.go
@@ -0,0 +1,90 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsMv{})
+}
+
+type commandFsMv struct {
+}
+
+func (c *commandFsMv) Name() string {
+ return "fs.mv"
+}
+
+func (c *commandFsMv) Help() string {
+ return `move or rename a file or a folder
+
+ fs.mv <source entry> <target entry>
+
+ fs.mv /dir/file_name /dir2/filename2
+ fs.mv /dir/file_name /dir2
+
+ fs.mv /dir/dir2 /dir3/dir4/
+ fs.mv /dir/dir2 /dir3/new_dir
+
+`
+}
+
+func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ sourcePath, err := commandEnv.parseUrl(args[0])
+ if err != nil {
+ return err
+ }
+
+ destinationPath, err := commandEnv.parseUrl(args[1])
+ if err != nil {
+ return err
+ }
+
+ sourceDir, sourceName := util.FullPath(sourcePath).DirAndName()
+
+ destinationDir, destinationName := util.FullPath(destinationPath).DirAndName()
+
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ // collect destination entry info
+ destinationRequest := &filer_pb.LookupDirectoryEntryRequest{
+ Name: destinationName,
+ Directory: destinationDir,
+ }
+ respDestinationLookupEntry, err := filer_pb.LookupEntry(client, destinationRequest)
+
+ var targetDir, targetName string
+
+ // moving a file or folder
+ if err == nil && respDestinationLookupEntry.Entry.IsDirectory {
+ // to a directory
+ targetDir = util.Join(destinationDir, destinationName)
+ targetName = sourceName
+ } else {
+ // to a file or folder
+ targetDir = destinationDir
+ targetName = destinationName
+ }
+
+ request := &filer_pb.AtomicRenameEntryRequest{
+ OldDirectory: sourceDir,
+ OldName: sourceName,
+ NewDirectory: targetDir,
+ NewName: targetName,
+ }
+
+ _, err = client.AtomicRenameEntry(context.Background(), request)
+
+ fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, util.NewFullPath(targetDir, targetName))
+
+ return err
+
+ })
+
+}
diff --git a/weed/shell/command_fs_pwd.go b/weed/shell/command_fs_pwd.go
new file mode 100644
index 000000000..d7d9819c8
--- /dev/null
+++ b/weed/shell/command_fs_pwd.go
@@ -0,0 +1,28 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsPwd{})
+}
+
+type commandFsPwd struct {
+}
+
+func (c *commandFsPwd) Name() string {
+ return "fs.pwd"
+}
+
+func (c *commandFsPwd) Help() string {
+ return `print out current directory`
+}
+
+func (c *commandFsPwd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ fmt.Fprintf(writer, "%s\n", commandEnv.option.Directory)
+
+ return nil
+}
diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go
new file mode 100644
index 000000000..a8c5b2018
--- /dev/null
+++ b/weed/shell/command_fs_tree.go
@@ -0,0 +1,113 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandFsTree{})
+}
+
+type commandFsTree struct {
+}
+
+func (c *commandFsTree) Name() string {
+ return "fs.tree"
+}
+
+func (c *commandFsTree) Help() string {
+ return `recursively list all files under a directory
+
+ fs.tree /some/dir
+
+`
+}
+
+func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
+ if err != nil {
+ return err
+ }
+
+ dir, name := util.FullPath(path).DirAndName()
+
+ dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv, util.FullPath(dir), name, newPrefix(), -1)
+
+ if terr == nil {
+ fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount)
+ }
+
+ return terr
+
+}
+
+func treeTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir util.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
+
+ prefix.addMarker(level)
+
+ err = filer_pb.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) error {
+ if level < 0 && name != "" {
+ if entry.Name != name {
+ return nil
+ }
+ }
+
+ fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name)
+
+ if entry.IsDirectory {
+ directoryCount++
+ subDir := dir.Child(entry.Name)
+ dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1)
+ directoryCount += dirCount
+ fileCount += fCount
+ err = terr
+ } else {
+ fileCount++
+ }
+ return nil
+ })
+ return
+}
+
+type Prefix struct {
+ markers map[int]bool
+}
+
+func newPrefix() *Prefix {
+ return &Prefix{
+ markers: make(map[int]bool),
+ }
+}
+func (p *Prefix) addMarker(marker int) {
+ p.markers[marker] = true
+}
+func (p *Prefix) removeMarker(marker int) {
+ delete(p.markers, marker)
+}
+func (p *Prefix) getPrefix(level int, isLastChild bool) string {
+ var sb strings.Builder
+ if level < 0 {
+ return ""
+ }
+ for i := 0; i < level; i++ {
+ if _, ok := p.markers[i]; ok {
+ sb.WriteString("â")
+ } else {
+ sb.WriteString(" ")
+ }
+ sb.WriteString(" ")
+ }
+ if isLastChild {
+ sb.WriteString("âââ")
+ p.removeMarker(level)
+ } else {
+ sb.WriteString("âââ")
+ }
+ return sb.String()
+}
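+
+// Illustrative rendering only (assuming a directory /a containing b/c and d):
+//
+//   ├── b
+//   │   └── c
+//   └── d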
diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go
new file mode 100644
index 000000000..69e3c7fd9
--- /dev/null
+++ b/weed/shell/command_volume_balance.go
@@ -0,0 +1,257 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeBalance{})
+}
+
+type commandVolumeBalance struct {
+}
+
+func (c *commandVolumeBalance) Name() string {
+ return "volume.balance"
+}
+
+func (c *commandVolumeBalance) Help() string {
+ return `balance all volumes among volume servers
+
+ volume.balance [-collection ALL_COLLECTIONS|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>]
+
+ Algorithm:
+
+ For each type of volume server (different max volume count limit){
+ for each collection {
+ balanceWritableVolumes()
+ balanceReadOnlyVolumes()
+ }
+ }
+
+ func balanceWritableVolumes(){
+ idealWritableVolumes = totalWritableVolumes / numVolumeServers
+ for hasMovedOneVolume {
+ sort all volume servers ordered by the number of local writable volumes
+ pick the volume server A with the lowest number of writable volumes x
+ pick the volume server B with the highest number of writable volumes y
+ if y > idealWritableVolumes and x +1 <= idealWritableVolumes {
+ if B has a writable volume id v that A does not have {
+ move writable volume v from B to A
+ }
+ }
+ }
+ }
+ func balanceReadOnlyVolumes(){
+ //similar to balanceWritableVolumes
+ }
+
+`
+}
+
+func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or use \"ALL_COLLECTIONS\" across collections, \"EACH_COLLECTION\" for each collection")
+ dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
+ applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan.")
+ if err = balanceCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+ typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc)
+
+ for maxVolumeCount, volumeServers := range typeToNodes {
+ if len(volumeServers) < 2 {
+ fmt.Printf("only 1 node is configured max %d volumes, skipping balancing\n", maxVolumeCount)
+ continue
+ }
+ if *collection == "EACH_COLLECTION" {
+ collections, err := ListCollectionNames(commandEnv, true, false)
+ if err != nil {
+ return err
+ }
+ for _, c := range collections {
+ if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
+ return err
+ }
+ }
+ } else if *collection == "ALL_COLLECTIONS" {
+ if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
+ return err
+ }
+ } else {
+ if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
+func balanceVolumeServers(commandEnv *CommandEnv, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
+
+ // balance writable volumes
+ for _, n := range nodes {
+ n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool {
+ if collection != "ALL_COLLECTIONS" {
+ if v.Collection != collection {
+ return false
+ }
+ }
+ return !v.ReadOnly && v.Size < volumeSizeLimit
+ })
+ }
+ if err := balanceSelectedVolume(commandEnv, nodes, sortWritableVolumes, applyBalancing); err != nil {
+ return err
+ }
+
+ // balance readable volumes
+ for _, n := range nodes {
+ n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool {
+ if collection != "ALL_COLLECTIONS" {
+ if v.Collection != collection {
+ return false
+ }
+ }
+ return v.ReadOnly || v.Size >= volumeSizeLimit
+ })
+ }
+ if err := balanceSelectedVolume(commandEnv, nodes, sortReadOnlyVolumes, applyBalancing); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*Node) {
+ typeToNodes = make(map[uint64][]*Node)
+ for _, dc := range t.DataCenterInfos {
+ if selectedDataCenter != "" && dc.Id != selectedDataCenter {
+ continue
+ }
+ for _, r := range dc.RackInfos {
+ for _, dn := range r.DataNodeInfos {
+ typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], &Node{
+ info: dn,
+ dc: dc.Id,
+ rack: r.Id,
+ })
+ }
+ }
+ }
+ return
+}
+
+type Node struct {
+ info *master_pb.DataNodeInfo
+ selectedVolumes map[uint32]*master_pb.VolumeInformationMessage
+ dc string
+ rack string
+}
+
+func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
+ sort.Slice(volumes, func(i, j int) bool {
+ return volumes[i].Size < volumes[j].Size
+ })
+}
+
+func sortReadOnlyVolumes(volumes []*master_pb.VolumeInformationMessage) {
+ sort.Slice(volumes, func(i, j int) bool {
+ return volumes[i].Id < volumes[j].Id
+ })
+}
+
+func balanceSelectedVolume(commandEnv *CommandEnv, nodes []*Node, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) error {
+ selectedVolumeCount := 0
+ for _, dn := range nodes {
+ selectedVolumeCount += len(dn.selectedVolumes)
+ }
+
+ idealSelectedVolumes := ceilDivide(selectedVolumeCount, len(nodes))
+
+ hasMove := true
+
+ for hasMove {
+ hasMove = false
+ sort.Slice(nodes, func(i, j int) bool {
+ // TODO sort by free volume slots???
+ return len(nodes[i].selectedVolumes) < len(nodes[j].selectedVolumes)
+ })
+ emptyNode, fullNode := nodes[0], nodes[len(nodes)-1]
+ if len(fullNode.selectedVolumes) > idealSelectedVolumes && len(emptyNode.selectedVolumes)+1 <= idealSelectedVolumes {
+
+ // sort the volumes to move
+ var candidateVolumes []*master_pb.VolumeInformationMessage
+ for _, v := range fullNode.selectedVolumes {
+ candidateVolumes = append(candidateVolumes, v)
+ }
+ sortCandidatesFn(candidateVolumes)
+
+ for _, v := range candidateVolumes {
+ if v.ReplicaPlacement > 0 {
+ if fullNode.dc != emptyNode.dc && fullNode.rack != emptyNode.rack {
+ // TODO this logic is too simple, but should work most of the time
+ // Need a correct algorithm to handle all different cases
+ continue
+ }
+ }
+ if _, found := emptyNode.selectedVolumes[v.Id]; !found {
+ if err := moveVolume(commandEnv, v, fullNode, emptyNode, applyBalancing); err == nil {
+ delete(fullNode.selectedVolumes, v.Id)
+ emptyNode.selectedVolumes[v.Id] = v
+ hasMove = true
+ break
+ } else {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, fullNode *Node, emptyNode *Node, applyBalancing bool) error {
+ collectionPrefix := v.Collection + "_"
+ if v.Collection == "" {
+ collectionPrefix = ""
+ }
+ fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
+ if applyBalancing {
+ return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second)
+ }
+ return nil
+}
+
+func (node *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
+ node.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
+ for _, v := range node.info.VolumeInfos {
+ if fn(v) {
+ node.selectedVolumes[v.Id] = v
+ }
+ }
+}
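For illustration, the balancing loop above is a greedy leveling scheme: compute the ideal per-node count, then repeatedly move one volume from the fullest node to the emptiest node until neither side can improve. A minimal standalone sketch over plain counts (the node counts are made up; ceilDivide mirrors the helper used by balanceSelectedVolume):

	package main

	import (
		"fmt"
		"sort"
	)

	// ceilDivide mirrors the helper used by balanceSelectedVolume: the ideal
	// per-node volume count, rounded up.
	func ceilDivide(total, n int) int {
		return (total + n - 1) / n
	}

	func main() {
		counts := []int{9, 1, 2} // selected volumes per server, e.g. after selectVolumes()
		total := 0
		for _, c := range counts {
			total += c
		}
		ideal := ceilDivide(total, len(counts))

		for hasMove := true; hasMove; {
			hasMove = false
			sort.Ints(counts) // emptiest node first, fullest node last
			empty, full := 0, len(counts)-1
			if counts[full] > ideal && counts[empty]+1 <= ideal {
				counts[full]--  // one volume moves off the fullest node ...
				counts[empty]++ // ... onto the emptiest node
				hasMove = true
			}
		}
		fmt.Println(counts, ideal) // [4 4 4] 4
	}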
diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go
new file mode 100644
index 000000000..ff976c345
--- /dev/null
+++ b/weed/shell/command_volume_configure_replication.go
@@ -0,0 +1,108 @@
+package shell
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeConfigureReplication{})
+}
+
+type commandVolumeConfigureReplication struct {
+}
+
+func (c *commandVolumeConfigureReplication) Name() string {
+ return "volume.configure.replication"
+}
+
+func (c *commandVolumeConfigureReplication) Help() string {
+ return `change volume replication value
+
+ This command changes a volume replication value. It should be followed by volume.fix.replication.
+
+`
+}
+
+func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id")
+ replicationString := configureReplicationCommand.String("replication", "", "the intended replication value")
+ if err = configureReplicationCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *replicationString == "" {
+ return fmt.Errorf("empty replication value")
+ }
+
+ replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString)
+ if err != nil {
+ return fmt.Errorf("replication format: %v", err)
+ }
+ replicaPlacementInt32 := uint32(replicaPlacement.Byte())
+
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+ vid := needle.VolumeId(*volumeIdInt)
+
+	// find all data nodes with volumes that need the replication change
+ var allLocations []location
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ loc := newLocation(dc, string(rack), dn)
+ for _, v := range dn.VolumeInfos {
+ if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
+ allLocations = append(allLocations, loc)
+ continue
+ }
+ }
+ })
+
+ if len(allLocations) == 0 {
+ return fmt.Errorf("no volume needs change")
+ }
+
+ for _, dst := range allLocations {
+ err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, configureErr := volumeServerClient.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{
+ VolumeId: uint32(vid),
+ Replication: replicaPlacement.String(),
+ })
+ if configureErr != nil {
+ return configureErr
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
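For context, the replication string parsed by super_block.NewReplicaPlacementFromString above is three digits: copies on different data centers, copies on different racks within the same data center, and copies on different servers within the same rack; the total copy count is the digit sum plus one. A small self-contained sketch of that decoding (a local illustration, not the super_block implementation):

	package main

	import "fmt"

	// decodeReplication interprets a 3-digit replication string such as "012":
	// digit 1: replicas on different data centers
	// digit 2: replicas on different racks within the same data center
	// digit 3: replicas on different servers within the same rack
	func decodeReplication(s string) (diffDc, diffRack, sameRack, copyCount int, err error) {
		if len(s) != 3 {
			return 0, 0, 0, 0, fmt.Errorf("expect 3 digits, got %q", s)
		}
		diffDc, diffRack, sameRack = int(s[0]-'0'), int(s[1]-'0'), int(s[2]-'0')
		copyCount = diffDc + diffRack + sameRack + 1
		return
	}

	func main() {
		dc, rack, same, copies, _ := decodeReplication("012")
		fmt.Println(dc, rack, same, copies) // 0 1 2 4
	}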
diff --git a/weed/shell/command_volume_copy.go b/weed/shell/command_volume_copy.go
new file mode 100644
index 000000000..cdd10863f
--- /dev/null
+++ b/weed/shell/command_volume_copy.go
@@ -0,0 +1,55 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeCopy{})
+}
+
+type commandVolumeCopy struct {
+}
+
+func (c *commandVolumeCopy) Name() string {
+ return "volume.copy"
+}
+
+func (c *commandVolumeCopy) Help() string {
+ return `copy a volume from one volume server to another volume server
+
+	volume.copy <source volume server host:port> <target volume server host:port> <volume id>
+
+ This command copies a volume from one volume server to another volume server.
+ Usually you will want to unmount the volume first before copying.
+
+`
+}
+
+func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ if len(args) != 3 {
+ fmt.Fprintf(writer, "received args: %+v\n", args)
+		return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>")
+ }
+ sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2]
+
+ volumeId, err := needle.NewVolumeId(volumeIdString)
+ if err != nil {
+		return fmt.Errorf("wrong volume id format %s: %v", volumeIdString, err)
+ }
+
+ if sourceVolumeServer == targetVolumeServer {
+ return fmt.Errorf("source and target volume servers are the same!")
+ }
+
+ _, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
+ return
+}
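As the help text notes, copying is usually paired with unmounting. One plausible manual sequence in the weed shell (server addresses and volume id are hypothetical, and the lock/unlock commands that pair with confirmIsLocked() are assumed); volume.move, later in this change, automates the same steps against a live volume:

	> lock
	> volume.unmount 192.168.0.1:8080 7
	> volume.copy 192.168.0.1:8080 192.168.0.2:8080 7
	> volume.mount 192.168.0.2:8080 7
	> volume.delete 192.168.0.1:8080 7
	> unlock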
diff --git a/weed/shell/command_volume_delete.go b/weed/shell/command_volume_delete.go
new file mode 100644
index 000000000..c5cc9e277
--- /dev/null
+++ b/weed/shell/command_volume_delete.go
@@ -0,0 +1,50 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeDelete{})
+}
+
+type commandVolumeDelete struct {
+}
+
+func (c *commandVolumeDelete) Name() string {
+ return "volume.delete"
+}
+
+func (c *commandVolumeDelete) Help() string {
+ return `delete a live volume from one volume server
+
+	volume.delete <volume server host:port> <volume id>
+
+ This command deletes a volume from one volume server.
+
+`
+}
+
+func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ if len(args) != 2 {
+ fmt.Fprintf(writer, "received args: %+v\n", args)
+		return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
+ }
+ sourceVolumeServer, volumeIdString := args[0], args[1]
+
+ volumeId, err := needle.NewVolumeId(volumeIdString)
+ if err != nil {
+		return fmt.Errorf("wrong volume id format %s: %v", volumeIdString, err)
+ }
+
+ return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+
+}
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
new file mode 100644
index 000000000..6b5e4e735
--- /dev/null
+++ b/weed/shell/command_volume_fix_replication.go
@@ -0,0 +1,307 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math/rand"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeFixReplication{})
+}
+
+type commandVolumeFixReplication struct {
+}
+
+func (c *commandVolumeFixReplication) Name() string {
+ return "volume.fix.replication"
+}
+
+func (c *commandVolumeFixReplication) Help() string {
+ return `add replicas to volumes that are missing replicas
+
+	This command finds all under-replicated volumes, and finds volume servers with free slots.
+ If the free slots satisfy the replication requirement, the volume content is copied over and mounted.
+
+ volume.fix.replication -n # do not take action
+	volume.fix.replication    # actually copy the volume files and mount the volume
+
+ Note:
+		* each time this will only add back one replica for one volume id. If multiple replicas
+	  are missing, e.g. because multiple volume servers are new, you may need to run this multiple times.
+		* do not run this again within seconds, since the new volume replica may take a few seconds
+	  to register itself to the master.
+
+`
+}
+
+func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ takeAction := true
+ if len(args) > 0 && args[0] == "-n" {
+ takeAction = false
+ }
+
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+ // find all volumes that needs replication
+ // collect all data nodes
+ replicatedVolumeLocations := make(map[uint32][]location)
+ replicatedVolumeInfo := make(map[uint32]*master_pb.VolumeInformationMessage)
+ var allLocations []location
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ loc := newLocation(dc, string(rack), dn)
+ for _, v := range dn.VolumeInfos {
+ if v.ReplicaPlacement > 0 {
+ replicatedVolumeLocations[v.Id] = append(replicatedVolumeLocations[v.Id], loc)
+ replicatedVolumeInfo[v.Id] = v
+ }
+ }
+ allLocations = append(allLocations, loc)
+ })
+
+ // find all under replicated volumes
+ underReplicatedVolumeLocations := make(map[uint32][]location)
+ for vid, locations := range replicatedVolumeLocations {
+ volumeInfo := replicatedVolumeInfo[vid]
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))
+ if replicaPlacement.GetCopyCount() > len(locations) {
+ underReplicatedVolumeLocations[vid] = locations
+ }
+ }
+
+ if len(underReplicatedVolumeLocations) == 0 {
+ return fmt.Errorf("no under replicated volumes")
+ }
+
+ if len(allLocations) == 0 {
+ return fmt.Errorf("no data nodes at all")
+ }
+
+ // find the most under populated data nodes
+ keepDataNodesSorted(allLocations)
+
+ for vid, locations := range underReplicatedVolumeLocations {
+ volumeInfo := replicatedVolumeInfo[vid]
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))
+ foundNewLocation := false
+ for _, dst := range allLocations {
+ // check whether data nodes satisfy the constraints
+ if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, locations, dst) {
+ // ask the volume server to replicate the volume
+ sourceNodes := underReplicatedVolumeLocations[vid]
+ sourceNode := sourceNodes[rand.Intn(len(sourceNodes))]
+ foundNewLocation = true
+ fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", volumeInfo.Id, replicaPlacement, sourceNode.dataNode.Id, dst.dataNode.Id)
+
+ if !takeAction {
+ break
+ }
+
+ err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
+ VolumeId: volumeInfo.Id,
+ SourceDataNode: sourceNode.dataNode.Id,
+ })
+ if replicateErr != nil {
+ return fmt.Errorf("copying from %s => %s : %v", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // adjust free volume count
+ dst.dataNode.FreeVolumeCount--
+ keepDataNodesSorted(allLocations)
+ break
+ }
+ }
+ if !foundNewLocation {
+ fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", volumeInfo.Id, replicaPlacement, locations)
+ }
+
+ }
+
+ return nil
+}
+
+func keepDataNodesSorted(dataNodes []location) {
+ sort.Slice(dataNodes, func(i, j int) bool {
+ return dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount
+ })
+}
+
+/*
+ if on an existing data node {
+ return false
+ }
+ if different from existing dcs {
+ if lack on different dcs {
+ return true
+ }else{
+ return false
+ }
+ }
+ if not on primary dc {
+ return false
+ }
+ if different from existing racks {
+ if lack on different racks {
+ return true
+ }else{
+ return false
+ }
+ }
+ if not on primary rack {
+ return false
+ }
+ if lacks on same rack {
+ return true
+ } else {
+ return false
+ }
+*/
+func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool {
+
+ existingDataNodes := make(map[string]int)
+ for _, loc := range existingLocations {
+ existingDataNodes[loc.String()] += 1
+ }
+ sameDataNodeCount := existingDataNodes[possibleLocation.String()]
+ // avoid duplicated volume on the same data node
+ if sameDataNodeCount > 0 {
+ return false
+ }
+
+ existingDataCenters := make(map[string]int)
+ for _, loc := range existingLocations {
+ existingDataCenters[loc.DataCenter()] += 1
+ }
+ primaryDataCenters, _ := findTopKeys(existingDataCenters)
+
+ // ensure data center count is within limit
+ if _, found := existingDataCenters[possibleLocation.DataCenter()]; !found {
+ // different from existing dcs
+ if len(existingDataCenters) < replicaPlacement.DiffDataCenterCount+1 {
+ // lack on different dcs
+ return true
+ } else {
+ // adding this would go over the different dcs limit
+ return false
+ }
+ }
+ // now this is same as one of the existing data center
+ if !isAmong(possibleLocation.DataCenter(), primaryDataCenters) {
+ // not on one of the primary dcs
+ return false
+ }
+
+ // now this is one of the primary dcs
+ existingRacks := make(map[string]int)
+ for _, loc := range existingLocations {
+ if loc.DataCenter() != possibleLocation.DataCenter() {
+ continue
+ }
+ existingRacks[loc.Rack()] += 1
+ }
+ primaryRacks, _ := findTopKeys(existingRacks)
+ sameRackCount := existingRacks[possibleLocation.Rack()]
+
+ // ensure rack count is within limit
+ if _, found := existingRacks[possibleLocation.Rack()]; !found {
+ // different from existing racks
+ if len(existingRacks) < replicaPlacement.DiffRackCount+1 {
+ // lack on different racks
+ return true
+ } else {
+ // adding this would go over the different racks limit
+ return false
+ }
+ }
+ // now this is same as one of the existing racks
+ if !isAmong(possibleLocation.Rack(), primaryRacks) {
+ // not on the primary rack
+ return false
+ }
+
+ // now this is on the primary rack
+
+ // different from existing data nodes
+ if sameRackCount < replicaPlacement.SameRackCount+1 {
+ // lack on same rack
+ return true
+ } else {
+ // adding this would go over the same data node limit
+ return false
+ }
+
+}
+
+func findTopKeys(m map[string]int) (topKeys []string, max int) {
+ for k, c := range m {
+ if max < c {
+ topKeys = topKeys[:0]
+ topKeys = append(topKeys, k)
+ max = c
+ } else if max == c {
+ topKeys = append(topKeys, k)
+ }
+ }
+ return
+}
+
+func isAmong(key string, keys []string) bool {
+ for _, k := range keys {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+type location struct {
+ dc string
+ rack string
+ dataNode *master_pb.DataNodeInfo
+}
+
+func newLocation(dc, rack string, dataNode *master_pb.DataNodeInfo) location {
+ return location{
+ dc: dc,
+ rack: rack,
+ dataNode: dataNode,
+ }
+}
+
+func (l location) String() string {
+ return fmt.Sprintf("%s %s %s", l.dc, l.rack, l.dataNode.Id)
+}
+
+func (l location) Rack() string {
+ return fmt.Sprintf("%s %s", l.dc, l.rack)
+}
+
+func (l location) DataCenter() string {
+ return l.dc
+}
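The primary data center and rack selection above hinges on findTopKeys: every key tied for the maximum replica count is a winner. A standalone copy of the function for illustration (the input map is made up):

	package main

	import "fmt"

	// findTopKeys (copied here for illustration) returns every key that is tied
	// for the maximum count; ties mean multiple "primary" candidates.
	func findTopKeys(m map[string]int) (topKeys []string, max int) {
		for k, c := range m {
			if max < c {
				topKeys = topKeys[:0] // a new maximum invalidates earlier winners
				topKeys = append(topKeys, k)
				max = c
			} else if max == c {
				topKeys = append(topKeys, k)
			}
		}
		return
	}

	func main() {
		// replica counts per rack within the primary data center
		racks := map[string]int{"dc1 r1": 2, "dc1 r2": 1}
		top, max := findTopKeys(racks)
		fmt.Println(top, max) // [dc1 r1] 2
	}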
diff --git a/weed/shell/command_volume_fix_replication_test.go b/weed/shell/command_volume_fix_replication_test.go
new file mode 100644
index 000000000..4cfbd96aa
--- /dev/null
+++ b/weed/shell/command_volume_fix_replication_test.go
@@ -0,0 +1,207 @@
+package shell
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+type testcase struct {
+ name string
+ replication string
+ existingLocations []location
+ possibleLocation location
+ expected bool
+}
+
+func TestSatisfyReplicaPlacementComplicated(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 100 negative",
+ replication: "100",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+ {
+ name: "test 100 positive",
+ replication: "100",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ possibleLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: true,
+ },
+ {
+ name: "test 022 positive",
+ replication: "022",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: true,
+ },
+ {
+ name: "test 022 negative",
+ replication: "022",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ {
+ name: "test 210 moved from 200 positive",
+ replication: "210",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: true,
+ },
+ {
+ name: "test 210 moved from 200 negative extra dc",
+ replication: "210",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc4", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ {
+ name: "test 210 moved from 200 negative extra data node",
+ replication: "210",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func TestSatisfyReplicaPlacement01x(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 011 same existing rack",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 011 negative",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+ {
+ name: "test 011 different existing racks",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 011 different existing racks negative",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func TestSatisfyReplicaPlacement00x(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 001",
+ replication: "001",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: true,
+ },
+ {
+ name: "test 002 positive",
+ replication: "002",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 002 negative, repeat the same node",
+ replication: "002",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+ {
+ name: "test 002 negative, enough node already",
+ replication: "002",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func runTests(tests []testcase, t *testing.T) {
+ for _, tt := range tests {
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
+		t.Logf("replication: %s expected: %v name: %s", tt.replication, tt.expected, tt.name)
+ if satisfyReplicaPlacement(replicaPlacement, tt.existingLocations, tt.possibleLocation) != tt.expected {
+ t.Errorf("%s: expect %v add %v to %s %+v",
+ tt.name, tt.expected, tt.possibleLocation, tt.replication, tt.existingLocations)
+ }
+ }
+}
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
new file mode 100644
index 000000000..69a1a63b4
--- /dev/null
+++ b/weed/shell/command_volume_fsck.go
@@ -0,0 +1,361 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeFsck{})
+}
+
+type commandVolumeFsck struct {
+ env *CommandEnv
+}
+
+func (c *commandVolumeFsck) Name() string {
+ return "volume.fsck"
+}
+
+func (c *commandVolumeFsck) Help() string {
+ return `check all volumes to find entries not used by the filer
+
+ Important assumption!!!
+		all files in the system are accessed through a single filer.
+
+ This command works this way:
+ 1. collect all file ids from all volumes, as set A
+ 2. collect all file ids from the filer, as set B
+	3. compute the set difference A - B: file ids in the volumes that are not referenced by the filer
+
+`
+}
+
+func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ verbose := fsckCommand.Bool("v", false, "verbose mode")
+	applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "delete data not referenced by the filer")
+ if err = fsckCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ c.env = commandEnv
+
+ // create a temp folder
+ tempFolder, err := ioutil.TempDir("", "sw_fsck")
+ if err != nil {
+ return fmt.Errorf("failed to create temp folder: %v", err)
+ }
+ if *verbose {
+ fmt.Fprintf(writer, "working directory: %s\n", tempFolder)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ // collect all volume id locations
+ volumeIdToVInfo, err := c.collectVolumeIds(*verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect all volume locations: %v", err)
+ }
+
+ // collect each volume file ids
+ for volumeId, vinfo := range volumeIdToVInfo {
+ err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+ }
+ }
+
+ // collect all filer file ids
+ if err = c.collectFilerFileIds(tempFolder, volumeIdToVInfo, *verbose, writer); err != nil {
+ return fmt.Errorf("failed to collect file ids from filer: %v", err)
+ }
+
+	// volume file ids subtract filer file ids
+ var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
+ for volumeId, vinfo := range volumeIdToVInfo {
+ inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, *verbose)
+ if checkErr != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
+ }
+ totalInUseCount += inUseCount
+ totalOrphanChunkCount += uint64(len(orphanFileIds))
+ totalOrphanDataSize += orphanDataSize
+
+		if *applyPurging && len(orphanFileIds) > 0 {
+			if vinfo.isEcVolume {
+				fmt.Fprintf(writer, "skipping purging for erasure coded volume %d\n", volumeId)
+				continue
+			}
+			if err = c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+				return fmt.Errorf("purge for volume %d: %v", volumeId, err)
+			}
+		}
+ }
+
+ if totalOrphanChunkCount == 0 {
+ fmt.Fprintf(writer, "no orphan data\n")
+ return nil
+ }
+
+ if !*applyPurging {
+ pct := float64(totalOrphanChunkCount*100) / (float64(totalOrphanChunkCount + totalInUseCount))
+ fmt.Fprintf(writer, "\nTotal\t\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ totalOrphanChunkCount+totalInUseCount, totalOrphanChunkCount, pct, totalOrphanDataSize)
+
+ fmt.Fprintf(writer, "This could be normal if multiple filers or no filers are used.\n")
+ }
+
+ return nil
+}
+
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
+ }
+
+ return operation.WithVolumeServerClient(vinfo.server, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+ ext := ".idx"
+ if vinfo.isEcVolume {
+ ext = ".ecx"
+ }
+
+ copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+ VolumeId: volumeId,
+ Ext: ext,
+ CompactionRevision: math.MaxUint32,
+ StopOffset: math.MaxInt64,
+ Collection: vinfo.collection,
+ IsEcVolume: vinfo.isEcVolume,
+ IgnoreSourceFileNotFound: false,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to start copying volume %d.idx: %v", volumeId, err)
+ }
+
+ err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+ if err != nil {
+ return fmt.Errorf("failed to copy %d.idx from %s: %v", volumeId, vinfo.server, err)
+ }
+
+ return nil
+
+ })
+
+}
+
+func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToServer map[uint32]VInfo, verbose bool, writer io.Writer) error {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting file ids from filer ...\n")
+ }
+
+ files := make(map[uint32]*os.File)
+ for vid := range volumeIdToServer {
+ dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ }
+ files[vid] = dst
+ }
+ defer func() {
+ for _, f := range files {
+ f.Close()
+ }
+ }()
+
+ type Item struct {
+ vid uint32
+ fileKey uint64
+ }
+ return doTraverseBfsAndSaving(c.env, nil, "/", false, func(outputChan chan interface{}) {
+ buffer := make([]byte, 8)
+ for item := range outputChan {
+ i := item.(*Item)
+ util.Uint64toBytes(buffer, i.fileKey)
+ files[i.vid].Write(buffer)
+ }
+ }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+ for _, chunk := range entry.Entry.Chunks {
+ outputChan <- &Item{
+ vid: chunk.Fid.VolumeId,
+ fileKey: chunk.Fid.FileKey,
+ }
+ }
+ return nil
+ })
+}
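Each filer-referenced file key is written as a fixed-width 8-byte record, which is what lets oneVolumeFileIdsSubtractFilerFileIds below stride through the .fid file eight bytes at a time. A minimal round-trip sketch with encoding/binary (assuming util.Uint64toBytes/BytesToUint64 are equivalent fixed-width codecs):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	func main() {
		var buf bytes.Buffer
		keys := []uint64{42, 7, 1<<40 + 3}

		// encode: one fixed-width 8-byte record per file key
		record := make([]byte, 8)
		for _, k := range keys {
			binary.BigEndian.PutUint64(record, k)
			buf.Write(record)
		}

		// decode: stride through the data 8 bytes at a time, as the fsck code does
		data := buf.Bytes()
		if len(data)%8 != 0 {
			panic("corrupted data")
		}
		for i := 0; i < len(data); i += 8 {
			fmt.Println(binary.BigEndian.Uint64(data[i : i+8]))
		}
	}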
+
+func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
+
+ db := needle_map.NewMemDb()
+ defer db.Close()
+
+ if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+ return
+ }
+
+ filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
+ if err != nil {
+ return
+ }
+
+ dataLen := len(filerFileIdsData)
+ if dataLen%8 != 0 {
+ return 0, nil, 0, fmt.Errorf("filer data is corrupted")
+ }
+
+ for i := 0; i < len(filerFileIdsData); i += 8 {
+ fileKey := util.BytesToUint64(filerFileIdsData[i : i+8])
+ db.Delete(types.NeedleId(fileKey))
+ inUseCount++
+ }
+
+ var orphanFileCount uint64
+ db.AscendingVisit(func(n needle_map.NeedleValue) error {
+ // fmt.Printf("%d,%x\n", volumeId, n.Key)
+ orphanFileIds = append(orphanFileIds, fmt.Sprintf("%d,%s", volumeId, n.Key.String()))
+ orphanFileCount++
+ orphanDataSize += uint64(n.Size)
+ return nil
+ })
+
+ if orphanFileCount > 0 {
+ pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
+ fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
+ }
+
+ return
+
+}
+
+type VInfo struct {
+ server string
+ collection string
+ isEcVolume bool
+}
+
+func (c *commandVolumeFsck) collectVolumeIds(verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
+ }
+
+ volumeIdToServer = make(map[uint32]VInfo)
+ var resp *master_pb.VolumeListResponse
+ err = c.env.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return
+ }
+
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
+ for _, vi := range t.VolumeInfos {
+ volumeIdToServer[vi.Id] = VInfo{
+ server: t.Id,
+ collection: vi.Collection,
+ isEcVolume: false,
+ }
+ }
+ for _, ecShardInfo := range t.EcShardInfos {
+ volumeIdToServer[ecShardInfo.Id] = VInfo{
+ server: t.Id,
+ collection: ecShardInfo.Collection,
+ isEcVolume: true,
+ }
+ }
+ })
+
+ if verbose {
+ fmt.Fprintf(writer, "collected %d volumes and locations.\n", len(volumeIdToServer))
+ }
+ return
+}
+
+func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []string, writer io.Writer) (err error) {
+ fmt.Fprintf(writer, "purging orphan data for volume %d...\n", volumeId)
+ locations, found := c.env.MasterClient.GetLocations(volumeId)
+ if !found {
+ return fmt.Errorf("failed to find volume %d locations", volumeId)
+ }
+
+ resultChan := make(chan []*volume_server_pb.DeleteResult, len(locations))
+	var wg sync.WaitGroup
+	var errMutex sync.Mutex
+	for _, location := range locations {
+		wg.Add(1)
+		go func(server string, fidList []string) {
+			defer wg.Done()
+
+			if deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil {
+				// the goroutines run concurrently, so guard the shared err variable
+				errMutex.Lock()
+				err = deleteErr
+				errMutex.Unlock()
+			} else if deleteResults != nil {
+				resultChan <- deleteResults
+			}
+
+		}(location.Url, fileIds)
+	}
+ wg.Wait()
+ close(resultChan)
+
+ for results := range resultChan {
+ for _, result := range results {
+ if result.Error != "" {
+ fmt.Fprintf(writer, "purge error: %s\n", result.Error)
+ }
+ }
+ }
+
+ return
+}
+
+func getVolumeFileIdFile(tempFolder string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
+}
+
+func getFilerFileIdFile(tempFolder string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%d.fid", vid))
+}
+
+func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error {
+ flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+ dst, err := os.OpenFile(fileName, flags, 0644)
+ if err != nil {
+		return fmt.Errorf("open %s: %v", fileName, err)
+ }
+ defer dst.Close()
+
+ for {
+ resp, receiveErr := client.Recv()
+ if receiveErr == io.EOF {
+ break
+ }
+ if receiveErr != nil {
+ return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
+ }
+		if _, writeErr := dst.Write(resp.FileContent); writeErr != nil {
+			return fmt.Errorf("writing %s: %v", fileName, writeErr)
+		}
+ }
+ return nil
+}
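The heart of the fsck is set subtraction: load set A (the volume index) into an in-memory map, delete every key from set B (the filer references), and whatever remains is orphaned. A hedged sketch with a plain Go map standing in for needle_map.MemDb:

	package main

	import "fmt"

	func main() {
		// set A: file keys found in the volume's index file
		volumeKeys := map[uint64]bool{1: true, 2: true, 3: true, 4: true}
		// set B: file keys referenced by the filer
		filerKeys := []uint64{1, 3}

		// A - B: delete every filer-referenced key from the volume set
		for _, k := range filerKeys {
			delete(volumeKeys, k)
		}

		// what remains is orphan data, the candidates for purging
		for k := range volumeKeys {
			fmt.Printf("orphan file key: %d\n", k) // 2 and 4, in arbitrary order
		}
	}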
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
new file mode 100644
index 000000000..c5a9388fa
--- /dev/null
+++ b/weed/shell/command_volume_list.go
@@ -0,0 +1,133 @@
+package shell
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sort"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeList{})
+}
+
+type commandVolumeList struct {
+}
+
+func (c *commandVolumeList) Name() string {
+ return "volume.list"
+}
+
+func (c *commandVolumeList) Help() string {
+ return `list all volumes
+
+	This command lists all volumes as a tree of dataCenter > rack > dataNode > volume.
+
+`
+}
+
+func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ var resp *master_pb.VolumeListResponse
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+ writeTopologyInfo(writer, resp.TopologyInfo, resp.VolumeSizeLimitMb)
+ return nil
+}
+
+func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics {
+ fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb)
+ sort.Slice(t.DataCenterInfos, func(i, j int) bool {
+ return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id
+ })
+ var s statistics
+ for _, dc := range t.DataCenterInfos {
+ s = s.plus(writeDataCenterInfo(writer, dc))
+ }
+ fmt.Fprintf(writer, "%+v \n", s)
+ return s
+}
+func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics {
+ fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ var s statistics
+ sort.Slice(t.RackInfos, func(i, j int) bool {
+ return t.RackInfos[i].Id < t.RackInfos[j].Id
+ })
+ for _, r := range t.RackInfos {
+ s = s.plus(writeRackInfo(writer, r))
+ }
+ fmt.Fprintf(writer, " DataCenter %s %+v \n", t.Id, s)
+ return s
+}
+func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
+ fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ var s statistics
+ sort.Slice(t.DataNodeInfos, func(i, j int) bool {
+ return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id
+ })
+ for _, dn := range t.DataNodeInfos {
+ s = s.plus(writeDataNodeInfo(writer, dn))
+ }
+ fmt.Fprintf(writer, " Rack %s %+v \n", t.Id, s)
+ return s
+}
+func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
+ fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ var s statistics
+ sort.Slice(t.VolumeInfos, func(i, j int) bool {
+ return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id
+ })
+ for _, vi := range t.VolumeInfos {
+ s = s.plus(writeVolumeInformationMessage(writer, vi))
+ }
+ for _, ecShardInfo := range t.EcShardInfos {
+ fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
+ }
+ fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s)
+ return s
+}
+func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics {
+ fmt.Fprintf(writer, " volume %+v \n", t)
+ return newStatistics(t)
+}
+
+type statistics struct {
+ Size uint64
+ FileCount uint64
+ DeletedFileCount uint64
+ DeletedBytes uint64
+}
+
+func newStatistics(t *master_pb.VolumeInformationMessage) statistics {
+ return statistics{
+ Size: t.Size,
+ FileCount: t.FileCount,
+ DeletedFileCount: t.DeleteCount,
+ DeletedBytes: t.DeletedByteCount,
+ }
+}
+
+func (s statistics) plus(t statistics) statistics {
+ return statistics{
+ Size: s.Size + t.Size,
+ FileCount: s.FileCount + t.FileCount,
+ DeletedFileCount: s.DeletedFileCount + t.DeletedFileCount,
+ DeletedBytes: s.DeletedBytes + t.DeletedBytes,
+ }
+}
+
+func (s statistics) String() string {
+ if s.DeletedFileCount > 0 {
+ return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes)
+ }
+ return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount)
+}
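The statistics values roll up leaf-to-root: each volume contributes via newStatistics, and plus folds the totals through dataNode, rack, dataCenter, and topology. A tiny self-contained illustration of the fold (the type is copied locally and the values are made up):

	package main

	import "fmt"

	type statistics struct {
		Size             uint64
		FileCount        uint64
		DeletedFileCount uint64
		DeletedBytes     uint64
	}

	func (s statistics) plus(t statistics) statistics {
		return statistics{
			Size:             s.Size + t.Size,
			FileCount:        s.FileCount + t.FileCount,
			DeletedFileCount: s.DeletedFileCount + t.DeletedFileCount,
			DeletedBytes:     s.DeletedBytes + t.DeletedBytes,
		}
	}

	func main() {
		perVolume := []statistics{
			{Size: 100, FileCount: 10},
			{Size: 50, FileCount: 5, DeletedFileCount: 1, DeletedBytes: 8},
		}
		var total statistics
		for _, s := range perVolume {
			total = total.plus(s) // the same fold as writeDataNodeInfo and friends
		}
		fmt.Printf("%+v\n", total) // {Size:150 FileCount:15 DeletedFileCount:1 DeletedBytes:8}
	}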
diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go
new file mode 100644
index 000000000..ded7b7e66
--- /dev/null
+++ b/weed/shell/command_volume_mount.go
@@ -0,0 +1,63 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeMount{})
+}
+
+type commandVolumeMount struct {
+}
+
+func (c *commandVolumeMount) Name() string {
+ return "volume.mount"
+}
+
+func (c *commandVolumeMount) Help() string {
+ return `mount a volume from one volume server
+
+	volume.mount <volume server host:port> <volume id>
+
+ This command mounts a volume from one volume server.
+
+`
+}
+
+func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ if len(args) != 2 {
+ fmt.Fprintf(writer, "received args: %+v\n", args)
+		return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
+ }
+ sourceVolumeServer, volumeIdString := args[0], args[1]
+
+ volumeId, err := needle.NewVolumeId(volumeIdString)
+ if err != nil {
+		return fmt.Errorf("wrong volume id format %s: %v", volumeIdString, err)
+ }
+
+ return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+
+}
+
+func mountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return mountErr
+ })
+}
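mountVolume shows the closure-based client pattern used by every command in this change: operation.WithVolumeServerClient dials the server, hands a typed gRPC client to the callback, and propagates the callback's error. As an illustration, here is a hypothetical helper (not part of this change) that composes the unmount and mount RPCs seen in this diff through the same pattern:

	package shell

	import (
		"context"

		"google.golang.org/grpc"

		"github.com/chrislusf/seaweedfs/weed/operation"
		"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
		"github.com/chrislusf/seaweedfs/weed/storage/needle"
	)

	// remountVolume is a hypothetical helper composing the unmount and mount
	// calls through the same WithVolumeServerClient pattern.
	func remountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, volumeServer string) error {
		return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
			if _, err := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
				VolumeId: uint32(volumeId),
			}); err != nil {
				return err
			}
			_, err := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
				VolumeId: uint32(volumeId),
			})
			return err
		})
	}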
diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go
new file mode 100644
index 000000000..392b947e7
--- /dev/null
+++ b/weed/shell/command_volume_move.go
@@ -0,0 +1,129 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeMove{})
+}
+
+type commandVolumeMove struct {
+}
+
+func (c *commandVolumeMove) Name() string {
+ return "volume.move"
+}
+
+func (c *commandVolumeMove) Help() string {
+ return `move a live volume from one volume server to another volume server
+
+	volume.move <source volume server host:port> <target volume server host:port> <volume id>
+
+	This command moves a live volume from one volume server to another volume server. Here are the steps:
+
+	1. This command asks the target volume server to copy the source volume from the source volume server, and remembers the last entry's timestamp.
+	2. This command asks the target volume server to mount the new volume.
+	   Now the master will mark this volume id as readonly.
+	3. This command asks the target volume server to tail the source volume for updates after that timestamp, with an idle timeout to drain the remaining requests (5 seconds by default).
+	4. This command asks the source volume server to unmount the source volume.
+	   Now the master will mark this volume id as writable.
+	5. This command asks the source volume server to delete the source volume.
+
+`
+}
+
+func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ if len(args) != 3 {
+ fmt.Fprintf(writer, "received args: %+v\n", args)
+		return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>")
+ }
+ sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2]
+
+ volumeId, err := needle.NewVolumeId(volumeIdString)
+ if err != nil {
+		return fmt.Errorf("wrong volume id format %s: %v", volumeIdString, err)
+ }
+
+ if sourceVolumeServer == targetVolumeServer {
+ return fmt.Errorf("source and target volume servers are the same!")
+ }
+
+ return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second)
+}
+
+// LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests.
+func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) {
+
+ log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
+ lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
+ if err != nil {
+ return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
+ }
+
+ log.Printf("tailing volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
+ if err = tailVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil {
+ return fmt.Errorf("tail volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
+ }
+
+ log.Printf("deleting volume %d from %s", volumeId, sourceVolumeServer)
+ if err = deleteVolume(grpcDialOption, volumeId, sourceVolumeServer); err != nil {
+ return fmt.Errorf("delete volume %d from %s: %v", volumeId, sourceVolumeServer, err)
+ }
+
+ log.Printf("moved volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
+ return nil
+}
+
+func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) {
+
+ err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
+ VolumeId: uint32(volumeId),
+ SourceDataNode: sourceVolumeServer,
+ })
+ if replicateErr == nil {
+ lastAppendAtNs = resp.LastAppendAtNs
+ }
+ return replicateErr
+ })
+
+ return
+}
+
+func tailVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) {
+
+ return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, replicateErr := volumeServerClient.VolumeTailReceiver(context.Background(), &volume_server_pb.VolumeTailReceiverRequest{
+ VolumeId: uint32(volumeId),
+ SinceNs: lastAppendAtNs,
+ IdleTimeoutSeconds: uint32(idleTimeout.Seconds()),
+ SourceVolumeServer: sourceVolumeServer,
+ })
+ return replicateErr
+ })
+
+}
+
+func deleteVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := volumeServerClient.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return deleteErr
+ })
+}
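Because LiveMoveVolume is exported, other commands (volume.balance above) call it directly. A hedged usage sketch from within the shell package; the addresses, volume id, and the insecure dial option are assumptions for illustration, not part of this change:

	package shell

	import (
		"time"

		"google.golang.org/grpc"

		"github.com/chrislusf/seaweedfs/weed/storage/needle"
	)

	// moveVolumeExample is a hypothetical illustration of the exported API.
	func moveVolumeExample() error {
		return LiveMoveVolume(
			grpc.WithInsecure(), // dial option; production setups may use TLS credentials
			needle.VolumeId(7),  // the volume to move
			"192.168.0.1:8080",  // source volume server (hypothetical)
			"192.168.0.2:8080",  // target volume server (hypothetical)
			5*time.Second,       // idle timeout while tailing, to drain remaining writes
		)
	}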
diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go
new file mode 100644
index 000000000..d31c8c031
--- /dev/null
+++ b/weed/shell/command_volume_tier_download.go
@@ -0,0 +1,170 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+	"io"
+	"time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeTierDownload{})
+}
+
+type commandVolumeTierDownload struct {
+}
+
+func (c *commandVolumeTierDownload) Name() string {
+ return "volume.tier.download"
+}
+
+func (c *commandVolumeTierDownload) Help() string {
+ return `download the dat file of a volume from a remote tier
+
+ volume.tier.download [-collection=""]
+	volume.tier.download [-collection=""] -volumeId=<volume id>
+
+ e.g.:
+ volume.tier.download -volumeId=7
+
+	This command will download the dat file of a volume from a remote tier to a volume server in the local cluster.
+
+`
+}
+
+func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeId := tierCommand.Int("volumeId", 0, "the volume id")
+ collection := tierCommand.String("collection", "", "the collection name")
+ if err = tierCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ vid := needle.VolumeId(*volumeId)
+
+ // collect topology information
+ topologyInfo, err := collectTopologyInfo(commandEnv)
+ if err != nil {
+ return err
+ }
+
+ // volumeId is provided
+ if vid != 0 {
+ return doVolumeTierDownload(commandEnv, writer, *collection, vid)
+ }
+
+	// apply to all volumes in the collection
+	volumeIds := collectRemoteVolumes(topologyInfo, *collection)
+ fmt.Printf("tier download volumes: %v\n", volumeIds)
+ for _, vid := range volumeIds {
+ if err = doVolumeTierDownload(commandEnv, writer, *collection, vid); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {
+
+ vidMap := make(map[uint32]bool)
+ eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+ for _, v := range dn.VolumeInfos {
+ if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
+ vidMap[v.Id] = true
+ }
+ }
+ })
+
+ for vid := range vidMap {
+ vids = append(vids, needle.VolumeId(vid))
+ }
+
+ return
+}
+
+func doVolumeTierDownload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) {
+ // find volume location
+ locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
+ if !found {
+ return fmt.Errorf("volume %d not found", vid)
+ }
+
+ // TODO parallelize this
+ for _, loc := range locations {
+ // copy the .dat file from remote tier to local
+ err = downloadDatFromRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url)
+ if err != nil {
+ return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err)
+ }
+ }
+
+ return nil
+}
+
+func downloadDatFromRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error {
+
+ err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+		})
+		if downloadErr != nil {
+			return downloadErr
+		}
+
+		var lastProcessed int64
+		lastTime := time.Now()
+		for {
+			resp, recvErr := stream.Recv()
+			if recvErr != nil {
+				if recvErr == io.EOF {
+					break
+				}
+				return recvErr
+			}
+
+			// divide by the elapsed time so the reported MB/s is an actual rate
+			now := time.Now()
+			elapsedSeconds := now.Sub(lastTime).Seconds()
+			var processingSpeed float64
+			if elapsedSeconds > 0 {
+				processingSpeed = float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0 / elapsedSeconds
+			}
+
+			fmt.Fprintf(writer, "downloaded %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed)
+
+			lastProcessed = resp.Processed
+			lastTime = now
+		}
+
+ _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
+ VolumeId: uint32(volumeId),
+ })
+ if unmountErr != nil {
+ return unmountErr
+ }
+
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
+ VolumeId: uint32(volumeId),
+ })
+ if mountErr != nil {
+ return mountErr
+ }
+
+ return nil
+ })
+
+ return err
+
+}
diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go
new file mode 100644
index 000000000..f92cdc3e4
--- /dev/null
+++ b/weed/shell/command_volume_tier_upload.go
@@ -0,0 +1,151 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeTierUpload{})
+}
+
+type commandVolumeTierUpload struct {
+}
+
+func (c *commandVolumeTierUpload) Name() string {
+ return "volume.tier.upload"
+}
+
+func (c *commandVolumeTierUpload) Help() string {
+ return `upload the dat file of a volume to a remote tier
+
+ volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h]
+	volume.tier.upload [-collection=""] -volumeId=<volume id> -dest=<storage backend> [-keepLocalDatFile]
+
+ e.g.:
+ volume.tier.upload -volumeId=7 -dest=s3
+ volume.tier.upload -volumeId=7 -dest=s3.default
+
+	The <storage backend> is defined in master.toml.
+ For example, "s3.default" in [storage.backend.s3.default]
+
+ This command will move the dat file of a volume to a remote tier.
+
+	SeaweedFS enables scalable and fast local access to lots of files,
+	while cloud storage is slower but more cost efficient. How can we combine them?
+
+	Usually the data follows the 80/20 rule: only 20% of the data is frequently accessed.
+	We can offload the old volumes to the cloud.
+
+	With this, SeaweedFS can be both fast and scalable, with effectively unlimited storage space.
+	Just add more local SeaweedFS volume servers to increase the throughput.
+
+	The index file stays local, and the same O(1) disk read is applied to the remote file.
+
+`
+}
+
+func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ volumeId := tierCommand.Int("volumeId", 0, "the volume id")
+ collection := tierCommand.String("collection", "", "the collection name")
+	fullPercentage := tierCommand.Float64("fullPercent", 95, "select volumes that are filled to at least this percentage of the max volume size")
+	quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period")
+ dest := tierCommand.String("dest", "", "the target tier name")
+ keepLocalDatFile := tierCommand.Bool("keepLocalDatFile", false, "whether keep local dat file")
+ if err = tierCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ vid := needle.VolumeId(*volumeId)
+
+ // volumeId is provided
+ if vid != 0 {
+ return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
+ }
+
+ // apply to all volumes in the collection
+ // reusing collectVolumeIdsForEcEncode for now
+ volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("tier upload volumes: %v\n", volumeIds)
+ for _, vid := range volumeIds {
+ if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
+ // find volume location
+ locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
+ if !found {
+ return fmt.Errorf("volume %d not found", vid)
+ }
+
+ err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+ if err != nil {
+ return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
+ }
+
+ // copy the .dat file to remote tier
+ err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile)
+ if err != nil {
+ return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err)
+ }
+
+ return nil
+}
+
+func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
+
+ err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
+ VolumeId: uint32(volumeId),
+ Collection: collection,
+ DestinationBackendName: dest,
+ KeepLocalDatFile: keepLocalDatFile,
+		})
+		if copyErr != nil {
+			return copyErr
+		}
+
+		var lastProcessed int64
+		lastTime := time.Now()
+		for {
+			resp, recvErr := stream.Recv()
+			if recvErr != nil {
+				if recvErr == io.EOF {
+					break
+				}
+				return recvErr
+			}
+
+			// divide by the elapsed time so the reported MB/s is an actual rate
+			now := time.Now()
+			elapsedSeconds := now.Sub(lastTime).Seconds()
+			var processingSpeed float64
+			if elapsedSeconds > 0 {
+				processingSpeed = float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0 / elapsedSeconds
+			}
+
+			fmt.Fprintf(writer, "copied %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed)
+
+			lastProcessed = resp.Processed
+			lastTime = now
+		}
+
+		return nil
+ })
+
+ return err
+
+}
diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go
new file mode 100644
index 000000000..7596bb4c8
--- /dev/null
+++ b/weed/shell/command_volume_unmount.go
@@ -0,0 +1,63 @@
+package shell
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeUnmount{})
+}
+
+type commandVolumeUnmount struct {
+}
+
+func (c *commandVolumeUnmount) Name() string {
+ return "volume.unmount"
+}
+
+func (c *commandVolumeUnmount) Help() string {
+ return `unmount a volume from one volume server
+
+ volume.unmount <volume server host:port> <volume id>
+
+ This command unmounts a volume from one volume server.
+
+`
+}
+
+func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ if len(args) != 2 {
+ fmt.Fprintf(writer, "received args: %+v\n", args)
+ return fmt.Errorf("need 2 args of ")
+ }
+ sourceVolumeServer, volumeIdString := args[0], args[1]
+
+ volumeId, err := needle.NewVolumeId(volumeIdString)
+ if err != nil {
+ return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+ }
+
+ return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+
+}
+
+func unmountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
+ VolumeId: uint32(volumeId),
+ })
+ return unmountErr
+ })
+}
diff --git a/weed/shell/commands.go b/weed/shell/commands.go
new file mode 100644
index 000000000..f61ed9f82
--- /dev/null
+++ b/weed/shell/commands.go
@@ -0,0 +1,137 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "github.com/chrislusf/seaweedfs/weed/wdclient/exclusive_locks"
+)
+
+type ShellOptions struct {
+ Masters *string
+ GrpcDialOption grpc.DialOption
+ // shell transient context
+ FilerHost string
+ FilerPort int64
+ Directory string
+}
+
+type CommandEnv struct {
+ env map[string]string
+ MasterClient *wdclient.MasterClient
+ option ShellOptions
+ locker *exclusive_locks.ExclusiveLocker
+}
+
+type command interface {
+ Name() string
+ Help() string
+ Do([]string, *CommandEnv, io.Writer) error
+}
+
+var (
+ Commands = []command{}
+)
+
+func NewCommandEnv(options ShellOptions) *CommandEnv {
+ ce := &CommandEnv{
+ env: make(map[string]string),
+ MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", 0, strings.Split(*options.Masters, ",")),
+ option: options,
+ }
+ ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient)
+ return ce
+}
+
+func (ce *CommandEnv) parseUrl(input string) (path string, err error) {
+ if strings.HasPrefix(input, "http") {
+ err = fmt.Errorf("http://: prefix is not supported any more")
+ return
+ }
+ if !strings.HasPrefix(input, "/") {
+ input = util.Join(ce.option.Directory, input)
+ }
+ return input, err
+}
+
+func (ce *CommandEnv) isDirectory(path string) bool {
+
+ return ce.checkDirectory(path) == nil
+
+}
+
+func (ce *CommandEnv) confirmIsLocked() error {
+
+ if ce.locker.IsLocking() {
+ return nil
+ }
+
+ return fmt.Errorf("need to lock to continue")
+
+}
+
+func (ce *CommandEnv) checkDirectory(path string) error {
+
+ dir, name := util.FullPath(path).DirAndName()
+
+ exists, err := filer_pb.Exists(ce, dir, name, true)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return fmt.Errorf("%s is not a directory", path)
+ }
+
+ return nil
+
+}
+
+// compile-time check that CommandEnv implements filer_pb.FilerClient
+var _ = filer_pb.FilerClient(&CommandEnv{})
+
+func (ce *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
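+ // by convention, the filer's gRPC port is its HTTP port + 10000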
+ filerGrpcAddress := fmt.Sprintf("%s:%d", ce.option.FilerHost, ce.option.FilerPort+10000)
+ return pb.WithGrpcFilerClient(filerGrpcAddress, ce.option.GrpcDialOption, fn)
+
+}
+
+func (ce *CommandEnv) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
+
+func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) {
+ if strings.HasPrefix(entryPath, "http") {
+ var u *url.URL
+ u, err = url.Parse(entryPath)
+ if err != nil {
+ return
+ }
+ filerServer = u.Hostname()
+ portString := u.Port()
+ if portString != "" {
+ filerPort, err = strconv.ParseInt(portString, 10, 32)
+ }
+ path = u.Path
+ } else {
+ err = fmt.Errorf("path should have full url /path/to/dirOrFile : %s", entryPath)
+ }
+ return
+}
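+
+// For illustration (hypothetical values): parseFilerUrl("http://localhost:8888/path/to/dir")
+// returns ("localhost", 8888, "/path/to/dir", nil), while a bare "/path/to/dir"
+// is rejected because it lacks the http prefix.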
+
+func findInputDirectory(args []string) (input string) {
+ input = "."
+ if len(args) > 0 {
+ input = args[len(args)-1]
+ if strings.HasPrefix(input, "-") {
+ input = "."
+ }
+ }
+ return input
+}
diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go
new file mode 100644
index 000000000..4632a1fb0
--- /dev/null
+++ b/weed/shell/shell_liner.go
@@ -0,0 +1,154 @@
+package shell
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/peterh/liner"
+)
+
+var (
+ line *liner.State
+ historyPath = path.Join(os.TempDir(), "weed-shell")
+)
+
+func RunShell(options ShellOptions) {
+
+ line = liner.NewLiner()
+ defer line.Close()
+
+ line.SetCtrlCAborts(true)
+
+ setCompletionHandler()
+ loadHistory()
+
+ defer saveHistory()
+
+ reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`)
+
+ commandEnv := NewCommandEnv(options)
+
+ go commandEnv.MasterClient.KeepConnectedToMaster()
+ commandEnv.MasterClient.WaitUntilConnected()
+
+ for {
+ cmd, err := line.Prompt("> ")
+ if err != nil {
+ if err != io.EOF {
+ fmt.Printf("%v\n", err)
+ }
+ return
+ }
+
+ for _, c := range strings.Split(cmd, ";") {
+ if processEachCmd(reg, c, commandEnv) {
+ return
+ }
+ }
+ }
+}
+
+func processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool {
+ cmds := reg.FindAllString(cmd, -1)
+ if len(cmds) == 0 {
+ return false
+ } else {
+ line.AppendHistory(cmd)
+
+ args := make([]string, len(cmds[1:]))
+
+ for i := range args {
+ args[i] = strings.Trim(cmds[1+i], "\"'")
+ }
+
+ cmd := strings.ToLower(cmds[0])
+ if cmd == "help" || cmd == "?" {
+ printHelp(cmds)
+ } else if cmd == "exit" || cmd == "quit" {
+ return true
+ } else {
+ foundCommand := false
+ for _, c := range Commands {
+ if c.Name() == cmd || c.Name() == "fs."+cmd {
+ if err := c.Do(args, commandEnv, os.Stdout); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ }
+ foundCommand = true
+ }
+ }
+ if !foundCommand {
+ fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd)
+ }
+ }
+
+ }
+ return false
+}
+
+func printGenericHelp() {
+ msg :=
+ `Type: "help " for help on
+`
+ fmt.Print(msg)
+
+ sort.Slice(Commands, func(i, j int) bool {
+ return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0
+ })
+ for _, c := range Commands {
+ helpTexts := strings.SplitN(c.Help(), "\n", 2)
+ fmt.Printf(" %-30s\t# %s \n", c.Name(), helpTexts[0])
+ }
+}
+
+func printHelp(cmds []string) {
+ args := cmds[1:]
+ if len(args) == 0 {
+ printGenericHelp()
+ } else if len(args) > 1 {
+ fmt.Println()
+ } else {
+ cmd := strings.ToLower(args[0])
+
+ sort.Slice(Commands, func(i, j int) bool {
+ return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0
+ })
+
+ for _, c := range Commands {
+ if c.Name() == cmd {
+ fmt.Printf(" %s\t# %s\n", c.Name(), c.Help())
+ }
+ }
+ }
+}
+
+func setCompletionHandler() {
+ line.SetCompleter(func(line string) (c []string) {
+ for _, i := range Commands {
+ if strings.HasPrefix(i.Name(), strings.ToLower(line)) {
+ c = append(c, i.Name())
+ }
+ }
+ return
+ })
+}
+
+func loadHistory() {
+ if f, err := os.Open(historyPath); err == nil {
+ line.ReadHistory(f)
+ f.Close()
+ }
+}
+
+func saveHistory() {
+ if f, err := os.Create(historyPath); err != nil {
+ fmt.Printf("Error writing history file: %v\n", err)
+ } else {
+ line.WriteHistory(f)
+ f.Close()
+ }
+}
diff --git a/weed/stats/disk.go b/weed/stats/disk.go
index e9d8baedd..8af7240a8 100644
--- a/weed/stats/disk.go
+++ b/weed/stats/disk.go
@@ -1,9 +1,13 @@
package stats
-import "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+)
func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) {
disk = &volume_server_pb.DiskStatus{Dir: path}
fillInDiskStatus(disk)
+ glog.V(2).Infof("read disk size: %v", disk)
return
}
diff --git a/weed/stats/disk_notsupported.go b/weed/stats/disk_notsupported.go
index ace662f6a..3d99e6ce7 100644
--- a/weed/stats/disk_notsupported.go
+++ b/weed/stats/disk_notsupported.go
@@ -1,4 +1,4 @@
-// +build windows openbsd netbsd plan9 solaris
+// +build openbsd netbsd plan9 solaris
package stats
diff --git a/weed/stats/disk_supported.go b/weed/stats/disk_supported.go
index 0537828b0..dff580b5b 100644
--- a/weed/stats/disk_supported.go
+++ b/weed/stats/disk_supported.go
@@ -17,5 +17,7 @@ func fillInDiskStatus(disk *volume_server_pb.DiskStatus) {
disk.All = fs.Blocks * uint64(fs.Bsize)
disk.Free = fs.Bfree * uint64(fs.Bsize)
disk.Used = disk.All - disk.Free
+ disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100)
+ disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100)
return
}
diff --git a/weed/stats/disk_windows.go b/weed/stats/disk_windows.go
new file mode 100644
index 000000000..3cfa52c0b
--- /dev/null
+++ b/weed/stats/disk_windows.go
@@ -0,0 +1,47 @@
+package stats
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "golang.org/x/sys/windows"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32 = windows.NewLazySystemDLL("Kernel32.dll")
+ getDiskFreeSpaceEx = kernel32.NewProc("GetDiskFreeSpaceExW")
+)
+
+func fillInDiskStatus(disk *volume_server_pb.DiskStatus) {
+
+ ptr, err := syscall.UTF16PtrFromString(disk.Dir)
+
+ if err != nil {
+ return
+ }
+ var _temp uint64
+ /* #nosec */
+ r, _, _ := syscall.Syscall6(
+ getDiskFreeSpaceEx.Addr(),
+ 4,
+ uintptr(unsafe.Pointer(ptr)),
+ uintptr(unsafe.Pointer(&disk.Free)),
+ uintptr(unsafe.Pointer(&disk.All)),
+ uintptr(unsafe.Pointer(&_temp)),
+ 0,
+ 0,
+ )
+
+ if r == 0 {
+ // the call failed; leave the DiskStatus zeroed
+ return
+ }
+ disk.Used = disk.All - disk.Free
+ disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100)
+ disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100)
+
+ return
+}
diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go
new file mode 100644
index 000000000..7ff09a388
--- /dev/null
+++ b/weed/stats/metrics.go
@@ -0,0 +1,147 @@
+package stats
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/push"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+var (
+ FilerGather = prometheus.NewRegistry()
+ VolumeServerGather = prometheus.NewRegistry()
+
+ FilerRequestCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "filer",
+ Name: "request_total",
+ Help: "Counter of filer requests.",
+ }, []string{"type"})
+
+ FilerRequestHistogram = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "filer",
+ Name: "request_seconds",
+ Help: "Bucketed histogram of filer request processing time.",
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 24),
+ }, []string{"type"})
+
+ FilerStoreCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "filerStore",
+ Name: "request_total",
+ Help: "Counter of filer store requests.",
+ }, []string{"store", "type"})
+
+ FilerStoreHistogram = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "filerStore",
+ Name: "request_seconds",
+ Help: "Bucketed histogram of filer store request processing time.",
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 24),
+ }, []string{"store", "type"})
+
+ VolumeServerRequestCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "volumeServer",
+ Name: "request_total",
+ Help: "Counter of volume server requests.",
+ }, []string{"type"})
+
+ VolumeServerRequestHistogram = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "volumeServer",
+ Name: "request_seconds",
+ Help: "Bucketed histogram of volume server request processing time.",
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 24),
+ }, []string{"type"})
+
+ VolumeServerVolumeCounter = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "volumeServer",
+ Name: "volumes",
+ Help: "Number of volumes or shards.",
+ }, []string{"collection", "type"})
+
+ VolumeServerMaxVolumeCounter = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "volumeServer",
+ Name: "max_volumes",
+ Help: "Maximum number of volumes.",
+ })
+
+ VolumeServerDiskSizeGauge = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "SeaweedFS",
+ Subsystem: "volumeServer",
+ Name: "total_disk_size",
+ Help: "Actual disk size used by volumes.",
+ }, []string{"collection", "type"})
+)
+
+func init() {
+
+ FilerGather.MustRegister(FilerRequestCounter)
+ FilerGather.MustRegister(FilerRequestHistogram)
+ FilerGather.MustRegister(FilerStoreCounter)
+ FilerGather.MustRegister(FilerStoreHistogram)
+ FilerGather.MustRegister(prometheus.NewGoCollector())
+
+ VolumeServerGather.MustRegister(VolumeServerRequestCounter)
+ VolumeServerGather.MustRegister(VolumeServerRequestHistogram)
+ VolumeServerGather.MustRegister(VolumeServerVolumeCounter)
+ VolumeServerGather.MustRegister(VolumeServerMaxVolumeCounter)
+ VolumeServerGather.MustRegister(VolumeServerDiskSizeGauge)
+
+}
+
+func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, fnGetMetricsDest func() (addr string, intervalSeconds int)) {
+
+ if fnGetMetricsDest == nil {
+ return
+ }
+
+ addr, intervalSeconds := fnGetMetricsDest()
+ pusher := push.New(addr, name).Gatherer(gatherer).Grouping("instance", instance)
+ currentAddr := addr
+
+ for {
+ if currentAddr != "" {
+ err := pusher.Push()
+ if err != nil && !strings.HasPrefix(err.Error(), "unexpected status code 200") {
+ glog.V(0).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err)
+ }
+ }
+ if intervalSeconds <= 0 {
+ intervalSeconds = 15
+ }
+ time.Sleep(time.Duration(intervalSeconds) * time.Second)
+ addr, intervalSeconds = fnGetMetricsDest()
+ if currentAddr != addr {
+ pusher = push.New(addr, name).Gatherer(gatherer).Grouping("instance", instance)
+ currentAddr = addr
+ }
+
+ }
+}
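+
+// Illustrative (hypothetical) invocation from a volume server, assuming
+// metricsAddr and metricsIntervalSec come from its configuration:
+//
+//	go stats.LoopPushingMetric("volumeServer", stats.SourceName(grpcPort), stats.VolumeServerGather,
+//		func() (addr string, intervalSeconds int) {
+//			return metricsAddr, metricsIntervalSec
+//		})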
+
+func SourceName(port uint32) string {
+ hostname, err := os.Hostname()
+ if err != nil {
+ return "unknown"
+ }
+ return fmt.Sprintf("%s:%d", hostname, port)
+}
diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go
new file mode 100644
index 000000000..daab29621
--- /dev/null
+++ b/weed/storage/backend/backend.go
@@ -0,0 +1,136 @@
+package backend
+
+import (
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/spf13/viper"
+)
+
+type BackendStorageFile interface {
+ io.ReaderAt
+ io.WriterAt
+ Truncate(off int64) error
+ io.Closer
+ GetStat() (datSize int64, modTime time.Time, err error)
+ Name() string
+ Sync() error
+}
+
+type BackendStorage interface {
+ ToProperties() map[string]string
+ NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile
+ CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
+ DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error)
+ DeleteFile(key string) (err error)
+}
+
+type StringProperties interface {
+ GetString(key string) string
+}
+type StorageType string
+type BackendStorageFactory interface {
+ StorageType() StorageType
+ BuildStorage(configuration StringProperties, configPrefix string, id string) (BackendStorage, error)
+}
+
+var (
+ BackendStorageFactories = make(map[StorageType]BackendStorageFactory)
+ BackendStorages = make(map[string]BackendStorage)
+)
+
+// used by master to load remote storage configurations
+func LoadConfiguration(config *viper.Viper) {
+
+ StorageBackendPrefix := "storage.backend"
+
+ for backendTypeName := range config.GetStringMap(StorageBackendPrefix) {
+ backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)]
+ if !found {
+ glog.Fatalf("backend storage type %s not found", backendTypeName)
+ }
+ for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) {
+ if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." + backendStorageId + ".enabled") {
+ continue
+ }
+ backendStorage, buildErr := backendStorageFactory.BuildStorage(config,
+ StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId)
+ if buildErr != nil {
+ glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId)
+ }
+ BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage
+ if backendStorageId == "default" {
+ BackendStorages[backendTypeName] = backendStorage
+ }
+ }
+ }
+
+}
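+
+// A minimal sketch of the configuration shape LoadConfiguration expects, shown
+// for the s3 backend (the key names match the lookups above and in s3_backend;
+// the values are placeholders):
+//
+//	[storage.backend.s3.default]
+//	enabled = true
+//	aws_access_key_id = ""
+//	aws_secret_access_key = ""
+//	region = "us-east-2"
+//	bucket = "your_bucket_name"
+//	endpoint = ""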
+
+// used by volume server to receive remote storage configurations from master
+func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) {
+
+ for _, storageBackend := range storageBackends {
+ backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)]
+ if !found {
+ glog.Warningf("storage type %s not found", storageBackend.Type)
+ continue
+ }
+ backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id)
+ if buildErr != nil {
+ glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id)
+ }
+ BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage
+ if storageBackend.Id == "default" {
+ BackendStorages[storageBackend.Type] = backendStorage
+ }
+ }
+}
+
+type Properties struct {
+ m map[string]string
+}
+
+func newProperties(m map[string]string) *Properties {
+ return &Properties{m: m}
+}
+
+func (p *Properties) GetString(key string) string {
+ if v, found := p.m[key]; found {
+ return v
+ }
+ return ""
+}
+
+func ToPbStorageBackends() (backends []*master_pb.StorageBackend) {
+ for sName, s := range BackendStorages {
+ sType, sId := BackendNameToTypeId(sName)
+ if sType == "" {
+ continue
+ }
+ backends = append(backends, &master_pb.StorageBackend{
+ Type: sType,
+ Id: sId,
+ Properties: s.ToProperties(),
+ })
+ }
+ return
+}
+
+func BackendNameToTypeId(backendName string) (backendType, backendId string) {
+ parts := strings.Split(backendName, ".")
+ if len(parts) == 1 {
+ return backendName, "default"
+ }
+ if len(parts) != 2 {
+ return
+ }
+
+ backendType, backendId = parts[0], parts[1]
+ return
+}
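+
+// For example, BackendNameToTypeId("s3") returns ("s3", "default"),
+// BackendNameToTypeId("s3.prod") returns ("s3", "prod"), and a malformed
+// name such as "a.b.c" returns ("", "").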
diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go
new file mode 100644
index 000000000..2b04c8df2
--- /dev/null
+++ b/weed/storage/backend/disk_file.go
@@ -0,0 +1,54 @@
+package backend
+
+import (
+ "os"
+ "time"
+)
+
+var (
+ _ BackendStorageFile = &DiskFile{}
+)
+
+type DiskFile struct {
+ File *os.File
+ fullFilePath string
+}
+
+func NewDiskFile(f *os.File) *DiskFile {
+ return &DiskFile{
+ fullFilePath: f.Name(),
+ File: f,
+ }
+}
+
+func (df *DiskFile) ReadAt(p []byte, off int64) (n int, err error) {
+ return df.File.ReadAt(p, off)
+}
+
+func (df *DiskFile) WriteAt(p []byte, off int64) (n int, err error) {
+ return df.File.WriteAt(p, off)
+}
+
+func (df *DiskFile) Truncate(off int64) error {
+ return df.File.Truncate(off)
+}
+
+func (df *DiskFile) Close() error {
+ return df.File.Close()
+}
+
+func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) {
+ stat, e := df.File.Stat()
+ if e == nil {
+ return stat.Size(), stat.ModTime(), nil
+ }
+ return 0, time.Time{}, e
+}
+
+func (df *DiskFile) Name() string {
+ return df.fullFilePath
+}
+
+func (df *DiskFile) Sync() error {
+ return df.File.Sync()
+}
diff --git a/weed/storage/backend/memory_map/memory_map.go b/weed/storage/backend/memory_map/memory_map.go
new file mode 100644
index 000000000..5dc7ba33d
--- /dev/null
+++ b/weed/storage/backend/memory_map/memory_map.go
@@ -0,0 +1,30 @@
+package memory_map
+
+import (
+ "os"
+ "strconv"
+)
+
+type MemoryBuffer struct {
+ aligned_length uint64
+ length uint64
+ aligned_ptr uintptr
+ ptr uintptr
+ Buffer []byte
+}
+
+type MemoryMap struct {
+ File *os.File
+ file_memory_map_handle uintptr
+ write_map_views []MemoryBuffer
+ max_length uint64
+ End_of_file int64
+}
+
+func ReadMemoryMapMaxSizeMb(memoryMapMaxSizeMbString string) (uint32, error) {
+ if memoryMapMaxSizeMbString == "" {
+ return 0, nil
+ }
+ memoryMapMaxSize64, err := strconv.ParseUint(memoryMapMaxSizeMbString, 10, 32)
+ return uint32(memoryMapMaxSize64), err
+}
diff --git a/weed/storage/backend/memory_map/memory_map_backend.go b/weed/storage/backend/memory_map/memory_map_backend.go
new file mode 100644
index 000000000..8ff03d9af
--- /dev/null
+++ b/weed/storage/backend/memory_map/memory_map_backend.go
@@ -0,0 +1,62 @@
+package memory_map
+
+import (
+ "os"
+ "time"
+)
+
+var (
+// _ backend.BackendStorageFile = &MemoryMappedFile{} // assertion removed to avoid an import cycle
+)
+
+type MemoryMappedFile struct {
+ mm *MemoryMap
+}
+
+func NewMemoryMappedFile(f *os.File, memoryMapSizeMB uint32) *MemoryMappedFile {
+ mmf := &MemoryMappedFile{
+ mm: new(MemoryMap),
+ }
+ mmf.mm.CreateMemoryMap(f, 1024*1024*uint64(memoryMapSizeMB))
+ return mmf
+}
+
+func (mmf *MemoryMappedFile) ReadAt(p []byte, off int64) (n int, err error) {
+ readBytes, e := mmf.mm.ReadMemory(uint64(off), uint64(len(p)))
+ if e != nil {
+ return 0, e
+ }
+ // TODO avoid the extra copy
+ copy(p, readBytes)
+ return len(readBytes), nil
+}
+
+func (mmf *MemoryMappedFile) WriteAt(p []byte, off int64) (n int, err error) {
+ mmf.mm.WriteMemory(uint64(off), uint64(len(p)), p)
+ return len(p), nil
+}
+
+func (mmf *MemoryMappedFile) Truncate(off int64) error {
+ return nil
+}
+
+func (mmf *MemoryMappedFile) Close() error {
+ mmf.mm.DeleteFileAndMemoryMap()
+ return nil
+}
+
+func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err error) {
+ stat, e := mmf.mm.File.Stat()
+ if e == nil {
+ return mmf.mm.End_of_file + 1, stat.ModTime(), nil
+ }
+ return 0, time.Time{}, e
+}
+
+func (mmf *MemoryMappedFile) Name() string {
+ return mmf.mm.File.Name()
+}
+
+func (mmf *MemoryMappedFile) Sync() error {
+ return nil
+}
diff --git a/weed/storage/backend/memory_map/memory_map_other.go b/weed/storage/backend/memory_map/memory_map_other.go
new file mode 100644
index 000000000..e06a0b59a
--- /dev/null
+++ b/weed/storage/backend/memory_map/memory_map_other.go
@@ -0,0 +1,24 @@
+// +build !windows
+
+package memory_map
+
+import (
+ "fmt"
+ "os"
+)
+
+func (mMap *MemoryMap) CreateMemoryMap(file *os.File, maxLength uint64) {
+}
+
+func (mMap *MemoryMap) WriteMemory(offset uint64, length uint64, data []byte) {
+
+}
+
+func (mMap *MemoryMap) ReadMemory(offset uint64, length uint64) ([]byte, error) {
+ dataSlice := []byte{}
+ return dataSlice, fmt.Errorf("Memory Map not implemented for this platform")
+}
+
+func (mMap *MemoryMap) DeleteFileAndMemoryMap() {
+
+}
diff --git a/weed/storage/backend/memory_map/memory_map_test.go b/weed/storage/backend/memory_map/memory_map_test.go
new file mode 100644
index 000000000..33e1a828c
--- /dev/null
+++ b/weed/storage/backend/memory_map/memory_map_test.go
@@ -0,0 +1,10 @@
+package memory_map
+
+import "testing"
+
+func TestMemoryMapMaxSizeReadWrite(t *testing.T) {
+ memoryMapSize, _ := ReadMemoryMapMaxSizeMb("5000")
+ if memoryMapSize != 5000 {
+ t.Errorf("empty memoryMapSize:%v", memoryMapSize)
+ }
+}
diff --git a/weed/storage/backend/memory_map/memory_map_windows.go b/weed/storage/backend/memory_map/memory_map_windows.go
new file mode 100644
index 000000000..7eb713442
--- /dev/null
+++ b/weed/storage/backend/memory_map/memory_map_windows.go
@@ -0,0 +1,325 @@
+// +build windows
+
+package memory_map
+
+import (
+ "os"
+ "reflect"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+type DWORDLONG = uint64
+type DWORD = uint32
+type WORD = uint16
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGetSystemInfo = modkernel32.NewProc("GetSystemInfo")
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+ procGetProcessWorkingSetSize = modkernel32.NewProc("GetProcessWorkingSetSize")
+ procSetProcessWorkingSetSize = modkernel32.NewProc("SetProcessWorkingSetSize")
+)
+
+var currentProcess, _ = windows.GetCurrentProcess()
+var currentMinWorkingSet uint64 = 0
+var currentMaxWorkingSet uint64 = 0
+var _ = getProcessWorkingSetSize(uintptr(currentProcess), &currentMinWorkingSet, &currentMaxWorkingSet)
+
+var systemInfo, _ = getSystemInfo()
+var chunkSize = uint64(systemInfo.dwAllocationGranularity) * 128
+
+var memoryStatusEx, _ = globalMemoryStatusEx()
+var maxMemoryLimitBytes = uint64(float64(memoryStatusEx.ullTotalPhys) * 0.8)
+
+func (mMap *MemoryMap) CreateMemoryMap(file *os.File, maxLength uint64) {
+
+ chunks := (maxLength / chunkSize)
+ if chunks*chunkSize < maxLength {
+ chunks = chunks + 1
+ }
+
+ alignedMaxLength := chunks * chunkSize
+
+ maxLength_high := uint32(alignedMaxLength >> 32)
+ maxLength_low := uint32(alignedMaxLength & 0xFFFFFFFF)
+ file_memory_map_handle, err := windows.CreateFileMapping(windows.Handle(file.Fd()), nil, windows.PAGE_READWRITE, maxLength_high, maxLength_low, nil)
+
+ if err == nil {
+ mMap.File = file
+ mMap.file_memory_map_handle = uintptr(file_memory_map_handle)
+ mMap.write_map_views = make([]MemoryBuffer, 0, alignedMaxLength/chunkSize)
+ mMap.max_length = alignedMaxLength
+ mMap.End_of_file = -1
+ }
+}
+
+func (mMap *MemoryMap) DeleteFileAndMemoryMap() {
+ // close the file handles first so the file can be deleted, then unmap the
+ // memory so the unmapping process doesn't flush the data back to disk
+ windows.CloseHandle(windows.Handle(mMap.file_memory_map_handle))
+ windows.CloseHandle(windows.Handle(mMap.File.Fd()))
+
+ for _, view := range mMap.write_map_views {
+ view.releaseMemory()
+ }
+
+ mMap.write_map_views = nil
+ mMap.max_length = 0
+}
+
+func min(x, y uint64) uint64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func (mMap *MemoryMap) WriteMemory(offset uint64, length uint64, data []byte) {
+
+ for ((offset+length)/chunkSize)+1 > uint64(len(mMap.write_map_views)) {
+ allocateChunk(mMap)
+ }
+
+ remaining_length := length
+ sliceIndex := offset / chunkSize
+ sliceOffset := offset - (sliceIndex * chunkSize)
+ dataOffset := uint64(0)
+
+ for {
+ writeEnd := min((remaining_length + sliceOffset), chunkSize)
+ copy(mMap.write_map_views[sliceIndex].Buffer[sliceOffset:writeEnd], data[dataOffset:])
+ remaining_length -= (writeEnd - sliceOffset)
+ dataOffset += (writeEnd - sliceOffset)
+
+ if remaining_length > 0 {
+ sliceIndex += 1
+ sliceOffset = 0
+ } else {
+ break
+ }
+ }
+
+ if mMap.End_of_file < int64(offset+length-1) {
+ mMap.End_of_file = int64(offset + length - 1)
+ }
+}
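+
+// Worked example for WriteMemory, assuming a typical 64KB allocation
+// granularity (so chunkSize = 8MB): WriteMemory(offset=7MB, length=2MB)
+// starts at sliceIndex 0 with sliceOffset 7MB, copies 1MB to fill chunk 0,
+// rolls over to sliceIndex 1 at offset 0 for the remaining 1MB, and
+// advances End_of_file to 9MB-1.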
+
+func (mMap *MemoryMap) ReadMemory(offset uint64, length uint64) (dataSlice []byte, err error) {
+ dataSlice = make([]byte, length)
+ mBuffer, err := allocate(windows.Handle(mMap.file_memory_map_handle), offset, length, false)
+ copy(dataSlice, mBuffer.Buffer)
+ mBuffer.releaseMemory()
+ return dataSlice, err
+}
+
+func (mBuffer *MemoryBuffer) releaseMemory() {
+
+ windows.VirtualUnlock(mBuffer.aligned_ptr, uintptr(mBuffer.aligned_length))
+ windows.UnmapViewOfFile(mBuffer.aligned_ptr)
+
+ currentMinWorkingSet -= mBuffer.aligned_length
+ currentMaxWorkingSet -= mBuffer.aligned_length
+
+ if currentMinWorkingSet < maxMemoryLimitBytes {
+ var _ = setProcessWorkingSetSize(uintptr(currentProcess), currentMinWorkingSet, currentMaxWorkingSet)
+ }
+
+ mBuffer.ptr = 0
+ mBuffer.aligned_ptr = 0
+ mBuffer.length = 0
+ mBuffer.aligned_length = 0
+ mBuffer.Buffer = nil
+}
+
+func allocateChunk(mMap *MemoryMap) {
+ start := uint64(len(mMap.write_map_views)) * chunkSize
+ mBuffer, err := allocate(windows.Handle(mMap.file_memory_map_handle), start, chunkSize, true)
+
+ if err == nil {
+ mMap.write_map_views = append(mMap.write_map_views, mBuffer)
+ }
+}
+
+func allocate(hMapFile windows.Handle, offset uint64, length uint64, write bool) (MemoryBuffer, error) {
+
+ mBuffer := MemoryBuffer{}
+
+ // align memory allocations to the minimum virtual memory allocation size
+ dwSysGran := systemInfo.dwAllocationGranularity
+
+ start := (offset / uint64(dwSysGran)) * uint64(dwSysGran)
+ diff := offset - start
+ aligned_length := diff + length
+
+ offset_high := uint32(start >> 32)
+ offset_low := uint32(start & 0xFFFFFFFF)
+
+ access := windows.FILE_MAP_READ
+
+ if write {
+ access = windows.FILE_MAP_WRITE
+ }
+
+ currentMinWorkingSet += aligned_length
+ currentMaxWorkingSet += aligned_length
+
+ if currentMinWorkingSet < maxMemoryLimitBytes {
+ // increase the process working set size to hint to windows memory manager to
+ // prioritise keeping this memory mapped in physical memory over other standby memory
+ var _ = setProcessWorkingSetSize(uintptr(currentProcess), currentMinWorkingSet, currentMaxWorkingSet)
+ }
+
+ addr_ptr, errno := windows.MapViewOfFile(hMapFile,
+ uint32(access), // read/write permission
+ offset_high,
+ offset_low,
+ uintptr(aligned_length))
+
+ if addr_ptr == 0 {
+ return mBuffer, errno
+ }
+
+ if currentMinWorkingSet < maxMemoryLimitBytes {
+ windows.VirtualLock(mBuffer.aligned_ptr, uintptr(mBuffer.aligned_length))
+ }
+
+ mBuffer.aligned_ptr = addr_ptr
+ mBuffer.aligned_length = aligned_length
+ mBuffer.ptr = addr_ptr + uintptr(diff)
+ mBuffer.length = length
+
+ slice_header := (*reflect.SliceHeader)(unsafe.Pointer(&mBuffer.Buffer))
+ slice_header.Data = addr_ptr + uintptr(diff)
+ slice_header.Len = int(length)
+ slice_header.Cap = int(length)
+
+ return mBuffer, nil
+}
+
+//typedef struct _MEMORYSTATUSEX {
+// DWORD dwLength;
+// DWORD dwMemoryLoad;
+// DWORDLONG ullTotalPhys;
+// DWORDLONG ullAvailPhys;
+// DWORDLONG ullTotalPageFile;
+// DWORDLONG ullAvailPageFile;
+// DWORDLONG ullTotalVirtual;
+// DWORDLONG ullAvailVirtual;
+// DWORDLONG ullAvailExtendedVirtual;
+// } MEMORYSTATUSEX, *LPMEMORYSTATUSEX;
+//https://docs.microsoft.com/en-gb/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex
+
+type _MEMORYSTATUSEX struct {
+ dwLength DWORD
+ dwMemoryLoad DWORD
+ ullTotalPhys DWORDLONG
+ ullAvailPhys DWORDLONG
+ ullTotalPageFile DWORDLONG
+ ullAvailPageFile DWORDLONG
+ ullTotalVirtual DWORDLONG
+ ullAvailVirtual DWORDLONG
+ ullAvailExtendedVirtual DWORDLONG
+}
+
+// BOOL GlobalMemoryStatusEx(
+// LPMEMORYSTATUSEX lpBuffer
+// );
+// https://docs.microsoft.com/en-gb/windows/win32/api/sysinfoapi/nf-sysinfoapi-globalmemorystatusex
+func globalMemoryStatusEx() (_MEMORYSTATUSEX, error) {
+ var mem_status _MEMORYSTATUSEX
+
+ mem_status.dwLength = uint32(unsafe.Sizeof(mem_status))
+ _, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&mem_status)))
+
+ if err != syscall.Errno(0) {
+ return mem_status, err
+ }
+ return mem_status, nil
+}
+
+// typedef struct _SYSTEM_INFO {
+// union {
+// DWORD dwOemId;
+// struct {
+// WORD wProcessorArchitecture;
+// WORD wReserved;
+// };
+// };
+// DWORD dwPageSize;
+// LPVOID lpMinimumApplicationAddress;
+// LPVOID lpMaximumApplicationAddress;
+// DWORD_PTR dwActiveProcessorMask;
+// DWORD dwNumberOfProcessors;
+// DWORD dwProcessorType;
+// DWORD dwAllocationGranularity;
+// WORD wProcessorLevel;
+// WORD wProcessorRevision;
+// } SYSTEM_INFO;
+// https://docs.microsoft.com/en-gb/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info
+type _SYSTEM_INFO struct {
+ dwOemId DWORD
+ dwPageSize DWORD
+ lpMinimumApplicationAddress uintptr
+ lpMaximumApplicationAddress uintptr
+ dwActiveProcessorMask uintptr
+ dwNumberOfProcessors DWORD
+ dwProcessorType DWORD
+ dwAllocationGranularity DWORD
+ wProcessorLevel WORD
+ wProcessorRevision WORD
+}
+
+// void WINAPI GetSystemInfo(
+// _Out_ LPSYSTEM_INFO lpSystemInfo
+// );
+// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsysteminfo
+func getSystemInfo() (_SYSTEM_INFO, error) {
+ var si _SYSTEM_INFO
+ _, _, err := procGetSystemInfo.Call(uintptr(unsafe.Pointer(&si)))
+ if err != syscall.Errno(0) {
+ return si, err
+ }
+ return si, nil
+}
+
+// BOOL GetProcessWorkingSetSize(
+// HANDLE hProcess,
+// PSIZE_T lpMinimumWorkingSetSize,
+// PSIZE_T lpMaximumWorkingSetSize
+// );
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-getprocessworkingsetsize
+
+func getProcessWorkingSetSize(process uintptr, dwMinWorkingSet *uint64, dwMaxWorkingSet *uint64) error {
+ r1, _, err := syscall.Syscall(procGetProcessWorkingSetSize.Addr(), 3, process, uintptr(unsafe.Pointer(dwMinWorkingSet)), uintptr(unsafe.Pointer(dwMaxWorkingSet)))
+ if r1 == 0 {
+ if err != syscall.Errno(0) {
+ return err
+ }
+ }
+ return nil
+}
+
+// BOOL SetProcessWorkingSetSize(
+// HANDLE hProcess,
+// SIZE_T dwMinimumWorkingSetSize,
+// SIZE_T dwMaximumWorkingSetSize
+// );
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setprocessworkingsetsize
+
+func setProcessWorkingSetSize(process uintptr, dwMinWorkingSet uint64, dwMaxWorkingSet uint64) error {
+ r1, _, err := syscall.Syscall(procSetProcessWorkingSetSize.Addr(), 3, process, uintptr(dwMinWorkingSet), uintptr(dwMaxWorkingSet))
+ if r1 == 0 {
+ if err != syscall.Errno(0) {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/weed/storage/backend/memory_map/os_overloads/file_windows.go b/weed/storage/backend/memory_map/os_overloads/file_windows.go
new file mode 100644
index 000000000..05aa384e2
--- /dev/null
+++ b/weed/storage/backend/memory_map/os_overloads/file_windows.go
@@ -0,0 +1,168 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os_overloads
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/windows"
+)
+
+func isAbs(path string) (b bool) {
+ v := volumeName(path)
+ if v == "" {
+ return false
+ }
+ path = path[len(v):]
+ if path == "" {
+ return false
+ }
+ return os.IsPathSeparator(path[0])
+}
+
+func volumeName(path string) (v string) {
+ if len(path) < 2 {
+ return ""
+ }
+ // with drive letter
+ c := path[0]
+ if path[1] == ':' &&
+ ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' ||
+ 'A' <= c && c <= 'Z') {
+ return path[:2]
+ }
+ // is it UNC
+ if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) &&
+ !os.IsPathSeparator(path[2]) && path[2] != '.' {
+ // first, leading `\\` and next shouldn't be `\`. its server name.
+ for n := 3; n < l-1; n++ {
+ // second, next '\' shouldn't be repeated.
+ if os.IsPathSeparator(path[n]) {
+ n++
+ // third, following something characters. its share name.
+ if !os.IsPathSeparator(path[n]) {
+ if path[n] == '.' {
+ break
+ }
+ for ; n < l; n++ {
+ if os.IsPathSeparator(path[n]) {
+ break
+ }
+ }
+ return path[:n]
+ }
+ break
+ }
+ }
+ }
+ return ""
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+ // Do nothing (and don't allocate) if the path is "short".
+ // Empirically (at least on the Windows Server 2013 builder),
+ // the kernel is arbitrarily okay with < 248 bytes. That
+ // matches what the docs above say:
+ // "When using an API to create a directory, the specified
+ // path cannot be so long that you cannot append an 8.3 file
+ // name (that is, the directory name cannot exceed MAX_PATH
+ // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+ //
+ // The MSDN docs appear to say that a normal path that is 248 bytes long
+ // will work; empirically the path must be less than 248 bytes long.
+ if len(path) < 248 {
+ // Don't fix. (This is how Go 1.7 and earlier worked,
+ // not automatically generating the \\?\ form)
+ return path
+ }
+
+ // The extended form begins with \\?\, as in
+ // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+ // The extended form disables evaluation of . and .. path
+ // elements and disables the interpretation of / as equivalent
+ // to \. The conversion here rewrites / to \ and elides
+ // . elements as well as trailing or duplicate separators. For
+ // simplicity it avoids the conversion entirely for relative
+ // paths or paths containing .. elements. For now,
+ // \\server\share paths are not converted to
+ // \\?\UNC\server\share paths because the rules for doing so
+ // are less well-specified.
+ if len(path) >= 2 && path[:2] == `\\` {
+ // Don't canonicalize UNC paths.
+ return path
+ }
+ if !isAbs(path) {
+ // Relative path
+ return path
+ }
+
+ const prefix = `\\?`
+
+ pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
+ copy(pathbuf, prefix)
+ n := len(path)
+ r, w := 0, len(prefix)
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty block
+ r++
+ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // /./
+ r++
+ case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // /../ is currently unhandled
+ return path
+ default:
+ pathbuf[w] = '\\'
+ w++
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ pathbuf[w] = path[r]
+ w++
+ }
+ }
+ }
+ // A drive's root directory needs a trailing \
+ if w == len(`\\?\c:`) {
+ pathbuf[w] = '\\'
+ w++
+ }
+ return string(pathbuf[:w])
+}
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+func syscallMode(i os.FileMode) (o uint32) {
+ o |= uint32(i.Perm())
+ if i&os.ModeSetuid != 0 {
+ o |= syscall.S_ISUID
+ }
+ if i&os.ModeSetgid != 0 {
+ o |= syscall.S_ISGID
+ }
+ if i&os.ModeSticky != 0 {
+ o |= syscall.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
+
+// If setToTempAndDelete is true, the file is opened with the parameters
+// FILE_ATTRIBUTE_TEMPORARY and FILE_FLAG_DELETE_ON_CLOSE
+func OpenFile(name string, flag int, perm os.FileMode, setToTempAndDelete bool) (file *os.File, err error) {
+ r, e := Open(fixLongPath(name), flag|windows.O_CLOEXEC, syscallMode(perm), setToTempAndDelete)
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
diff --git a/weed/storage/backend/memory_map/os_overloads/syscall_windows.go b/weed/storage/backend/memory_map/os_overloads/syscall_windows.go
new file mode 100644
index 000000000..081cba431
--- /dev/null
+++ b/weed/storage/backend/memory_map/os_overloads/syscall_windows.go
@@ -0,0 +1,80 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Windows system calls.
+
+package os_overloads
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// windows api calls
+
+//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
+
+func makeInheritSa() *syscall.SecurityAttributes {
+ var sa syscall.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+// Open opens the file at path; when setToTempAndDelete is true, it is created
+// with the temporary and delete-on-close attributes.
+func Open(path string, mode int, perm uint32, setToTempAndDelete bool) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
+ case windows.O_RDONLY:
+ access = windows.GENERIC_READ
+ case windows.O_WRONLY:
+ access = windows.GENERIC_WRITE
+ case windows.O_RDWR:
+ access = windows.GENERIC_READ | windows.GENERIC_WRITE
+ }
+ if mode&windows.O_CREAT != 0 {
+ access |= windows.GENERIC_WRITE
+ }
+ if mode&windows.O_APPEND != 0 {
+ access &^= windows.GENERIC_WRITE
+ access |= windows.FILE_APPEND_DATA
+ }
+ sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
+ var sa *syscall.SecurityAttributes
+ if mode&windows.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
+ createmode = windows.CREATE_NEW
+ case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
+ createmode = windows.CREATE_ALWAYS
+ case mode&windows.O_CREAT == windows.O_CREAT:
+ createmode = windows.OPEN_ALWAYS
+ case mode&windows.O_TRUNC == windows.O_TRUNC:
+ createmode = windows.TRUNCATE_EXISTING
+ default:
+ createmode = windows.OPEN_EXISTING
+ }
+
+ var h syscall.Handle
+ var e error
+
+ if setToTempAndDelete {
+ h, e = syscall.CreateFile(pathp, access, sharemode, sa, createmode, (windows.FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE), 0)
+ } else {
+ h, e = syscall.CreateFile(pathp, access, sharemode, sa, createmode, windows.FILE_ATTRIBUTE_NORMAL, 0)
+ }
+ return h, e
+}
diff --git a/weed/storage/backend/memory_map/os_overloads/types_windows.go b/weed/storage/backend/memory_map/os_overloads/types_windows.go
new file mode 100644
index 000000000..254ba3002
--- /dev/null
+++ b/weed/storage/backend/memory_map/os_overloads/types_windows.go
@@ -0,0 +1,9 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os_overloads
+
+const (
+ FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
+)
diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go
new file mode 100644
index 000000000..4706c9334
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_backend.go
@@ -0,0 +1,185 @@
+package s3_backend
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+ "github.com/google/uuid"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+)
+
+func init() {
+ backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
+}
+
+type S3BackendFactory struct {
+}
+
+func (factory *S3BackendFactory) StorageType() backend.StorageType {
+ return backend.StorageType("s3")
+}
+func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) {
+ return newS3BackendStorage(configuration, configPrefix, id)
+}
+
+type S3BackendStorage struct {
+ id string
+ aws_access_key_id string
+ aws_secret_access_key string
+ region string
+ bucket string
+ endpoint string
+ conn s3iface.S3API
+}
+
+func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) {
+ s = &S3BackendStorage{}
+ s.id = id
+ s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id")
+ s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
+ s.region = configuration.GetString(configPrefix + "region")
+ s.bucket = configuration.GetString(configPrefix + "bucket")
+ s.endpoint = configuration.GetString(configPrefix + "endpoint")
+
+ s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
+
+ glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
+ return
+}
+
+func (s *S3BackendStorage) ToProperties() map[string]string {
+ m := make(map[string]string)
+ m["aws_access_key_id"] = s.aws_access_key_id
+ m["aws_secret_access_key"] = s.aws_secret_access_key
+ m["region"] = s.region
+ m["bucket"] = s.bucket
+ m["endpoint"] = s.endpoint
+ return m
+}
+
+func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
+ if strings.HasPrefix(key, "/") {
+ key = key[1:]
+ }
+
+ f := &S3BackendStorageFile{
+ backendStorage: s,
+ key: key,
+ tierInfo: tierInfo,
+ }
+
+ return f
+}
+
+func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
+ randomUuid, _ := uuid.NewRandom()
+ key = randomUuid.String()
+
+ glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
+
+ size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
+
+ return
+}
+
+func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
+
+ glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)
+
+ size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)
+
+ return
+}
+
+func (s *S3BackendStorage) DeleteFile(key string) (err error) {
+
+ glog.V(1).Infof("delete dat file %s from remote", key)
+
+ err = deleteFromS3(s.conn, s.bucket, key)
+
+ return
+}
+
+type S3BackendStorageFile struct {
+ backendStorage *S3BackendStorage
+ key string
+ tierInfo *volume_server_pb.VolumeInfo
+}
+
+func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {
+
+ bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)
+
+ getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
+ Bucket: &s3backendStorageFile.backendStorage.bucket,
+ Key: &s3backendStorageFile.key,
+ Range: &bytesRange,
+ })
+
+ if getObjectErr != nil {
+ return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
+ }
+ defer getObjectOutput.Body.Close()
+
+ glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
+ glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)
+
+ // read until the buffer is full or the body is exhausted; Body.Read may
+ // return fewer bytes than requested, so n accumulates the total
+ for n < len(p) {
+ var m int
+ m, err = getObjectOutput.Body.Read(p[n:])
+ n += m
+ if err != nil {
+ break
+ }
+ }
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ return
+}
+
+func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
+ panic("not implemented")
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
+ panic("not implemented")
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Close() error {
+ return nil
+}
+
+func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {
+
+ files := s3backendStorageFile.tierInfo.GetFiles()
+
+ if len(files) == 0 {
+ err = fmt.Errorf("remote file info not found")
+ return
+ }
+
+ datSize = int64(files[0].FileSize)
+ modTime = time.Unix(int64(files[0].ModifiedTime), 0)
+
+ return
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Name() string {
+ return s3backendStorageFile.key
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Sync() error {
+ return nil
+}
diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go
new file mode 100644
index 000000000..dbc28446a
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_download.go
@@ -0,0 +1,98 @@
+package s3_backend
+
+import (
+ "fmt"
+ "os"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string,
+ fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+
+ fileSize, err = getFileSize(sess, sourceBucket, sourceKey)
+ if err != nil {
+ return
+ }
+
+ //open the file
+ f, err := os.OpenFile(destFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return 0, fmt.Errorf("failed to open file %q, %v", destFileName, err)
+ }
+ defer f.Close()
+
+ // Create a downloader with the session and custom options
+ downloader := s3manager.NewDownloaderWithClient(sess, func(u *s3manager.Downloader) {
+ u.PartSize = int64(64 * 1024 * 1024)
+ u.Concurrency = 5
+ })
+
+ fileWriter := &s3DownloadProgressedWriter{
+ fp: f,
+ size: fileSize,
+ written: 0,
+ fn: fn,
+ }
+
+ // Download the file from S3.
+ fileSize, err = downloader.Download(fileWriter, &s3.GetObjectInput{
+ Bucket: aws.String(sourceBucket),
+ Key: aws.String(sourceKey),
+ })
+ if err != nil {
+ return fileSize, fmt.Errorf("failed to download file %s: %v", destFileName, err)
+ }
+
+ glog.V(1).Infof("downloaded file %s\n", destFileName)
+
+ return
+}
+
+// adapted from https://github.com/aws/aws-sdk-go/pull/1868
+// and https://petersouter.xyz/s3-download-progress-bar-in-golang/
+type s3DownloadProgressedWriter struct {
+ fp *os.File
+ size int64
+ written int64
+ fn func(progressed int64, percentage float32) error
+}
+
+func (w *s3DownloadProgressedWriter) WriteAt(p []byte, off int64) (int, error) {
+ n, err := w.fp.WriteAt(p, off)
+ if err != nil {
+ return n, err
+ }
+
+ // track the total bytes written so far for the progress callback
+ atomic.AddInt64(&w.written, int64(n))
+
+ if w.fn != nil {
+ written := w.written
+ if err := w.fn(written, float32(written*100)/float32(w.size)); err != nil {
+ return n, err
+ }
+ }
+
+ return n, err
+}
+
+func getFileSize(svc s3iface.S3API, bucket string, key string) (filesize int64, err error) {
+ params := &s3.HeadObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ }
+
+ resp, err := svc.HeadObject(params)
+ if err != nil {
+ return 0, err
+ }
+
+ return *resp.ContentLength, nil
+}
diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go
new file mode 100644
index 000000000..e2fdf1eb6
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_sessions.go
@@ -0,0 +1,63 @@
+package s3_backend
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+var (
+ s3Sessions = make(map[string]s3iface.S3API)
+ sessionsLock sync.RWMutex
+)
+
+func getSession(region string) (s3iface.S3API, bool) {
+ sessionsLock.RLock()
+ defer sessionsLock.RUnlock()
+
+ sess, found := s3Sessions[region]
+ return sess, found
+}
+
+func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string) (s3iface.S3API, error) {
+
+ sessionsLock.Lock()
+ defer sessionsLock.Unlock()
+
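+ // note: the cache key is the region only, so a second backend configured in
+ // the same region reuses the first session's credentials and endpoint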
+ if t, found := s3Sessions[region]; found {
+ return t, nil
+ }
+
+ config := &aws.Config{
+ Region: aws.String(region),
+ Endpoint: aws.String(endpoint),
+ }
+ if awsAccessKeyId != "" && awsSecretAccessKey != "" {
+ config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
+ }
+
+ sess, err := session.NewSession(config)
+ if err != nil {
+ return nil, fmt.Errorf("create aws session in region %s: %v", region, err)
+ }
+
+ t := s3.New(sess)
+
+ s3Sessions[region] = t
+
+ return t, nil
+
+}
+
+func deleteFromS3(sess s3iface.S3API, sourceBucket string, sourceKey string) (err error) {
+ _, err = sess.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(sourceBucket),
+ Key: aws.String(sourceKey),
+ })
+ return err
+}
diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go
new file mode 100644
index 000000000..500a85590
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_upload.go
@@ -0,0 +1,114 @@
+package s3_backend
+
+import (
+ "fmt"
+ "os"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
+ attributes map[string]string,
+ fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+
+ //open the file
+ f, err := os.Open(filename)
+ if err != nil {
+ return 0, fmt.Errorf("failed to open file %q, %v", filename, err)
+ }
+ defer f.Close()
+
+ info, err := f.Stat()
+ if err != nil {
+ return 0, fmt.Errorf("failed to stat file %q, %v", filename, err)
+ }
+
+ fileSize = info.Size()
+
+ partSize := int64(64 * 1024 * 1024) // start with 64MB parts; S3's minimum allowed part size is 5MB
+ for partSize*1000 < fileSize {
+ partSize *= 4
+ }
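+ // e.g. for a 100GB file: 64MB*1000 < 100GB, so partSize quadruples to 256MB,
+ // giving roughly 400 parts, comfortably under S3's 10,000-part cap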
+
+ // Create an uploader with the session and custom options
+ uploader := s3manager.NewUploaderWithClient(sess, func(u *s3manager.Uploader) {
+ u.PartSize = partSize
+ u.Concurrency = 5
+ })
+
+ fileReader := &s3UploadProgressedReader{
+ fp: f,
+ size: fileSize,
+ read: -fileSize, // starts negative to offset the SDK's initial read pass over the file (see the adapted PR below)
+ fn: fn,
+ }
+
+ // process tagging
+ tags := ""
+ for k, v := range attributes {
+ if len(tags) > 0 {
+ tags = tags + "&"
+ }
+ tags = tags + k + "=" + v
+ }
+
+ // Upload the file to S3.
+ var result *s3manager.UploadOutput
+ result, err = uploader.Upload(&s3manager.UploadInput{
+ Bucket: aws.String(destBucket),
+ Key: aws.String(destKey),
+ Body: fileReader,
+ ACL: aws.String("private"),
+ ServerSideEncryption: aws.String("AES256"),
+ StorageClass: aws.String("STANDARD_IA"),
+ Tagging: aws.String(tags),
+ })
+
+ //in case it fails to upload
+ if err != nil {
+ return 0, fmt.Errorf("failed to upload file %s: %v", filename, err)
+ }
+ glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location)
+
+ return
+}
+
+// adapted from https://github.com/aws/aws-sdk-go/pull/1868
+type s3UploadProgressedReader struct {
+ fp *os.File
+ size int64
+ read int64
+ fn func(progressed int64, percentage float32) error
+}
+
+func (r *s3UploadProgressedReader) Read(p []byte) (int, error) {
+ return r.fp.Read(p)
+}
+
+func (r *s3UploadProgressedReader) ReadAt(p []byte, off int64) (int, error) {
+ n, err := r.fp.ReadAt(p, off)
+ if err != nil {
+ return n, err
+ }
+
+ // track the total bytes read (i.e. uploaded) so far for the progress callback
+ atomic.AddInt64(&r.read, int64(n))
+
+ if r.fn != nil {
+ read := r.read
+ if err := r.fn(read, float32(read*100)/float32(r.size)); err != nil {
+ return n, err
+ }
+ }
+
+ return n, err
+}
+
+func (r *s3UploadProgressedReader) Seek(offset int64, whence int) (int64, error) {
+ return r.fp.Seek(offset, whence)
+}
diff --git a/weed/storage/backend/volume_create.go b/weed/storage/backend/volume_create.go
new file mode 100644
index 000000000..abb1f7238
--- /dev/null
+++ b/weed/storage/backend/volume_create.go
@@ -0,0 +1,20 @@
+// +build !linux,!windows
+
+package backend
+
+import (
+ "os"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
+ file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ if e != nil {
+ return nil, e
+ }
+ if preallocate > 0 {
+ glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
+ }
+ return NewDiskFile(file), nil
+}
diff --git a/weed/storage/backend/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go
new file mode 100644
index 000000000..4602831ca
--- /dev/null
+++ b/weed/storage/backend/volume_create_linux.go
@@ -0,0 +1,22 @@
+// +build linux
+
+package backend
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
+ file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ if e != nil {
+ return nil, e
+ }
+ if preallocate != 0 {
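+ // mode 1 = FALLOC_FL_KEEP_SIZE: reserve the space without changing the file size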
+ syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
+ glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
+ }
+ return NewDiskFile(file), nil
+}
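
The bare `1` passed to `syscall.Fallocate` is Linux's `FALLOC_FL_KEEP_SIZE` mode, which reserves blocks without extending the reported file length. A Linux-only sketch using the named constant from `golang.org/x/sys/unix` (the path below is illustrative):

```go
// +build linux

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/prealloc.dat", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// reserve 1MB of blocks; stat still reports only the written length
	if err := unix.Fallocate(int(f.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, 1<<20); err != nil {
		log.Fatal(err)
	}
}
```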
diff --git a/weed/storage/backend/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go
new file mode 100644
index 000000000..7d40ec0d7
--- /dev/null
+++ b/weed/storage/backend/volume_create_windows.go
@@ -0,0 +1,32 @@
+// +build windows
+
+package backend
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
+ "golang.org/x/sys/windows"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads"
+)
+
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
+ if preallocate > 0 {
+ glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
+ }
+
+ if memoryMapSizeMB > 0 {
+ file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT, 0644, true)
+ if e != nil {
+ return nil, e
+ }
+ return memory_map.NewMemoryMappedFile(file, memoryMapSizeMB), nil
+ } else {
+ file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC, 0644, false)
+ if e != nil {
+ return nil, e
+ }
+ return NewDiskFile(file), nil
+ }
+
+}
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index 9589d9281..c309b3f92 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -1,76 +1,97 @@
package storage
import (
+ "fmt"
"io/ioutil"
"os"
+ "path/filepath"
"strings"
"sync"
-
- "fmt"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
type DiskLocation struct {
- Directory string
- MaxVolumeCount int
- volumes map[VolumeId]*Volume
- sync.RWMutex
+ Directory string
+ MaxVolumeCount int
+ MinFreeSpacePercent float32
+ volumes map[needle.VolumeId]*Volume
+ volumesLock sync.RWMutex
+
+ // erasure coding
+ ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume
+ ecVolumesLock sync.RWMutex
+
+ isDiskSpaceLow bool
}
-func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation {
- location := &DiskLocation{Directory: dir, MaxVolumeCount: maxVolumeCount}
- location.volumes = make(map[VolumeId]*Volume)
+func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32) *DiskLocation {
+ location := &DiskLocation{Directory: dir, MaxVolumeCount: maxVolumeCount, MinFreeSpacePercent: minFreeSpacePercent}
+ location.volumes = make(map[needle.VolumeId]*Volume)
+ location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
+ go location.CheckDiskSpace()
return location
}
-func (l *DiskLocation) volumeIdFromPath(dir os.FileInfo) (VolumeId, string, error) {
+func (l *DiskLocation) volumeIdFromPath(dir os.FileInfo) (needle.VolumeId, string, error) {
name := dir.Name()
- if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
- collection := ""
- base := name[:len(name)-len(".dat")]
- i := strings.LastIndex(base, "_")
- if i > 0 {
- collection, base = base[0:i], base[i+1:]
- }
- vol, err := NewVolumeId(base)
- return vol, collection, err
+ if !dir.IsDir() && strings.HasSuffix(name, ".idx") {
+ base := name[:len(name)-len(".idx")]
+ collection, volumeId, err := parseCollectionVolumeId(base)
+ return volumeId, collection, err
}
return 0, "", fmt.Errorf("Path is not a volume: %s", name)
}
-func (l *DiskLocation) loadExistingVolume(dir os.FileInfo, needleMapKind NeedleMapType, mutex *sync.RWMutex) {
- name := dir.Name()
- if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
- vid, collection, err := l.volumeIdFromPath(dir)
- if err == nil {
- mutex.RLock()
- _, found := l.volumes[vid]
- mutex.RUnlock()
- if !found {
- if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0); e == nil {
- mutex.Lock()
- l.volumes[vid] = v
- mutex.Unlock()
- glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
- l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String())
- } else {
- glog.V(0).Infof("new volume %s error %s", name, e)
- }
- }
- }
+func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
+ i := strings.LastIndex(base, "_")
+ if i > 0 {
+ collection, base = base[0:i], base[i+1:]
}
+ vol, err := needle.NewVolumeId(base)
+ return collection, vol, err
}
-func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrentFlag bool) {
- var concurrency int
- if concurrentFlag {
- //You could choose a better optimized concurency value after testing at your environment
- concurrency = 10
- } else {
- concurrency = 1
+func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) bool {
+ name := fileInfo.Name()
+ if !fileInfo.IsDir() && strings.HasSuffix(name, ".idx") {
+ vid, collection, err := l.volumeIdFromPath(fileInfo)
+ if err != nil {
+ glog.Warningf("get volume id failed, %s, err : %s", name, err)
+ return false
+ }
+
+ // avoid loading one volume more than once
+ l.volumesLock.RLock()
+ _, found := l.volumes[vid]
+ l.volumesLock.RUnlock()
+ if found {
+ glog.V(1).Infof("loaded volume, %v", vid)
+ return true
+ }
+
+ v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0)
+ if e != nil {
+ glog.V(0).Infof("new volume %s error %s", name, e)
+ return false
+ }
+
+ l.SetVolume(vid, v)
+
+ size, _, _ := v.FileStat()
+ glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
+ l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
+ return true
}
+ return false
+}
+
+func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) {
task_queue := make(chan os.FileInfo, 10*concurrency)
go func() {
@@ -83,13 +104,12 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
}()
var wg sync.WaitGroup
- var mutex sync.RWMutex
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for dir := range task_queue {
- l.loadExistingVolume(dir, needleMapKind, &mutex)
+ _ = l.loadExistingVolume(dir, needleMapKind)
}
}()
}
@@ -98,30 +118,62 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
- l.Lock()
- defer l.Unlock()
- l.concurrentLoadingVolumes(needleMapKind, true)
+ l.concurrentLoadingVolumes(needleMapKind, 10)
+ glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
+
+ l.loadAllEcShards()
+ glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
- glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount)
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
- l.Lock()
- defer l.Unlock()
- for k, v := range l.volumes {
- if v.Collection == collection {
- e = l.deleteVolumeById(k)
- if e != nil {
- return
+ l.volumesLock.Lock()
+ delVolsMap := l.unmountVolumeByCollection(collection)
+ l.volumesLock.Unlock()
+
+ l.ecVolumesLock.Lock()
+ delEcVolsMap := l.unmountEcVolumeByCollection(collection)
+ l.ecVolumesLock.Unlock()
+
+ errChain := make(chan error, 2)
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ for _, v := range delVolsMap {
+ if err := v.Destroy(); err != nil {
+ errChain <- err
}
}
+ wg.Done()
+ }()
+
+ go func() {
+ for _, v := range delEcVolsMap {
+ v.Destroy()
+ }
+ wg.Done()
+ }()
+
+ go func() {
+ wg.Wait()
+ close(errChain)
+ }()
+
+ errBuilder := strings.Builder{}
+ for err := range errChain {
+ errBuilder.WriteString(err.Error())
+ errBuilder.WriteString("; ")
+ }
+ if errBuilder.Len() > 0 {
+ e = fmt.Errorf("%s", errBuilder.String())
}
+
return
}
-func (l *DiskLocation) deleteVolumeById(vid VolumeId) (e error) {
+func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
@@ -130,39 +182,33 @@ func (l *DiskLocation) deleteVolumeById(vid VolumeId) (e error) {
if e != nil {
return
}
+ found = true
delete(l.volumes, vid)
return
}
-func (l *DiskLocation) LoadVolume(vid VolumeId, needleMapKind NeedleMapType) bool {
- if dirs, err := ioutil.ReadDir(l.Directory); err == nil {
- for _, dir := range dirs {
- volId, _, err := l.volumeIdFromPath(dir)
- if vid == volId && err == nil {
- var mutex sync.RWMutex
- l.loadExistingVolume(dir, needleMapKind, &mutex)
- return true
- }
- }
+func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool {
+ if fileInfo, found := l.LocateVolume(vid); found {
+ return l.loadExistingVolume(fileInfo, needleMapKind)
}
-
return false
}
-func (l *DiskLocation) DeleteVolume(vid VolumeId) error {
- l.Lock()
- defer l.Unlock()
+func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error {
+ l.volumesLock.Lock()
+ defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return fmt.Errorf("Volume not found, VolumeId: %d", vid)
}
- return l.deleteVolumeById(vid)
+ _, err := l.deleteVolumeById(vid)
+ return err
}
-func (l *DiskLocation) UnloadVolume(vid VolumeId) error {
- l.Lock()
- defer l.Unlock()
+func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
+ l.volumesLock.Lock()
+ defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
@@ -173,34 +219,102 @@ func (l *DiskLocation) UnloadVolume(vid VolumeId) error {
return nil
}
-func (l *DiskLocation) SetVolume(vid VolumeId, volume *Volume) {
- l.Lock()
- defer l.Unlock()
+func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
+ deltaVols := make(map[needle.VolumeId]*Volume, 0)
+ for k, v := range l.volumes {
+ if v.Collection == collectionName && !v.isCompacting {
+ deltaVols[k] = v
+ }
+ }
+
+ for k := range deltaVols {
+ delete(l.volumes, k)
+ }
+ return deltaVols
+}
+
+func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
+ l.volumesLock.Lock()
+ defer l.volumesLock.Unlock()
l.volumes[vid] = volume
+ volume.location = l
}
-func (l *DiskLocation) FindVolume(vid VolumeId) (*Volume, bool) {
- l.RLock()
- defer l.RUnlock()
+func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
+ l.volumesLock.RLock()
+ defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
- l.RLock()
- defer l.RUnlock()
+ l.volumesLock.RLock()
+ defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) Close() {
- l.Lock()
- defer l.Unlock()
-
+ l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
+ l.volumesLock.Unlock()
+
+ l.ecVolumesLock.Lock()
+ for _, ecVolume := range l.ecVolumes {
+ ecVolume.Close()
+ }
+ l.ecVolumesLock.Unlock()
+
+ return
+}
+
+func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) {
+ if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil {
+ for _, fileInfo := range fileInfos {
+ volId, _, err := l.volumeIdFromPath(fileInfo)
+ if vid == volId && err == nil {
+ return fileInfo, true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
+
+ l.volumesLock.RLock()
+ defer l.volumesLock.RUnlock()
+
+ for _, vol := range l.volumes {
+ if vol.IsReadOnly() {
+ continue
+ }
+ datSize, idxSize, _ := vol.FileStat()
+ unUsedSpace += volumeSizeLimit - (datSize + idxSize)
+ }
+
return
}
+
+func (l *DiskLocation) CheckDiskSpace() {
+ for {
+ if dir, e := filepath.Abs(l.Directory); e == nil {
+ s := stats.NewDiskStatus(dir)
+ if (s.PercentFree < l.MinFreeSpacePercent) != l.isDiskSpaceLow {
+ l.isDiskSpaceLow = !l.isDiskSpaceLow
+ }
+ if l.isDiskSpaceLow {
+ glog.V(0).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+ } else {
+ glog.V(4).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+ }
+ }
+ time.Sleep(time.Minute)
+ }
+
+}
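
`CheckDiskSpace` only flips `isDiskSpaceLow` when the comparison result changes, so the loud V(0) log repeats for as long as the disk stays low and drops back to V(4) once it recovers. A tiny sketch of that toggle:

```go
package main

import "fmt"

func main() {
	isLow := false
	minFree := float32(7.0)
	// simulated once-a-minute free-space samples
	for _, freePercent := range []float32{12.3, 6.8, 6.5, 9.1} {
		if (freePercent < minFree) != isLow {
			isLow = !isLow // crossed the threshold in either direction
		}
		fmt.Printf("free=%.1f%% low=%v\n", freePercent, isLow)
	}
}
```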
diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go
new file mode 100644
index 000000000..72d3e2b3e
--- /dev/null
+++ b/weed/storage/disk_location_ec.go
@@ -0,0 +1,192 @@
+package storage
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path"
+ "regexp"
+ "sort"
+ "strconv"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+var (
+ re = regexp.MustCompile("\\.ec[0-9][0-9]")
+)
+
+func (l *DiskLocation) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) {
+ l.ecVolumesLock.RLock()
+ defer l.ecVolumesLock.RUnlock()
+
+ ecVolume, ok := l.ecVolumes[vid]
+ if ok {
+ return ecVolume, true
+ }
+ return nil, false
+}
+
+func (l *DiskLocation) DestroyEcVolume(vid needle.VolumeId) {
+ l.ecVolumesLock.Lock()
+ defer l.ecVolumesLock.Unlock()
+
+ ecVolume, found := l.ecVolumes[vid]
+ if found {
+ ecVolume.Destroy()
+ delete(l.ecVolumes, vid)
+ }
+}
+
+func (l *DiskLocation) FindEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) (*erasure_coding.EcVolumeShard, bool) {
+ l.ecVolumesLock.RLock()
+ defer l.ecVolumesLock.RUnlock()
+
+ ecVolume, ok := l.ecVolumes[vid]
+ if !ok {
+ return nil, false
+ }
+ for _, ecShard := range ecVolume.Shards {
+ if ecShard.ShardId == shardId {
+ return ecShard, true
+ }
+ }
+ return nil, false
+}
+
+func (l *DiskLocation) LoadEcShard(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) (err error) {
+
+ ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.Directory, collection, vid, shardId)
+ if err != nil {
+ return fmt.Errorf("failed to create ec shard %d.%d: %v", vid, shardId, err)
+ }
+ l.ecVolumesLock.Lock()
+ defer l.ecVolumesLock.Unlock()
+ ecVolume, found := l.ecVolumes[vid]
+ if !found {
+ ecVolume, err = erasure_coding.NewEcVolume(l.Directory, collection, vid)
+ if err != nil {
+ return fmt.Errorf("failed to create ec volume %d: %v", vid, err)
+ }
+ l.ecVolumes[vid] = ecVolume
+ }
+ ecVolume.AddEcVolumeShard(ecVolumeShard)
+
+ return nil
+}
+
+func (l *DiskLocation) UnloadEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) bool {
+
+ l.ecVolumesLock.Lock()
+ defer l.ecVolumesLock.Unlock()
+
+ ecVolume, found := l.ecVolumes[vid]
+ if !found {
+ return false
+ }
+ if _, deleted := ecVolume.DeleteEcVolumeShard(shardId); deleted {
+ if len(ecVolume.Shards) == 0 {
+ delete(l.ecVolumes, vid)
+ ecVolume.Close()
+ }
+ return true
+ }
+
+ return true
+}
+
+func (l *DiskLocation) loadEcShards(shards []string, collection string, vid needle.VolumeId) (err error) {
+
+ for _, shard := range shards {
+ shardId, err := strconv.ParseInt(path.Ext(shard)[3:], 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse ec shard name %v: %v", shard, err)
+ }
+
+ err = l.LoadEcShard(collection, vid, erasure_coding.ShardId(shardId))
+ if err != nil {
+ return fmt.Errorf("failed to load ec shard %v: %v", shard, err)
+ }
+ }
+
+ return nil
+}
+
+func (l *DiskLocation) loadAllEcShards() (err error) {
+
+ fileInfos, err := ioutil.ReadDir(l.Directory)
+ if err != nil {
+ return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err)
+ }
+
+ sort.Slice(fileInfos, func(i, j int) bool {
+ return fileInfos[i].Name() < fileInfos[j].Name()
+ })
+
+ var sameVolumeShards []string
+ var prevVolumeId needle.VolumeId
+ for _, fileInfo := range fileInfos {
+ if fileInfo.IsDir() {
+ continue
+ }
+ ext := path.Ext(fileInfo.Name())
+ name := fileInfo.Name()
+ baseName := name[:len(name)-len(ext)]
+
+ collection, volumeId, err := parseCollectionVolumeId(baseName)
+ if err != nil {
+ continue
+ }
+
+ if re.MatchString(ext) {
+ if prevVolumeId == 0 || volumeId == prevVolumeId {
+ sameVolumeShards = append(sameVolumeShards, fileInfo.Name())
+ } else {
+ sameVolumeShards = []string{fileInfo.Name()}
+ }
+ prevVolumeId = volumeId
+ continue
+ }
+
+ if ext == ".ecx" && volumeId == prevVolumeId {
+ if err = l.loadEcShards(sameVolumeShards, collection, volumeId); err != nil {
+ return fmt.Errorf("loadEcShards collection:%v volumeId:%d : %v", collection, volumeId, err)
+ }
+ prevVolumeId = volumeId
+ continue
+ }
+
+ }
+ return nil
+}
+
+func (l *DiskLocation) deleteEcVolumeById(vid needle.VolumeId) (e error) {
+ ecVolume, ok := l.ecVolumes[vid]
+ if !ok {
+ return
+ }
+ ecVolume.Destroy()
+ delete(l.ecVolumes, vid)
+ return
+}
+
+func (l *DiskLocation) unmountEcVolumeByCollection(collectionName string) map[needle.VolumeId]*erasure_coding.EcVolume {
+ deltaVols := make(map[needle.VolumeId]*erasure_coding.EcVolume, 0)
+ for k, v := range l.ecVolumes {
+ if v.Collection == collectionName {
+ deltaVols[k] = v
+ }
+ }
+
+ for k := range deltaVols {
+ delete(l.ecVolumes, k)
+ }
+ return deltaVols
+}
+
+func (l *DiskLocation) EcVolumesLen() int {
+ l.ecVolumesLock.RLock()
+ defer l.ecVolumesLock.RUnlock()
+
+ return len(l.ecVolumes)
+}
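
`loadAllEcShards` leans on the sorted directory listing: a volume's `.ec00`–`.ec13` shard files sort immediately before its `.ecx` index, so shards accumulate until the matching `.ecx` appears. A standalone sketch of that grouping (volume-id bookkeeping omitted):

```go
package main

import (
	"fmt"
	"path"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`\.ec[0-9][0-9]`)
	// already-sorted names, as ioutil.ReadDir would return them
	names := []string{"1.ec00", "1.ec01", "1.ecx", "2.ec00", "2.ec01", "2.ecx"}

	var sameVolumeShards []string
	for _, name := range names {
		ext := path.Ext(name)
		if re.MatchString(ext) {
			sameVolumeShards = append(sameVolumeShards, name)
			continue
		}
		if ext == ".ecx" {
			fmt.Printf("load shards %v for index %s\n", sameVolumeShards, name)
			sameVolumeShards = nil
		}
	}
}
```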
diff --git a/weed/storage/erasure_coding/1.dat b/weed/storage/erasure_coding/1.dat
new file mode 100644
index 000000000..869427926
Binary files /dev/null and b/weed/storage/erasure_coding/1.dat differ
diff --git a/weed/storage/erasure_coding/1.idx b/weed/storage/erasure_coding/1.idx
new file mode 100644
index 000000000..65a950e64
Binary files /dev/null and b/weed/storage/erasure_coding/1.idx differ
diff --git a/weed/storage/erasure_coding/389.ecx b/weed/storage/erasure_coding/389.ecx
new file mode 100644
index 000000000..158781920
Binary files /dev/null and b/weed/storage/erasure_coding/389.ecx differ
diff --git a/weed/storage/erasure_coding/ec_decoder.go b/weed/storage/erasure_coding/ec_decoder.go
new file mode 100644
index 000000000..7b42d02e7
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_decoder.go
@@ -0,0 +1,202 @@
+package erasure_coding
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// write .idx file from .ecx and .ecj files
+func WriteIdxFileFromEcIndex(baseFileName string) (err error) {
+
+ ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
+ if openErr != nil {
+ return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
+ }
+ defer ecxFile.Close()
+
+ idxFile, openErr := os.OpenFile(baseFileName+".idx", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("cannot open %s.idx: %v", baseFileName, openErr)
+ }
+ defer idxFile.Close()
+
+ if _, err = io.Copy(idxFile, ecxFile); err != nil {
+ return fmt.Errorf("cannot copy %s.ecx to %s.idx: %v", baseFileName, baseFileName, err)
+ }
+
+ err = iterateEcjFile(baseFileName, func(key types.NeedleId) error {
+
+ bytes := needle_map.ToBytes(key, types.Offset{}, types.TombstoneFileSize)
+ _, writeErr := idxFile.Write(bytes)
+
+ return writeErr
+ })
+
+ return err
+}
+
+// FindDatFileSize calculates the .dat file size from the entry with the largest offset.
+// Deletion entries may follow that entry, but deletions never extend the file.
+func FindDatFileSize(baseFileName string) (datSize int64, err error) {
+
+ version, err := readEcVolumeVersion(baseFileName)
+ if err != nil {
+ return 0, fmt.Errorf("read ec volume %s version: %v", baseFileName, err)
+ }
+
+ err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size uint32) error {
+
+ if size == types.TombstoneFileSize {
+ return nil
+ }
+
+ entryStopOffset := offset.ToAcutalOffset() + needle.GetActualSize(size, version)
+ if datSize < entryStopOffset {
+ datSize = entryStopOffset
+ }
+
+ return nil
+ })
+
+ return
+}
+
+func readEcVolumeVersion(baseFileName string) (version needle.Version, err error) {
+
+ // find volume version
+ datFile, err := os.OpenFile(baseFileName+".ec00", os.O_RDONLY, 0644)
+ if err != nil {
+ return 0, fmt.Errorf("open ec volume %s superblock: %v", baseFileName, err)
+ }
+ datBackend := backend.NewDiskFile(datFile)
+
+ superBlock, err := super_block.ReadSuperBlock(datBackend)
+ datBackend.Close()
+ if err != nil {
+ return 0, fmt.Errorf("read ec volume %s superblock: %v", baseFileName, err)
+ }
+
+ return superBlock.Version, nil
+
+}
+
+func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+ ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
+ if openErr != nil {
+ return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
+ }
+ defer ecxFile.Close()
+
+ buf := make([]byte, types.NeedleMapEntrySize)
+ for {
+ n, err := ecxFile.Read(buf)
+ if n != types.NeedleMapEntrySize {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ key, offset, size := idx.IdxFileEntry(buf)
+ if processNeedleFn != nil {
+ err = processNeedleFn(key, offset, size)
+ }
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ return nil
+ }
+ }
+
+}
+
+func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error {
+ if !util.FileExists(baseFileName+".ecj") {
+ return nil
+ }
+ ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644)
+ if openErr != nil {
+ return fmt.Errorf("cannot open ec index %s.ecj: %v", baseFileName, openErr)
+ }
+ defer ecjFile.Close()
+
+ buf := make([]byte, types.NeedleIdSize)
+ for {
+ n, err := ecjFile.Read(buf)
+ if n != types.NeedleIdSize {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ if processNeedleFn != nil {
+ err = processNeedleFn(types.BytesToNeedleId(buf))
+ }
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+
+}
+
+// WriteDatFile generates the .dat file from the .ec00 ~ .ec09 data shard files
+func WriteDatFile(baseFileName string, datFileSize int64) error {
+
+ datFile, openErr := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("cannot write volume %s.dat: %v", baseFileName, openErr)
+ }
+ defer datFile.Close()
+
+ inputFiles := make([]*os.File, DataShardsCount)
+
+ for shardId := 0; shardId < DataShardsCount; shardId++ {
+ shardFileName := baseFileName + ToExt(shardId)
+ inputFiles[shardId], openErr = os.OpenFile(shardFileName, os.O_RDONLY, 0)
+ if openErr != nil {
+ return openErr
+ }
+ defer inputFiles[shardId].Close()
+ }
+
+ for datFileSize >= DataShardsCount*ErasureCodingLargeBlockSize {
+ for shardId := 0; shardId < DataShardsCount; shardId++ {
+ w, err := io.CopyN(datFile, inputFiles[shardId], ErasureCodingLargeBlockSize)
+ if w != ErasureCodingLargeBlockSize {
+ return fmt.Errorf("copy %s large block %d: %v", baseFileName, shardId, err)
+ }
+ datFileSize -= ErasureCodingLargeBlockSize
+ }
+ }
+
+ for datFileSize > 0 {
+ for shardId := 0; shardId < DataShardsCount; shardId++ {
+ toRead := min(datFileSize, ErasureCodingSmallBlockSize)
+ w, err := io.CopyN(datFile, inputFiles[shardId], toRead)
+ if w != toRead {
+ return fmt.Errorf("copy %s small block %d: %v", baseFileName, shardId, err)
+ }
+ datFileSize -= toRead
+ }
+ }
+
+ return nil
+}
+
+func min(x, y int64) int64 {
+ if x > y {
+ return y
+ }
+ return x
+}
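
The copy loops in `WriteDatFile` mirror the on-disk striping: whole rows of 1GB blocks across the 10 data shards first, then rows of 1MB blocks for the tail. A worked-arithmetic sketch of how a given `.dat` size splits:

```go
package main

import "fmt"

const (
	dataShards = 10
	largeBlock = int64(1024 * 1024 * 1024) // ErasureCodingLargeBlockSize
	smallBlock = int64(1024 * 1024)        // ErasureCodingSmallBlockSize
)

func main() {
	datSize := int64(23)*largeBlock + 5*smallBlock + 123

	largeRows := datSize / (largeBlock * dataShards)
	tail := datSize - largeRows*largeBlock*dataShards
	smallRows := tail / (smallBlock * dataShards)
	if tail%(smallBlock*dataShards) != 0 {
		smallRows++ // last, partially filled small-block row
	}
	fmt.Printf("%d bytes -> %d large rows, %d small rows (tail %d bytes)\n",
		datSize, largeRows, smallRows, tail)
}
```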
diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go
new file mode 100644
index 000000000..5f0f20284
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_encoder.go
@@ -0,0 +1,306 @@
+package erasure_coding
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/klauspost/reedsolomon"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DataShardsCount = 10
+ ParityShardsCount = 4
+ TotalShardsCount = DataShardsCount + ParityShardsCount
+ ErasureCodingLargeBlockSize = 1024 * 1024 * 1024 // 1GB
+ ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB
+)
+
+// WriteSortedFileFromIdx generates .ecx file from existing .idx file
+// all keys are sorted in ascending order
+func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
+
+ nm, err := readNeedleMap(baseFileName)
+ if nm != nil {
+ defer nm.Close()
+ }
+ if err != nil {
+ return fmt.Errorf("readNeedleMap: %v", err)
+ }
+
+ ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return fmt.Errorf("failed to open ecx file: %v", err)
+ }
+ defer ecxFile.Close()
+
+ err = nm.AscendingVisit(func(value needle_map.NeedleValue) error {
+ bytes := value.ToBytes()
+ _, writeErr := ecxFile.Write(bytes)
+ return writeErr
+ })
+
+ if err != nil {
+ return fmt.Errorf("failed to visit idx file: %v", err)
+ }
+
+ return nil
+}
+
+// WriteEcFiles generates .ec00 ~ .ec13 files
+func WriteEcFiles(baseFileName string) error {
+ return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)
+}
+
+func RebuildEcFiles(baseFileName string) ([]uint32, error) {
+ return generateMissingEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)
+}
+
+func ToExt(ecIndex int) string {
+ return fmt.Sprintf(".ec%02d", ecIndex)
+}
+
+func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) error {
+ file, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("failed to open dat file: %v", err)
+ }
+ defer file.Close()
+
+ fi, err := file.Stat()
+ if err != nil {
+ return fmt.Errorf("failed to stat dat file: %v", err)
+ }
+
+ glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
+ err = encodeDatFile(fi.Size(), err, baseFileName, bufferSize, largeBlockSize, file, smallBlockSize)
+ if err != nil {
+ return fmt.Errorf("encodeDatFile: %v", err)
+ }
+ return nil
+}
+
+func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) (generatedShardIds []uint32, err error) {
+
+ shardHasData := make([]bool, TotalShardsCount)
+ inputFiles := make([]*os.File, TotalShardsCount)
+ outputFiles := make([]*os.File, TotalShardsCount)
+ for shardId := 0; shardId < TotalShardsCount; shardId++ {
+ shardFileName := baseFileName + ToExt(shardId)
+ if util.FileExists(shardFileName) {
+ shardHasData[shardId] = true
+ inputFiles[shardId], err = os.OpenFile(shardFileName, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer inputFiles[shardId].Close()
+ } else {
+ outputFiles[shardId], err = os.OpenFile(shardFileName, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ defer outputFiles[shardId].Close()
+ generatedShardIds = append(generatedShardIds, uint32(shardId))
+ }
+ }
+
+ err = rebuildEcFiles(shardHasData, inputFiles, outputFiles)
+ if err != nil {
+ return nil, fmt.Errorf("rebuildEcFiles: %v", err)
+ }
+ return
+}
+
+func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error {
+
+ bufferSize := int64(len(buffers[0]))
+ batchCount := blockSize / bufferSize
+ if blockSize%bufferSize != 0 {
+ glog.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize)
+ }
+
+ for b := int64(0); b < batchCount; b++ {
+ err := encodeDataOneBatch(file, enc, startOffset+b*bufferSize, blockSize, buffers, outputs)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func openEcFiles(baseFileName string, forRead bool) (files []*os.File, err error) {
+ for i := 0; i < TotalShardsCount; i++ {
+ fname := baseFileName + ToExt(i)
+ openOption := os.O_TRUNC | os.O_CREATE | os.O_WRONLY
+ if forRead {
+ openOption = os.O_RDONLY
+ }
+ f, err := os.OpenFile(fname, openOption, 0644)
+ if err != nil {
+ return files, fmt.Errorf("failed to open file %s: %v", fname, err)
+ }
+ files = append(files, f)
+ }
+ return
+}
+
+func closeEcFiles(files []*os.File) {
+ for _, f := range files {
+ if f != nil {
+ f.Close()
+ }
+ }
+}
+
+func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error {
+
+ // read data into buffers
+ for i := 0; i < DataShardsCount; i++ {
+ n, err := file.ReadAt(buffers[i], startOffset+blockSize*int64(i))
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ }
+ if n < len(buffers[i]) {
+ for t := len(buffers[i]) - 1; t >= n; t-- {
+ buffers[i][t] = 0
+ }
+ }
+ }
+
+ err := enc.Encode(buffers)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < TotalShardsCount; i++ {
+ _, err := outputs[i].Write(buffers[i])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func encodeDatFile(remainingSize int64, err error, baseFileName string, bufferSize int, largeBlockSize int64, file *os.File, smallBlockSize int64) error {
+
+ var processedSize int64
+
+ enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
+ if err != nil {
+ return fmt.Errorf("failed to create encoder: %v", err)
+ }
+
+ buffers := make([][]byte, TotalShardsCount)
+ for i := range buffers {
+ buffers[i] = make([]byte, bufferSize)
+ }
+
+ outputs, err := openEcFiles(baseFileName, false)
+ defer closeEcFiles(outputs)
+ if err != nil {
+ return fmt.Errorf("failed to open ec files %s: %v", baseFileName, err)
+ }
+
+ for remainingSize > largeBlockSize*DataShardsCount {
+ err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs)
+ if err != nil {
+ return fmt.Errorf("failed to encode large chunk data: %v", err)
+ }
+ remainingSize -= largeBlockSize * DataShardsCount
+ processedSize += largeBlockSize * DataShardsCount
+ }
+ for remainingSize > 0 {
+ err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs)
+ if err != nil {
+ return fmt.Errorf("failed to encode small chunk data: %v", err)
+ }
+ remainingSize -= smallBlockSize * DataShardsCount
+ processedSize += smallBlockSize * DataShardsCount
+ }
+ return nil
+}
+
+func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*os.File) error {
+
+ enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
+ if err != nil {
+ return fmt.Errorf("failed to create encoder: %v", err)
+ }
+
+ buffers := make([][]byte, TotalShardsCount)
+ for i := range buffers {
+ if shardHasData[i] {
+ buffers[i] = make([]byte, ErasureCodingSmallBlockSize)
+ }
+ }
+
+ var startOffset int64
+ var inputBufferDataSize int
+ for {
+
+ // read the input data from files
+ for i := 0; i < TotalShardsCount; i++ {
+ if shardHasData[i] {
+ n, _ := inputFiles[i].ReadAt(buffers[i], startOffset)
+ if n == 0 {
+ return nil
+ }
+ if inputBufferDataSize == 0 {
+ inputBufferDataSize = n
+ }
+ if inputBufferDataSize != n {
+ return fmt.Errorf("ec shard size expected %d actual %d", inputBufferDataSize, n)
+ }
+ } else {
+ buffers[i] = nil
+ }
+ }
+
+ // encode the data
+ err = enc.Reconstruct(buffers)
+ if err != nil {
+ return fmt.Errorf("reconstruct: %v", err)
+ }
+
+ // write the data to output files
+ for i := 0; i < TotalShardsCount; i++ {
+ if !shardHasData[i] {
+ n, _ := outputFiles[i].WriteAt(buffers[i][:inputBufferDataSize], startOffset)
+ if inputBufferDataSize != n {
+ return fmt.Errorf("fail to write to %s", outputFiles[i].Name())
+ }
+ }
+ }
+ startOffset += int64(inputBufferDataSize)
+ }
+
+}
+
+func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) {
+ indexFile, err := os.OpenFile(baseFileName+".idx", os.O_RDONLY, 0644)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read Volume Index %s.idx: %v", baseFileName, err)
+ }
+ defer indexFile.Close()
+
+ cm := needle_map.NewMemDb()
+ err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
+ if !offset.IsZero() && size != types.TombstoneFileSize {
+ cm.Set(key, offset, size)
+ } else {
+ cm.Delete(key)
+ }
+ return nil
+ })
+ return cm, err
+}
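
An end-to-end sketch of the two exported entry points, assuming a volume's `1.dat` and `1.idx` sit in the working directory (as this package's test fixtures do):

```go
package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	// stripe 1.dat into the 14 shard files 1.ec00 … 1.ec13
	if err := erasure_coding.WriteEcFiles("1"); err != nil {
		log.Fatalf("WriteEcFiles: %v", err)
	}
	// derive the sorted 1.ecx index from the existing 1.idx
	if err := erasure_coding.WriteSortedFileFromIdx("1", ".ecx"); err != nil {
		log.Fatalf("WriteSortedFileFromIdx: %v", err)
	}
}
```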
diff --git a/weed/storage/erasure_coding/ec_locate.go b/weed/storage/erasure_coding/ec_locate.go
new file mode 100644
index 000000000..562966f8f
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_locate.go
@@ -0,0 +1,83 @@
+package erasure_coding
+
+type Interval struct {
+ BlockIndex int
+ InnerBlockOffset int64
+ Size uint32
+ IsLargeBlock bool
+ LargeBlockRowsCount int
+}
+
+func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset int64, size uint32) (intervals []Interval) {
+ blockIndex, isLargeBlock, innerBlockOffset := locateOffset(largeBlockLength, smallBlockLength, datSize, offset)
+
+ // add DataShardsCount*smallBlockLength so the number of large-block rows can be derived from a shard's size
+ nLargeBlockRows := int((datSize + DataShardsCount*smallBlockLength) / (largeBlockLength * DataShardsCount))
+
+ for size > 0 {
+ interval := Interval{
+ BlockIndex: blockIndex,
+ InnerBlockOffset: innerBlockOffset,
+ IsLargeBlock: isLargeBlock,
+ LargeBlockRowsCount: nLargeBlockRows,
+ }
+
+ blockRemaining := largeBlockLength - innerBlockOffset
+ if !isLargeBlock {
+ blockRemaining = smallBlockLength - innerBlockOffset
+ }
+
+ if int64(size) <= blockRemaining {
+ interval.Size = size
+ intervals = append(intervals, interval)
+ return
+ }
+ interval.Size = uint32(blockRemaining)
+ intervals = append(intervals, interval)
+
+ size -= interval.Size
+ blockIndex += 1
+ if isLargeBlock && blockIndex == nLargeBlockRows*DataShardsCount {
+ isLargeBlock = false
+ blockIndex = 0
+ }
+ innerBlockOffset = 0
+
+ }
+ return
+}
+
+func locateOffset(largeBlockLength, smallBlockLength int64, datSize int64, offset int64) (blockIndex int, isLargeBlock bool, innerBlockOffset int64) {
+ largeRowSize := largeBlockLength * DataShardsCount
+ nLargeBlockRows := datSize / (largeBlockLength * DataShardsCount)
+
+ // if offset is within the large block area
+ if offset < nLargeBlockRows*largeRowSize {
+ isLargeBlock = true
+ blockIndex, innerBlockOffset = locateOffsetWithinBlocks(largeBlockLength, offset)
+ return
+ }
+
+ isLargeBlock = false
+ offset -= nLargeBlockRows * largeRowSize
+ blockIndex, innerBlockOffset = locateOffsetWithinBlocks(smallBlockLength, offset)
+ return
+}
+
+func locateOffsetWithinBlocks(blockLength int64, offset int64) (blockIndex int, innerBlockOffset int64) {
+ blockIndex = int(offset / blockLength)
+ innerBlockOffset = offset % blockLength
+ return
+}
+
+func (interval Interval) ToShardIdAndOffset(largeBlockSize, smallBlockSize int64) (ShardId, int64) {
+ ecFileOffset := interval.InnerBlockOffset
+ rowIndex := interval.BlockIndex / DataShardsCount
+ if interval.IsLargeBlock {
+ ecFileOffset += int64(rowIndex) * largeBlockSize
+ } else {
+ ecFileOffset += int64(interval.LargeBlockRowsCount)*largeBlockSize + int64(rowIndex)*smallBlockSize
+ }
+ ecFileIndex := interval.BlockIndex % DataShardsCount
+ return ShardId(ecFileIndex), ecFileOffset
+}
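
A worked example of the mapping above, from a needle's `.dat` offset to a shard id plus in-shard offset, using the production block sizes:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	large := int64(erasure_coding.ErasureCodingLargeBlockSize)
	small := int64(erasure_coding.ErasureCodingSmallBlockSize)
	datSize := 25 * large // two full large-block rows plus a tail

	// a 100-byte needle at .dat offset 3*large+42 lands in large block #3
	intervals := erasure_coding.LocateData(large, small, datSize, 3*large+42, 100)
	for _, interval := range intervals {
		shardId, shardOffset := interval.ToShardIdAndOffset(large, small)
		fmt.Printf("shard %d, offset %d, size %d\n", shardId, shardOffset, interval.Size)
	}
	// prints: shard 3, offset 42, size 100
}
```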
diff --git a/weed/storage/erasure_coding/ec_shard.go b/weed/storage/erasure_coding/ec_shard.go
new file mode 100644
index 000000000..47e6d3d1e
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_shard.go
@@ -0,0 +1,91 @@
+package erasure_coding
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+type ShardId uint8
+
+type EcVolumeShard struct {
+ VolumeId needle.VolumeId
+ ShardId ShardId
+ Collection string
+ dir string
+ ecdFile *os.File
+ ecdFileSize int64
+}
+
+func NewEcVolumeShard(dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) {
+
+ v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId}
+
+ baseFileName := v.FileName()
+
+ // open ecd file
+ if v.ecdFile, e = os.OpenFile(baseFileName+ToExt(int(shardId)), os.O_RDONLY, 0644); e != nil {
+ return nil, fmt.Errorf("cannot read ec volume shard %s.%s: %v", baseFileName, ToExt(int(shardId)), e)
+ }
+ ecdFi, statErr := v.ecdFile.Stat()
+ if statErr != nil {
+ return nil, fmt.Errorf("can not stat ec volume shard %s.%s: %v", baseFileName, ToExt(int(shardId)), statErr)
+ }
+ v.ecdFileSize = ecdFi.Size()
+
+ stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "ec_shards").Inc()
+
+ return
+}
+
+func (shard *EcVolumeShard) Size() int64 {
+ return shard.ecdFileSize
+}
+
+func (shard *EcVolumeShard) String() string {
+ return fmt.Sprintf("ec shard %v:%v, dir:%s, Collection:%s", shard.VolumeId, shard.ShardId, shard.dir, shard.Collection)
+}
+
+func (shard *EcVolumeShard) FileName() (fileName string) {
+ return EcShardFileName(shard.Collection, shard.dir, int(shard.VolumeId))
+}
+
+func EcShardFileName(collection string, dir string, id int) (fileName string) {
+ idString := strconv.Itoa(id)
+ if collection == "" {
+ fileName = path.Join(dir, idString)
+ } else {
+ fileName = path.Join(dir, collection+"_"+idString)
+ }
+ return
+}
+
+func EcShardBaseFileName(collection string, id int) (baseFileName string) {
+ baseFileName = strconv.Itoa(id)
+ if collection != "" {
+ baseFileName = collection + "_" + baseFileName
+ }
+ return
+}
+
+func (shard *EcVolumeShard) Close() {
+ if shard.ecdFile != nil {
+ _ = shard.ecdFile.Close()
+ shard.ecdFile = nil
+ }
+}
+
+func (shard *EcVolumeShard) Destroy() {
+ os.Remove(shard.FileName() + ToExt(int(shard.ShardId)))
+ stats.VolumeServerVolumeCounter.WithLabelValues(shard.Collection, "ec_shards").Dec()
+}
+
+func (shard *EcVolumeShard) ReadAt(buf []byte, offset int64) (int, error) {
+
+ return shard.ecdFile.ReadAt(buf, offset)
+
+}
diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go
new file mode 100644
index 000000000..92b83cdc8
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_test.go
@@ -0,0 +1,207 @@
+package erasure_coding
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "os"
+ "testing"
+
+ "github.com/klauspost/reedsolomon"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+const (
+ largeBlockSize = 10000
+ smallBlockSize = 100
+)
+
+func TestEncodingDecoding(t *testing.T) {
+ bufferSize := 50
+ baseFileName := "1"
+
+ err := generateEcFiles(baseFileName, bufferSize, largeBlockSize, smallBlockSize)
+ if err != nil {
+ t.Logf("generateEcFiles: %v", err)
+ }
+
+ err = WriteSortedFileFromIdx(baseFileName, ".ecx")
+ if err != nil {
+ t.Logf("WriteSortedFileFromIdx: %v", err)
+ }
+
+ err = validateFiles(baseFileName)
+ if err != nil {
+ t.Logf("WriteSortedFileFromIdx: %v", err)
+ }
+
+ removeGeneratedFiles(baseFileName)
+
+}
+
+func validateFiles(baseFileName string) error {
+ nm, err := readNeedleMap(baseFileName)
+ defer nm.Close()
+ if err != nil {
+ return fmt.Errorf("readNeedleMap: %v", err)
+ }
+
+ datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("failed to open dat file: %v", err)
+ }
+ defer datFile.Close()
+
+ fi, err := datFile.Stat()
+ if err != nil {
+ return fmt.Errorf("failed to stat dat file: %v", err)
+ }
+
+ ecFiles, err := openEcFiles(baseFileName, true)
+ defer closeEcFiles(ecFiles)
+ if err != nil {
+ return fmt.Errorf("openEcFiles: %v", err)
+ }
+
+ err = nm.AscendingVisit(func(value needle_map.NeedleValue) error {
+ return assertSame(datFile, fi.Size(), ecFiles, value.Offset, value.Size)
+ })
+ if err != nil {
+ return fmt.Errorf("failed to check ec files: %v", err)
+ }
+ return nil
+}
+
+func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset types.Offset, size uint32) error {
+
+ data, err := readDatFile(datFile, offset, size)
+ if err != nil {
+ return fmt.Errorf("failed to read dat file: %v", err)
+ }
+
+ ecData, err := readEcFile(datSize, ecFiles, offset, size)
+ if err != nil {
+ return fmt.Errorf("failed to read ec file: %v", err)
+ }
+
+ if !bytes.Equal(data, ecData) {
+ return fmt.Errorf("unexpected data read")
+ }
+
+ return nil
+}
+
+func readDatFile(datFile *os.File, offset types.Offset, size uint32) ([]byte, error) {
+
+ data := make([]byte, size)
+ n, err := datFile.ReadAt(data, offset.ToAcutalOffset())
+ if err != nil {
+ return nil, fmt.Errorf("failed to ReadAt dat file: %v", err)
+ }
+ if n != int(size) {
+ return nil, fmt.Errorf("unexpected read size %d, expected %d", n, size)
+ }
+ return data, nil
+}
+
+func readEcFile(datSize int64, ecFiles []*os.File, offset types.Offset, size uint32) (data []byte, err error) {
+
+ intervals := LocateData(largeBlockSize, smallBlockSize, datSize, offset.ToAcutalOffset(), size)
+
+ for i, interval := range intervals {
+ if d, e := readOneInterval(interval, ecFiles); e != nil {
+ return nil, e
+ } else {
+ if i == 0 {
+ data = d
+ } else {
+ data = append(data, d...)
+ }
+ }
+ }
+
+ return data, nil
+}
+
+func readOneInterval(interval Interval, ecFiles []*os.File) (data []byte, err error) {
+
+ ecFileIndex, ecFileOffset := interval.ToShardIdAndOffset(largeBlockSize, smallBlockSize)
+
+ data = make([]byte, interval.Size)
+ err = readFromFile(ecFiles[ecFileIndex], data, ecFileOffset)
+ { // do some ec testing
+ ecData, err := readFromOtherEcFiles(ecFiles, int(ecFileIndex), ecFileOffset, interval.Size)
+ if err != nil {
+ return nil, fmt.Errorf("ec reconstruct error: %v", err)
+ }
+ if !bytes.Equal(data, ecData) {
+ return nil, fmt.Errorf("ec compare error")
+ }
+ }
+ return
+}
+
+func readFromOtherEcFiles(ecFiles []*os.File, ecFileIndex int, ecFileOffset int64, size uint32) (data []byte, err error) {
+ enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create encoder: %v", err)
+ }
+
+ bufs := make([][]byte, TotalShardsCount)
+ for i := 0; i < DataShardsCount; {
+ n := int(rand.Int31n(TotalShardsCount))
+ if n == ecFileIndex || bufs[n] != nil {
+ continue
+ }
+ bufs[n] = make([]byte, size)
+ i++
+ }
+
+ for i, buf := range bufs {
+ if buf == nil {
+ continue
+ }
+ err = readFromFile(ecFiles[i], buf, ecFileOffset)
+ if err != nil {
+ return
+ }
+ }
+
+ if err = enc.ReconstructData(bufs); err != nil {
+ return nil, err
+ }
+
+ return bufs[ecFileIndex], nil
+}
+
+func readFromFile(file *os.File, data []byte, ecFileOffset int64) (err error) {
+ _, err = file.ReadAt(data, ecFileOffset)
+ return
+}
+
+func removeGeneratedFiles(baseFileName string) {
+ for i := 0; i < DataShardsCount+ParityShardsCount; i++ {
+ fname := fmt.Sprintf("%s.ec%02d", baseFileName, i)
+ os.Remove(fname)
+ }
+ os.Remove(baseFileName + ".ecx")
+}
+
+func TestLocateData(t *testing.T) {
+ intervals := LocateData(largeBlockSize, smallBlockSize, DataShardsCount*largeBlockSize+1, DataShardsCount*largeBlockSize, 1)
+ if len(intervals) != 1 {
+ t.Errorf("unexpected interval size %d", len(intervals))
+ }
+ if !intervals[0].sameAs(Interval{0, 0, 1, false, 1}) {
+ t.Errorf("unexpected interval %+v", intervals[0])
+ }
+
+ intervals = LocateData(largeBlockSize, smallBlockSize, DataShardsCount*largeBlockSize+1, DataShardsCount*largeBlockSize/2+100, DataShardsCount*largeBlockSize+1-DataShardsCount*largeBlockSize/2-100)
+ fmt.Printf("%+v\n", intervals)
+}
+
+func (this Interval) sameAs(that Interval) bool {
+ return this.IsLargeBlock == that.IsLargeBlock &&
+ this.InnerBlockOffset == that.InnerBlockOffset &&
+ this.BlockIndex == that.BlockIndex &&
+ this.Size == that.Size
+}
diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go
new file mode 100644
index 000000000..eef53765f
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_volume.go
@@ -0,0 +1,235 @@
+package erasure_coding
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+var (
+ NotFoundError = errors.New("needle not found")
+)
+
+type EcVolume struct {
+ VolumeId needle.VolumeId
+ Collection string
+ dir string
+ ecxFile *os.File
+ ecxFileSize int64
+ ecxCreatedAt time.Time
+ Shards []*EcVolumeShard
+ ShardLocations map[ShardId][]string
+ ShardLocationsRefreshTime time.Time
+ ShardLocationsLock sync.RWMutex
+ Version needle.Version
+ ecjFile *os.File
+ ecjFileAccessLock sync.Mutex
+}
+
+func NewEcVolume(dir string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
+ ev = &EcVolume{dir: dir, Collection: collection, VolumeId: vid}
+
+ baseFileName := EcShardFileName(collection, dir, int(vid))
+
+ // open ecx file
+ if ev.ecxFile, err = os.OpenFile(baseFileName+".ecx", os.O_RDWR, 0644); err != nil {
+ return nil, fmt.Errorf("cannot open ec volume index %s.ecx: %v", baseFileName, err)
+ }
+ ecxFi, statErr := ev.ecxFile.Stat()
+ if statErr != nil {
+ return nil, fmt.Errorf("can not stat ec volume index %s.ecx: %v", baseFileName, statErr)
+ }
+ ev.ecxFileSize = ecxFi.Size()
+ ev.ecxCreatedAt = ecxFi.ModTime()
+
+ // open ecj file
+ if ev.ecjFile, err = os.OpenFile(baseFileName+".ecj", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+ return nil, fmt.Errorf("cannot open ec volume journal %s.ecj: %v", baseFileName, err)
+ }
+
+ // read volume info
+ ev.Version = needle.Version3
+ if volumeInfo, found, _ := pb.MaybeLoadVolumeInfo(baseFileName + ".vif"); found {
+ ev.Version = needle.Version(volumeInfo.Version)
+ } else {
+ pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
+ }
+
+ ev.ShardLocations = make(map[ShardId][]string)
+
+ return
+}
+
+func (ev *EcVolume) AddEcVolumeShard(ecVolumeShard *EcVolumeShard) bool {
+ for _, s := range ev.Shards {
+ if s.ShardId == ecVolumeShard.ShardId {
+ return false
+ }
+ }
+ ev.Shards = append(ev.Shards, ecVolumeShard)
+ sort.Slice(ev.Shards, func(i, j int) bool {
+ return ev.Shards[i].VolumeId < ev.Shards[j].VolumeId ||
+ ev.Shards[i].VolumeId == ev.Shards[j].VolumeId && ev.Shards[i].ShardId < ev.Shards[j].ShardId
+ })
+ return true
+}
+
+func (ev *EcVolume) DeleteEcVolumeShard(shardId ShardId) (ecVolumeShard *EcVolumeShard, deleted bool) {
+ foundPosition := -1
+ for i, s := range ev.Shards {
+ if s.ShardId == shardId {
+ foundPosition = i
+ }
+ }
+ if foundPosition < 0 {
+ return nil, false
+ }
+
+ ecVolumeShard = ev.Shards[foundPosition]
+
+ ev.Shards = append(ev.Shards[:foundPosition], ev.Shards[foundPosition+1:]...)
+ return ecVolumeShard, true
+}
+
+func (ev *EcVolume) FindEcVolumeShard(shardId ShardId) (ecVolumeShard *EcVolumeShard, found bool) {
+ for _, s := range ev.Shards {
+ if s.ShardId == shardId {
+ return s, true
+ }
+ }
+ return nil, false
+}
+
+func (ev *EcVolume) Close() {
+ for _, s := range ev.Shards {
+ s.Close()
+ }
+ if ev.ecjFile != nil {
+ ev.ecjFileAccessLock.Lock()
+ _ = ev.ecjFile.Close()
+ ev.ecjFile = nil
+ ev.ecjFileAccessLock.Unlock()
+ }
+ if ev.ecxFile != nil {
+ _ = ev.ecxFile.Close()
+ ev.ecxFile = nil
+ }
+}
+
+func (ev *EcVolume) Destroy() {
+
+ ev.Close()
+
+ for _, s := range ev.Shards {
+ s.Destroy()
+ }
+ os.Remove(ev.FileName() + ".ecx")
+ os.Remove(ev.FileName() + ".ecj")
+ os.Remove(ev.FileName() + ".vif")
+}
+
+func (ev *EcVolume) FileName() string {
+
+ return EcShardFileName(ev.Collection, ev.dir, int(ev.VolumeId))
+
+}
+
+func (ev *EcVolume) ShardSize() int64 {
+ if len(ev.Shards) > 0 {
+ return ev.Shards[0].Size()
+ }
+ return 0
+}
+
+func (ev *EcVolume) Size() (size int64) {
+ for _, shard := range ev.Shards {
+ size += shard.Size()
+ }
+ return
+}
+
+func (ev *EcVolume) CreatedAt() time.Time {
+ return ev.ecxCreatedAt
+}
+
+func (ev *EcVolume) ShardIdList() (shardIds []ShardId) {
+ for _, s := range ev.Shards {
+ shardIds = append(shardIds, s.ShardId)
+ }
+ return
+}
+
+func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.VolumeEcShardInformationMessage) {
+ prevVolumeId := needle.VolumeId(math.MaxUint32)
+ var m *master_pb.VolumeEcShardInformationMessage
+ for _, s := range ev.Shards {
+ if s.VolumeId != prevVolumeId {
+ m = &master_pb.VolumeEcShardInformationMessage{
+ Id: uint32(s.VolumeId),
+ Collection: s.Collection,
+ }
+ messages = append(messages, m)
+ }
+ prevVolumeId = s.VolumeId
+ m.EcIndexBits = uint32(ShardBits(m.EcIndexBits).AddShardId(s.ShardId))
+ }
+ return
+}
+
+func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.Version) (offset types.Offset, size uint32, intervals []Interval, err error) {
+
+ // find the needle from ecx file
+ offset, size, err = ev.FindNeedleFromEcx(needleId)
+ if err != nil {
+ return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %v", err)
+ }
+
+ if len(ev.Shards) == 0 {
+ return types.Offset{}, 0, nil, fmt.Errorf("ec volume %d has no shards", ev.VolumeId)
+ }
+ shard := ev.Shards[0]
+
+ // calculate the locations in the ec shards
+ intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, version)))
+
+ return
+}
+
+func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size uint32, err error) {
+ return SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, nil)
+}
+
+func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) {
+ var key types.NeedleId
+ buf := make([]byte, types.NeedleMapEntrySize)
+ l, h := int64(0), ecxFileSize/types.NeedleMapEntrySize
+ for l < h {
+ m := (l + h) / 2
+ if _, err := ecxFile.ReadAt(buf, m*types.NeedleMapEntrySize); err != nil {
+ return types.Offset{}, types.TombstoneFileSize, fmt.Errorf("ecx file %d read at %d: %v", ecxFileSize, m*types.NeedleMapEntrySize, err)
+ }
+ key, offset, size = idx.IdxFileEntry(buf)
+ if key == needleId {
+ if processNeedleFn != nil {
+ err = processNeedleFn(ecxFile, m*types.NeedleMapEntrySize)
+ }
+ return
+ }
+ if key < needleId {
+ l = m + 1
+ } else {
+ h = m
+ }
+ }
+
+ err = NotFoundError
+ return
+}
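
`SearchNeedleFromSortedIndex` is a plain binary search over fixed-width sorted entries, one `ReadAt` per probe. A self-contained sketch of the same search shape, with in-memory ids standing in for the `.ecx` entries:

```go
package main

import "fmt"

func main() {
	entries := []uint64{3, 9, 17, 42, 99} // sorted needle ids
	target := uint64(42)

	l, h := 0, len(entries)
	for l < h {
		m := (l + h) / 2
		switch {
		case entries[m] == target:
			fmt.Printf("found at entry index %d\n", m)
			return
		case entries[m] < target:
			l = m + 1
		default:
			h = m
		}
	}
	fmt.Println("needle not found")
}
```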
diff --git a/weed/storage/erasure_coding/ec_volume_delete.go b/weed/storage/erasure_coding/ec_volume_delete.go
new file mode 100644
index 000000000..822a9e923
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_volume_delete.go
@@ -0,0 +1,98 @@
+package erasure_coding
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ MarkNeedleDeleted = func(file *os.File, offset int64) error {
+ b := make([]byte, types.SizeSize)
+ util.Uint32toBytes(b, types.TombstoneFileSize)
+ n, err := file.WriteAt(b, offset+types.NeedleIdSize+types.OffsetSize)
+ if err != nil {
+ return fmt.Errorf("sorted needle write error: %v", err)
+ }
+ if n != types.SizeSize {
+ return fmt.Errorf("sorted needle written %d bytes, expecting %d", n, types.SizeSize)
+ }
+ return nil
+ }
+)
+
+func (ev *EcVolume) DeleteNeedleFromEcx(needleId types.NeedleId) (err error) {
+
+ _, _, err = SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, MarkNeedleDeleted)
+
+ if err != nil {
+ if err == NotFoundError {
+ return nil
+ }
+ return err
+ }
+
+ b := make([]byte, types.NeedleIdSize)
+ types.NeedleIdToBytes(b, needleId)
+
+ ev.ecjFileAccessLock.Lock()
+
+ ev.ecjFile.Seek(0, io.SeekEnd)
+ ev.ecjFile.Write(b)
+
+ ev.ecjFileAccessLock.Unlock()
+
+ return
+}
+
+func RebuildEcxFile(baseFileName string) error {
+
+ if !util.FileExists(baseFileName + ".ecj") {
+ return nil
+ }
+
+ ecxFile, err := os.OpenFile(baseFileName+".ecx", os.O_RDWR, 0644)
+ if err != nil {
+ return fmt.Errorf("rebuild: failed to open ecx file: %v", err)
+ }
+ defer ecxFile.Close()
+
+ fstat, err := ecxFile.Stat()
+ if err != nil {
+ return err
+ }
+
+ ecxFileSize := fstat.Size()
+
+ ecjFile, err := os.OpenFile(baseFileName+".ecj", os.O_RDWR, 0644)
+ if err != nil {
+ return fmt.Errorf("rebuild: failed to open ecj file: %v", err)
+ }
+
+ buf := make([]byte, types.NeedleIdSize)
+ for {
+ n, _ := ecjFile.Read(buf)
+ if n != types.NeedleIdSize {
+ break
+ }
+
+ needleId := types.BytesToNeedleId(buf)
+
+ _, _, err = SearchNeedleFromSortedIndex(ecxFile, ecxFileSize, needleId, MarkNeedleDeleted)
+
+ if err != nil && err != NotFoundError {
+ ecxFile.Close()
+ return err
+ }
+
+ }
+
+ ecxFile.Close()
+ ecjFile.Close()
+
+ os.Remove(baseFileName + ".ecj")
+
+ return nil
+}
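
`MarkNeedleDeleted` overwrites only the size field of an index entry, which begins `NeedleIdSize+OffsetSize` bytes into the entry. A sketch of that offset arithmetic, assuming the default 8/4/4 field widths:

```go
package main

import "fmt"

func main() {
	const (
		needleIdSize = 8 // assumed default build sizes
		offsetSize   = 4
		sizeSize     = 4
		entrySize    = needleIdSize + offsetSize + sizeSize
	)
	entryIndex := int64(5) // position found by the sorted-index search
	sizeFieldOffset := entryIndex*entrySize + needleIdSize + offsetSize
	fmt.Printf("overwrite %d bytes at file offset %d with the tombstone marker\n",
		sizeSize, sizeFieldOffset)
}
```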
diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go
new file mode 100644
index 000000000..8ff65bb0f
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_volume_info.go
@@ -0,0 +1,113 @@
+package erasure_coding
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+// data structure used in master
+type EcVolumeInfo struct {
+ VolumeId needle.VolumeId
+ Collection string
+ ShardBits ShardBits
+}
+
+func NewEcVolumeInfo(collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo {
+ return &EcVolumeInfo{
+ Collection: collection,
+ VolumeId: vid,
+ ShardBits: shardBits,
+ }
+}
+
+func (ecInfo *EcVolumeInfo) AddShardId(id ShardId) {
+ ecInfo.ShardBits = ecInfo.ShardBits.AddShardId(id)
+}
+
+func (ecInfo *EcVolumeInfo) RemoveShardId(id ShardId) {
+ ecInfo.ShardBits = ecInfo.ShardBits.RemoveShardId(id)
+}
+
+func (ecInfo *EcVolumeInfo) HasShardId(id ShardId) bool {
+ return ecInfo.ShardBits.HasShardId(id)
+}
+
+func (ecInfo *EcVolumeInfo) ShardIds() (ret []ShardId) {
+ return ecInfo.ShardBits.ShardIds()
+}
+
+func (ecInfo *EcVolumeInfo) ShardIdCount() (count int) {
+ return ecInfo.ShardBits.ShardIdCount()
+}
+
+func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
+ ret := &EcVolumeInfo{
+ VolumeId: ecInfo.VolumeId,
+ Collection: ecInfo.Collection,
+ ShardBits: ecInfo.ShardBits.Minus(other.ShardBits),
+ }
+
+ return ret
+}
+
+func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.VolumeEcShardInformationMessage) {
+ return &master_pb.VolumeEcShardInformationMessage{
+ Id: uint32(ecInfo.VolumeId),
+ EcIndexBits: uint32(ecInfo.ShardBits),
+ Collection: ecInfo.Collection,
+ }
+}
+
+type ShardBits uint32 // use bits to indicate the shard id, use 32 bits just for possible future extension
+
+func (b ShardBits) AddShardId(id ShardId) ShardBits {
+ return b | (1 << id)
+}
+
+func (b ShardBits) RemoveShardId(id ShardId) ShardBits {
+ return b &^ (1 << id)
+}
+
+func (b ShardBits) HasShardId(id ShardId) bool {
+ return b&(1< 0
+}
+
+func (b ShardBits) ShardIds() (ret []ShardId) {
+ for i := ShardId(0); i < TotalShardsCount; i++ {
+ if b.HasShardId(i) {
+ ret = append(ret, i)
+ }
+ }
+ return
+}
+
+func (b ShardBits) ToUint32Slice() (ret []uint32) {
+ for i := uint32(0); i < TotalShardsCount; i++ {
+ if b.HasShardId(ShardId(i)) {
+ ret = append(ret, i)
+ }
+ }
+ return
+}
+
+func (b ShardBits) ShardIdCount() (count int) {
+ for count = 0; b > 0; count++ {
+ b &= b - 1
+ }
+ return
+}
+
+func (b ShardBits) Minus(other ShardBits) ShardBits {
+ return b &^ other
+}
+
+func (b ShardBits) Plus(other ShardBits) ShardBits {
+ return b | other
+}
+
+func (b ShardBits) MinusParityShards() ShardBits {
+ for i := DataShardsCount; i < TotalShardsCount; i++ {
+ b = b.RemoveShardId(ShardId(i))
+ }
+ return b
+}
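
`ShardBits` keeps one bit per shard id, so membership, set difference, and counting are single bit operations. A quick usage sketch:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	var b erasure_coding.ShardBits
	b = b.AddShardId(0).AddShardId(3).AddShardId(13)

	fmt.Println(b.ShardIds(), b.ShardIdCount()) // [0 3 13] 3
	fmt.Println(b.HasShardId(4))                // false

	// parity shards occupy ids 10-13, so this drops shard 13
	fmt.Println(b.MinusParityShards().ShardIds()) // [0 3]
}
```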
diff --git a/weed/storage/erasure_coding/ec_volume_test.go b/weed/storage/erasure_coding/ec_volume_test.go
new file mode 100644
index 000000000..66be2b997
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_volume_test.go
@@ -0,0 +1,54 @@
+package erasure_coding
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func TestPositioning(t *testing.T) {
+
+ ecxFile, err := os.OpenFile("389.ecx", os.O_RDONLY, 0)
+ if err != nil {
+ t.Errorf("failed to open ecx file: %v", err)
+ }
+ defer ecxFile.Close()
+
+ stat, _ := ecxFile.Stat()
+ fileSize := stat.Size()
+
+ tests := []struct {
+ needleId string
+ offset int64
+ size int
+ }{
+ {needleId: "0f0edb92", offset: 31300679656, size: 1167},
+ {needleId: "0ef7d7f8", offset: 11513014944, size: 66044},
+ }
+
+ for _, test := range tests {
+ needleId, _ := types.ParseNeedleId(test.needleId)
+ offset, size, err := SearchNeedleFromSortedIndex(ecxFile, fileSize, needleId, nil)
+ assert.Equal(t, nil, err, "SearchNeedleFromSortedIndex")
+ fmt.Printf("offset: %d size: %d\n", offset.ToAcutalOffset(), size)
+ }
+
+ needleId, _ := types.ParseNeedleId("0f087622")
+ offset, size, err := SearchNeedleFromSortedIndex(ecxFile, fileSize, needleId, nil)
+ assert.Equal(t, nil, err, "SearchNeedleFromSortedIndex")
+ fmt.Printf("offset: %d size: %d\n", offset.ToAcutalOffset(), size)
+
+ var shardEcdFileSize int64 = 1118830592
+ intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shardEcdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, needle.CurrentVersion)))
+
+ for _, interval := range intervals {
+ shardId, shardOffset := interval.ToShardIdAndOffset(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)
+ fmt.Printf("interval: %+v, shardId: %d, shardOffset: %d\n", interval, shardId, shardOffset)
+ }
+
+}
diff --git a/weed/storage/file_id.go b/weed/storage/file_id.go
deleted file mode 100644
index 37dcb7c70..000000000
--- a/weed/storage/file_id.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package storage
-
-import (
- "encoding/hex"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
-)
-
-type FileId struct {
- VolumeId VolumeId
- Key NeedleId
- Cookie Cookie
-}
-
-func NewFileIdFromNeedle(VolumeId VolumeId, n *Needle) *FileId {
- return &FileId{VolumeId: VolumeId, Key: n.Id, Cookie: n.Cookie}
-}
-
-func NewFileId(VolumeId VolumeId, key uint64, cookie uint32) *FileId {
- return &FileId{VolumeId: VolumeId, Key: Uint64ToNeedleId(key), Cookie: Uint32ToCookie(cookie)}
-}
-
-func (n *FileId) String() string {
- return n.VolumeId.String() + "," + formatNeedleIdCookie(n.Key, n.Cookie)
-}
-
-func formatNeedleIdCookie(key NeedleId, cookie Cookie) string {
- bytes := make([]byte, NeedleIdSize+CookieSize)
- NeedleIdToBytes(bytes[0:NeedleIdSize], key)
- CookieToBytes(bytes[NeedleIdSize:NeedleIdSize+CookieSize], cookie)
- nonzero_index := 0
- for ; bytes[nonzero_index] == 0; nonzero_index++ {
- }
- return hex.EncodeToString(bytes[nonzero_index:])
-}
diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go
new file mode 100644
index 000000000..90efb75e6
--- /dev/null
+++ b/weed/storage/idx/walk.go
@@ -0,0 +1,53 @@
+package idx
+
+import (
+ "io"
+ "os"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// WalkIndexFile walks through the index file in batches of RowsToRead entries,
+// calling fn with each key, offset, size; it stops early with the first error
+// returned by fn.
+func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+ var readerOffset int64
+ bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
+ count, e := r.ReadAt(bytes, readerOffset)
+ glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+ readerOffset += int64(count)
+ var (
+ key types.NeedleId
+ offset types.Offset
+ size uint32
+ i int
+ )
+
+ for count > 0 && e == nil || e == io.EOF {
+ for i = 0; i+types.NeedleMapEntrySize <= count; i += types.NeedleMapEntrySize {
+ key, offset, size = IdxFileEntry(bytes[i : i+types.NeedleMapEntrySize])
+ if e = fn(key, offset, size); e != nil {
+ return e
+ }
+ }
+ if e == io.EOF {
+ return nil
+ }
+ count, e = r.ReadAt(bytes, readerOffset)
+ glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+ readerOffset += int64(count)
+ }
+ return e
+}
+
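+// IdxFileEntry decodes one fixed-size index entry: needle id, then offset, then size.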
+func IdxFileEntry(bytes []byte) (key types.NeedleId, offset types.Offset, size uint32) {
+ key = types.BytesToNeedleId(bytes[:types.NeedleIdSize])
+ offset = types.BytesToOffset(bytes[types.NeedleIdSize : types.NeedleIdSize+types.OffsetSize])
+ size = util.BytesToUint32(bytes[types.NeedleIdSize+types.OffsetSize : types.NeedleIdSize+types.OffsetSize+types.SizeSize])
+ return
+}
+
+const (
+ RowsToRead = 1024
+)
diff --git a/weed/storage/needle/async_request.go b/weed/storage/needle/async_request.go
new file mode 100644
index 000000000..ea02c55c5
--- /dev/null
+++ b/weed/storage/needle/async_request.go
@@ -0,0 +1,53 @@
+package needle
+
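+// AsyncRequest carries one needle write (or delete) through an asynchronous queue:
+// the producer blocks in WaitComplete until the consumer calls Complete or Submit,
+// both of which close doneChan.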
+type AsyncRequest struct {
+ N *Needle
+ IsWriteRequest bool
+ ActualSize int64
+ offset uint64
+ size uint64
+ doneChan chan interface{}
+ isUnchanged bool
+ err error
+}
+
+func NewAsyncRequest(n *Needle, isWriteRequest bool) *AsyncRequest {
+ return &AsyncRequest{
+ offset: 0,
+ size: 0,
+ ActualSize: 0,
+ doneChan: make(chan interface{}),
+ N: n,
+ isUnchanged: false,
+ IsWriteRequest: isWriteRequest,
+ err: nil,
+ }
+}
+
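+// WaitComplete blocks until doneChan is closed, then returns the stored result.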
+func (r *AsyncRequest) WaitComplete() (uint64, uint64, bool, error) {
+ <-r.doneChan
+ return r.offset, r.size, r.isUnchanged, r.err
+}
+
+func (r *AsyncRequest) Complete(offset uint64, size uint64, isUnchanged bool, err error) {
+ r.offset = offset
+ r.size = size
+ r.isUnchanged = isUnchanged
+ r.err = err
+ close(r.doneChan)
+}
+
+func (r *AsyncRequest) UpdateResult(offset uint64, size uint64, isUnchanged bool, err error) {
+ r.offset = offset
+ r.size = size
+ r.isUnchanged = isUnchanged
+ r.err = err
+}
+
+func (r *AsyncRequest) Submit() {
+ close(r.doneChan)
+}
+
+func (r *AsyncRequest) IsSucceed() bool {
+ return r.err == nil
+}
diff --git a/weed/storage/needle/btree_map.go b/weed/storage/needle/btree_map.go
deleted file mode 100644
index d688b802e..000000000
--- a/weed/storage/needle/btree_map.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package needle
-
-import (
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/google/btree"
-)
-
-//This map assumes mostly inserting increasing keys
-type BtreeMap struct {
- tree *btree.BTree
-}
-
-func NewBtreeMap() *BtreeMap {
- return &BtreeMap{
- tree: btree.New(32),
- }
-}
-
-func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
- found := cm.tree.ReplaceOrInsert(NeedleValue{key, offset, size})
- if found != nil {
- old := found.(NeedleValue)
- return old.Offset, old.Size
- }
- return
-}
-
-func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) {
- found := cm.tree.Delete(NeedleValue{key, 0, 0})
- if found != nil {
- old := found.(NeedleValue)
- return old.Size
- }
- return
-}
-func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) {
- found := cm.tree.Get(NeedleValue{key, 0, 0})
- if found != nil {
- old := found.(NeedleValue)
- return &old, true
- }
- return nil, false
-}
-
-// Visit visits all entries or stop if any error when visiting
-func (cm *BtreeMap) Visit(visit func(NeedleValue) error) (ret error) {
- cm.tree.Ascend(func(item btree.Item) bool {
- needle := item.(NeedleValue)
- ret = visit(needle)
- return ret == nil
- })
- return ret
-}
diff --git a/weed/storage/needle/compact_map.go b/weed/storage/needle/compact_map.go
deleted file mode 100644
index 4816e0098..000000000
--- a/weed/storage/needle/compact_map.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package needle
-
-import (
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
- "sort"
- "sync"
-)
-
-const (
- batch = 100000
-)
-
-type SectionalNeedleId uint32
-
-const SectionalNeedleIdLimit = 1<<32 - 1
-
-type SectionalNeedleValue struct {
- Key SectionalNeedleId
- Offset Offset `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
- Size uint32 `comment:"Size of the data portion"`
-}
-
-type CompactSection struct {
- sync.RWMutex
- values []SectionalNeedleValue
- overflow Overflow
- start NeedleId
- end NeedleId
- counter int
-}
-
-type Overflow []SectionalNeedleValue
-
-func NewCompactSection(start NeedleId) *CompactSection {
- return &CompactSection{
- values: make([]SectionalNeedleValue, batch),
- overflow: Overflow(make([]SectionalNeedleValue, 0)),
- start: start,
- }
-}
-
-//return old entry size
-func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
- cs.Lock()
- if key > cs.end {
- cs.end = key
- }
- skey := SectionalNeedleId(key - cs.start)
- if i := cs.binarySearchValues(skey); i >= 0 {
- oldOffset, oldSize = cs.values[i].Offset, cs.values[i].Size
- //println("key", key, "old size", ret)
- cs.values[i].Offset, cs.values[i].Size = offset, size
- } else {
- needOverflow := cs.counter >= batch
- needOverflow = needOverflow || cs.counter > 0 && cs.values[cs.counter-1].Key > skey
- if needOverflow {
- //println("start", cs.start, "counter", cs.counter, "key", key)
- if oldValue, found := cs.overflow.findOverflowEntry(skey); found {
- oldOffset, oldSize = oldValue.Offset, oldValue.Size
- }
- cs.overflow = cs.overflow.setOverflowEntry(SectionalNeedleValue{Key: skey, Offset: offset, Size: size})
- } else {
- p := &cs.values[cs.counter]
- p.Key, p.Offset, p.Size = skey, offset, size
- //println("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
- cs.counter++
- }
- }
- cs.Unlock()
- return
-}
-
-//return old entry size
-func (cs *CompactSection) Delete(key NeedleId) uint32 {
- skey := SectionalNeedleId(key - cs.start)
- cs.Lock()
- ret := uint32(0)
- if i := cs.binarySearchValues(skey); i >= 0 {
- if cs.values[i].Size > 0 {
- ret = cs.values[i].Size
- cs.values[i].Size = 0
- }
- }
- if v, found := cs.overflow.findOverflowEntry(skey); found {
- cs.overflow = cs.overflow.deleteOverflowEntry(skey)
- ret = v.Size
- }
- cs.Unlock()
- return ret
-}
-func (cs *CompactSection) Get(key NeedleId) (*NeedleValue, bool) {
- cs.RLock()
- skey := SectionalNeedleId(key - cs.start)
- if v, ok := cs.overflow.findOverflowEntry(skey); ok {
- cs.RUnlock()
- nv := v.toNeedleValue(cs)
- return &nv, true
- }
- if i := cs.binarySearchValues(skey); i >= 0 {
- cs.RUnlock()
- nv := cs.values[i].toNeedleValue(cs)
- return &nv, true
- }
- cs.RUnlock()
- return nil, false
-}
-func (cs *CompactSection) binarySearchValues(key SectionalNeedleId) int {
- x := sort.Search(cs.counter, func(i int) bool {
- return cs.values[i].Key >= key
- })
- if x == cs.counter {
- return -1
- }
- if cs.values[x].Key > key {
- return -2
- }
- return x
-}
-
-//This map assumes mostly inserting increasing keys
-//This map assumes mostly inserting increasing keys
-type CompactMap struct {
- list []*CompactSection
-}
-
-func NewCompactMap() *CompactMap {
- return &CompactMap{}
-}
-
-func (cm *CompactMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
- x := cm.binarySearchCompactSection(key)
- if x < 0 || (key-cm.list[x].start) > SectionalNeedleIdLimit {
- // println(x, "adding to existing", len(cm.list), "sections, starting", key)
- cs := NewCompactSection(key)
- cm.list = append(cm.list, cs)
- x = len(cm.list) - 1
- //keep compact section sorted by start
- for x >= 0 {
- if x > 0 && cm.list[x-1].start > key {
- cm.list[x] = cm.list[x-1]
- // println("shift", x, "start", cs.start, "to", x-1)
- x = x - 1
- } else {
- cm.list[x] = cs
- // println("cs", x, "start", cs.start)
- break
- }
- }
- }
- // println(key, "set to section[", x, "].start", cm.list[x].start)
- return cm.list[x].Set(key, offset, size)
-}
-func (cm *CompactMap) Delete(key NeedleId) uint32 {
- x := cm.binarySearchCompactSection(key)
- if x < 0 {
- return uint32(0)
- }
- return cm.list[x].Delete(key)
-}
-func (cm *CompactMap) Get(key NeedleId) (*NeedleValue, bool) {
- x := cm.binarySearchCompactSection(key)
- if x < 0 {
- return nil, false
- }
- return cm.list[x].Get(key)
-}
-func (cm *CompactMap) binarySearchCompactSection(key NeedleId) int {
- l, h := 0, len(cm.list)-1
- if h < 0 {
- return -5
- }
- if cm.list[h].start <= key {
- if cm.list[h].counter < batch || key <= cm.list[h].end {
- return h
- }
- return -4
- }
- for l <= h {
- m := (l + h) / 2
- if key < cm.list[m].start {
- h = m - 1
- } else { // cm.list[m].start <= key
- if cm.list[m+1].start <= key {
- l = m + 1
- } else {
- return m
- }
- }
- }
- return -3
-}
-
-// Visit visits all entries or stop if any error when visiting
-func (cm *CompactMap) Visit(visit func(NeedleValue) error) error {
- for _, cs := range cm.list {
- cs.RLock()
- for _, v := range cs.overflow {
- if err := visit(v.toNeedleValue(cs)); err != nil {
- cs.RUnlock()
- return err
- }
- }
- for i, v := range cs.values {
- if i >= cs.counter {
- break
- }
- if _, found := cs.overflow.findOverflowEntry(v.Key); !found {
- if err := visit(v.toNeedleValue(cs)); err != nil {
- cs.RUnlock()
- return err
- }
- }
- }
- cs.RUnlock()
- }
- return nil
-}
-
-func (o Overflow) deleteOverflowEntry(key SectionalNeedleId) Overflow {
- length := len(o)
- deleteCandidate := sort.Search(length, func(i int) bool {
- return o[i].Key >= key
- })
- if deleteCandidate != length && o[deleteCandidate].Key == key {
- for i := deleteCandidate; i < length-1; i++ {
- o[i] = o[i+1]
- }
- o = o[0 : length-1]
- }
- return o
-}
-
-func (o Overflow) setOverflowEntry(needleValue SectionalNeedleValue) Overflow {
- insertCandidate := sort.Search(len(o), func(i int) bool {
- return o[i].Key >= needleValue.Key
- })
- if insertCandidate != len(o) && o[insertCandidate].Key == needleValue.Key {
- o[insertCandidate] = needleValue
- } else {
- o = append(o, needleValue)
- for i := len(o) - 1; i > insertCandidate; i-- {
- o[i] = o[i-1]
- }
- o[insertCandidate] = needleValue
- }
- return o
-}
-
-func (o Overflow) findOverflowEntry(key SectionalNeedleId) (nv SectionalNeedleValue, found bool) {
- foundCandidate := sort.Search(len(o), func(i int) bool {
- return o[i].Key >= key
- })
- if foundCandidate != len(o) && o[foundCandidate].Key == key {
- return o[foundCandidate], true
- }
- return nv, false
-}
-
-func (snv SectionalNeedleValue) toNeedleValue(cs *CompactSection) NeedleValue {
- return NeedleValue{NeedleId(snv.Key) + cs.start, snv.Offset, snv.Size}
-}
-
-func (nv NeedleValue) toSectionalNeedleValue(cs *CompactSection) SectionalNeedleValue {
- return SectionalNeedleValue{SectionalNeedleId(nv.Key - cs.start), nv.Offset, nv.Size}
-}
diff --git a/weed/storage/needle/compact_map_test.go b/weed/storage/needle/compact_map_test.go
deleted file mode 100644
index 8ed851b95..000000000
--- a/weed/storage/needle/compact_map_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package needle
-
-import (
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
- "testing"
-)
-
-func TestOverflow2(t *testing.T) {
- m := NewCompactMap()
- m.Set(NeedleId(150088), 8, 3000073)
- m.Set(NeedleId(150073), 8, 3000073)
- m.Set(NeedleId(150089), 8, 3000073)
- m.Set(NeedleId(150076), 8, 3000073)
- m.Set(NeedleId(150124), 8, 3000073)
- m.Set(NeedleId(150137), 8, 3000073)
- m.Set(NeedleId(150147), 8, 3000073)
- m.Set(NeedleId(150145), 8, 3000073)
- m.Set(NeedleId(150158), 8, 3000073)
- m.Set(NeedleId(150162), 8, 3000073)
-
- m.Visit(func(value NeedleValue) error {
- println("needle key:", value.Key)
- return nil
- })
-}
-
-func TestIssue52(t *testing.T) {
- m := NewCompactMap()
- m.Set(NeedleId(10002), 10002, 10002)
- if element, ok := m.Get(NeedleId(10002)); ok {
- println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size)
- }
- m.Set(NeedleId(10001), 10001, 10001)
- if element, ok := m.Get(NeedleId(10002)); ok {
- println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size)
- } else {
- t.Fatal("key 10002 missing after setting 10001")
- }
-}
-
-func TestCompactMap(t *testing.T) {
- m := NewCompactMap()
- for i := uint32(0); i < 100*batch; i += 2 {
- m.Set(NeedleId(i), Offset(i), i)
- }
-
- for i := uint32(0); i < 100*batch; i += 37 {
- m.Delete(NeedleId(i))
- }
-
- for i := uint32(0); i < 10*batch; i += 3 {
- m.Set(NeedleId(i), Offset(i+11), i+5)
- }
-
- // for i := uint32(0); i < 100; i++ {
- // if v := m.Get(Key(i)); v != nil {
- // glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
- // }
- // }
-
- for i := uint32(0); i < 10*batch; i++ {
- v, ok := m.Get(NeedleId(i))
- if i%3 == 0 {
- if !ok {
- t.Fatal("key", i, "missing!")
- }
- if v.Size != i+5 {
- t.Fatal("key", i, "size", v.Size)
- }
- } else if i%37 == 0 {
- if ok && v.Size > 0 {
- t.Fatal("key", i, "should have been deleted needle value", v)
- }
- } else if i%2 == 0 {
- if v.Size != i {
- t.Fatal("key", i, "size", v.Size)
- }
- }
- }
-
- for i := uint32(10 * batch); i < 100*batch; i++ {
- v, ok := m.Get(NeedleId(i))
- if i%37 == 0 {
- if ok && v.Size > 0 {
- t.Fatal("key", i, "should have been deleted needle value", v)
- }
- } else if i%2 == 0 {
- if v == nil {
- t.Fatal("key", i, "missing")
- }
- if v.Size != i {
- t.Fatal("key", i, "size", v.Size)
- }
- }
- }
-
-}
-
-func TestOverflow(t *testing.T) {
- o := Overflow(make([]SectionalNeedleValue, 0))
-
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: 12, Size: 12})
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 2, Offset: 12, Size: 12})
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: 12, Size: 12})
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: 12, Size: 12})
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 5, Offset: 12, Size: 12})
-
- if o[2].Key != 3 {
- t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key)
- }
-
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: 24, Size: 24})
-
- if o[2].Key != 3 {
- t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key)
- }
-
- if o[2].Size != 24 {
- t.Fatalf("expecting o[2] has size 24: %+v", o[2].Size)
- }
-
- o = o.deleteOverflowEntry(4)
-
- if len(o) != 4 {
- t.Fatalf("expecting 4 entries now: %+v", o)
- }
-
- x, _ := o.findOverflowEntry(5)
- if x.Key != 5 {
- t.Fatalf("expecting entry 5 now: %+v", x)
- }
-
- for i, x := range o {
- println("overflow[", i, "]:", x.Key)
- }
- println()
-
- o = o.deleteOverflowEntry(1)
-
- for i, x := range o {
- println("overflow[", i, "]:", x.Key)
- }
- println()
-
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: 44, Size: 44})
- for i, x := range o {
- println("overflow[", i, "]:", x.Key)
- }
- println()
-
- o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: 11, Size: 11})
-
- for i, x := range o {
- println("overflow[", i, "]:", x.Key)
- }
- println()
-
-}
diff --git a/weed/storage/crc.go b/weed/storage/needle/crc.go
similarity index 77%
rename from weed/storage/crc.go
rename to weed/storage/needle/crc.go
index e49686dc8..6fd910bb7 100644
--- a/weed/storage/crc.go
+++ b/weed/storage/needle/crc.go
@@ -1,11 +1,11 @@
-package storage
+package needle
import (
- "crypto/md5"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/util"
"github.com/klauspost/crc32"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
)
var table = crc32.MakeTable(crc32.Castagnoli)
@@ -29,13 +29,3 @@ func (n *Needle) Etag() string {
util.Uint32toBytes(bits, uint32(n.Checksum))
return fmt.Sprintf("%x", bits)
}
-
-func (n *Needle) MD5() string {
-
- hash := md5.New()
-
- hash.Write(n.Data)
-
- return fmt.Sprintf("%x", hash.Sum(nil))
-
-}
diff --git a/weed/storage/needle/file_id.go b/weed/storage/needle/file_id.go
new file mode 100644
index 000000000..5dabb0f25
--- /dev/null
+++ b/weed/storage/needle/file_id.go
@@ -0,0 +1,81 @@
+package needle
+
+import (
+ "encoding/hex"
+ "fmt"
+ "strings"
+
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+type FileId struct {
+ VolumeId VolumeId
+ Key NeedleId
+ Cookie Cookie
+}
+
+func NewFileIdFromNeedle(VolumeId VolumeId, n *Needle) *FileId {
+ return &FileId{VolumeId: VolumeId, Key: n.Id, Cookie: n.Cookie}
+}
+
+func NewFileId(VolumeId VolumeId, key uint64, cookie uint32) *FileId {
+ return &FileId{VolumeId: VolumeId, Key: Uint64ToNeedleId(key), Cookie: Uint32ToCookie(cookie)}
+}
+
+// ParseFileIdFromString parses a fid string of the form "volumeId,needleIdCookie" into a FileId.
+func ParseFileIdFromString(fid string) (*FileId, error) {
+ vid, needleKeyCookie, err := splitVolumeId(fid)
+ if err != nil {
+ return nil, err
+ }
+ volumeId, err := NewVolumeId(vid)
+ if err != nil {
+ return nil, err
+ }
+
+ nid, cookie, err := ParseNeedleIdCookie(needleKeyCookie)
+ if err != nil {
+ return nil, err
+ }
+ fileId := &FileId{VolumeId: volumeId, Key: nid, Cookie: cookie}
+ return fileId, nil
+}
+
+func (n *FileId) GetVolumeId() VolumeId {
+ return n.VolumeId
+}
+
+func (n *FileId) GetNeedleId() NeedleId {
+ return n.Key
+}
+
+func (n *FileId) GetCookie() Cookie {
+ return n.Cookie
+}
+
+func (n *FileId) GetNeedleIdCookie() string {
+ return formatNeedleIdCookie(n.Key, n.Cookie)
+}
+
+func (n *FileId) String() string {
+ return n.VolumeId.String() + "," + formatNeedleIdCookie(n.Key, n.Cookie)
+}
+
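+// formatNeedleIdCookie hex-encodes the concatenated key and cookie bytes,
+// stripping leading zero bytes to keep the fid short.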
+func formatNeedleIdCookie(key NeedleId, cookie Cookie) string {
+ bytes := make([]byte, NeedleIdSize+CookieSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ CookieToBytes(bytes[NeedleIdSize:NeedleIdSize+CookieSize], cookie)
+ nonzero_index := 0
+ for ; bytes[nonzero_index] == 0; nonzero_index++ {
+ }
+ return hex.EncodeToString(bytes[nonzero_index:])
+}
+
+// copied from operation/delete_content.go to avoid a cyclic dependency
+func splitVolumeId(fid string) (vid string, key_cookie string, err error) {
+ commaIndex := strings.Index(fid, ",")
+ if commaIndex <= 0 {
+ return "", "", fmt.Errorf("wrong fid format")
+ }
+ return fid[:commaIndex], fid[commaIndex+1:], nil
+}
diff --git a/weed/storage/needle/file_id_test.go b/weed/storage/needle/file_id_test.go
new file mode 100644
index 000000000..a1a2c61fc
--- /dev/null
+++ b/weed/storage/needle/file_id_test.go
@@ -0,0 +1,55 @@
+package needle
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "testing"
+)
+
+func TestParseFileIdFromString(t *testing.T) {
+ fidStr1 := "100,12345678"
+ _, err := ParseFileIdFromString(fidStr1)
+ if err == nil {
+ t.Errorf("%s : KeyHash is too short", fidStr1)
+ }
+
+ fidStr1 = "100, 12345678"
+ _, err = ParseFileIdFromString(fidStr1)
+ if err == nil {
+ t.Errorf("%s : needlId invalid syntax", fidStr1)
+ }
+
+ fidStr1 = "100,123456789"
+ _, err = ParseFileIdFromString(fidStr1)
+ if err != nil {
+ t.Errorf("%s : should be OK", fidStr1)
+ }
+
+ var fileId *FileId
+ fidStr1 = "100,123456789012345678901234"
+ fileId, err = ParseFileIdFromString(fidStr1)
+ if err != nil {
+ t.Errorf("%s : should be OK", fidStr1)
+ }
+ if !(fileId.VolumeId == VolumeId(100) &&
+ fileId.Key == types.NeedleId(0x1234567890123456) &&
+ fileId.Cookie == types.Cookie(types.Uint32ToCookie(uint32(0x78901234)))) {
+ t.Errorf("src : %s, dest : %v", fidStr1, fileId)
+ }
+
+ fidStr1 = "100,abcd0000abcd"
+ fileId, err = ParseFileIdFromString(fidStr1)
+ if err != nil {
+ t.Errorf("%s : should be OK", fidStr1)
+ }
+ if !(fileId.VolumeId == VolumeId(100) &&
+ fileId.Key == types.NeedleId(0xabcd) &&
+ fileId.Cookie == types.Cookie(types.Uint32ToCookie(uint32(0xabcd)))) {
+ t.Errorf("src : %s, dest : %v", fidStr1, fileId)
+ }
+
+ fidStr1 = "100,1234567890123456789012345"
+ _, err = ParseFileIdFromString(fidStr1)
+ if err == nil {
+ t.Errorf("%s : needleId is too long", fidStr1)
+ }
+}
diff --git a/weed/storage/needle.go b/weed/storage/needle/needle.go
similarity index 68%
rename from weed/storage/needle.go
rename to weed/storage/needle/needle.go
index 5bd6f7d96..4c62aa00b 100644
--- a/weed/storage/needle.go
+++ b/weed/storage/needle/needle.go
@@ -1,4 +1,4 @@
-package storage
+package needle
import (
"encoding/json"
@@ -8,9 +8,7 @@ import (
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/images"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
- "io/ioutil"
)
const (
@@ -49,53 +47,28 @@ func (n *Needle) String() (str string) {
return
}
-func ParseUpload(r *http.Request) (
- fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int,
- modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) {
- pairMap = make(map[string]string)
- for k, v := range r.Header {
- if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) {
- pairMap[k] = v[0]
- }
- }
-
- if r.Method == "POST" {
- fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r)
- } else {
- isGzipped = false
- mimeType = r.Header.Get("Content-Type")
- fileName = ""
- data, e = ioutil.ReadAll(r.Body)
- originalDataSize = len(data)
- }
- if e != nil {
- return
- }
-
- modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
- ttl, _ = ReadTTL(r.FormValue("ttl"))
-
- return
-}
-func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle, originalSize int, e error) {
- var pairMap map[string]string
- fname, mimeType, isGzipped, isChunkedFile := "", "", false, false
+func CreateNeedleFromRequest(r *http.Request, sizeLimit int64) (n *Needle, originalSize int, e error) {
n = new(Needle)
- fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r)
+ pu, e := ParseUpload(r, sizeLimit)
if e != nil {
return
}
- if len(fname) < 256 {
- n.Name = []byte(fname)
+ n.Data = pu.Data
+ originalSize = pu.OriginalDataSize
+ n.LastModified = pu.ModifiedTime
+ n.Ttl = pu.Ttl
+
+ if len(pu.FileName) < 256 {
+ n.Name = []byte(pu.FileName)
n.SetHasName()
}
- if len(mimeType) < 256 {
- n.Mime = []byte(mimeType)
+ if len(pu.MimeType) < 256 {
+ n.Mime = []byte(pu.MimeType)
n.SetHasMime()
}
- if len(pairMap) != 0 {
+ if len(pu.PairMap) != 0 {
trimmedPairMap := make(map[string]string)
- for k, v := range pairMap {
+ for k, v := range pu.PairMap {
trimmedPairMap[k[len(PairNamePrefix):]] = v
}
@@ -106,8 +79,8 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle
n.SetHasPairs()
}
}
- if isGzipped {
- n.SetGzipped()
+ if pu.IsGzipped {
+ n.SetIsCompressed()
}
if n.LastModified == 0 {
n.LastModified = uint64(time.Now().Unix())
@@ -117,17 +90,10 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle
n.SetHasTtl()
}
- if isChunkedFile {
+ if pu.IsChunkedFile {
n.SetIsChunkManifest()
}
- if fixJpgOrientation {
- loweredName := strings.ToLower(fname)
- if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
- n.Data = images.FixJpgOrientation(n.Data)
- }
- }
-
n.Checksum = NewCRC(n.Data)
commaSep := strings.LastIndex(r.URL.Path, ",")
@@ -157,7 +123,7 @@ func (n *Needle) ParsePath(fid string) (err error) {
}
if delta != "" {
if d, e := strconv.ParseUint(delta, 10, 64); e == nil {
- n.Id += NeedleId(d)
+ n.Id += Uint64ToNeedleId(d)
} else {
return e
}
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
new file mode 100644
index 000000000..785217761
--- /dev/null
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -0,0 +1,198 @@
+package needle
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type ParsedUpload struct {
+ FileName string
+ Data []byte
+ MimeType string
+ PairMap map[string]string
+ IsGzipped bool
+ IsZstd bool
+ OriginalDataSize int
+ ModifiedTime uint64
+ Ttl *TTL
+ IsChunkedFile bool
+ UncompressedData []byte
+}
+
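+// ParseUpload reads an upload from either a POST multipart form or a raw PUT body,
+// enforces sizeLimit, gzips content that looks compressible when the saving is
+// worthwhile, and verifies the Content-MD5 header against the uncompressed data
+// when one is supplied.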
+func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {
+ pu = &ParsedUpload{}
+ pu.PairMap = make(map[string]string)
+ for k, v := range r.Header {
+ if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) {
+ pu.PairMap[k] = v[0]
+ }
+ }
+
+ if r.Method == "POST" {
+ e = parseMultipart(r, sizeLimit, pu)
+ } else {
+ e = parsePut(r, sizeLimit, pu)
+ }
+ if e != nil {
+ return
+ }
+
+ pu.ModifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
+ pu.Ttl, _ = ReadTTL(r.FormValue("ttl"))
+
+ pu.OriginalDataSize = len(pu.Data)
+ pu.UncompressedData = pu.Data
+ // println("received data", len(pu.Data), "isGzipped", pu.IsCompressed, "mime", pu.MimeType, "name", pu.FileName)
+ if pu.IsGzipped {
+ if unzipped, e := util.DecompressData(pu.Data); e == nil {
+ pu.OriginalDataSize = len(unzipped)
+ pu.UncompressedData = unzipped
+ // println("ungzipped data size", len(unzipped))
+ }
+ } else {
+ ext := filepath.Ext(pu.FileName) // use the extension; filepath.Base would return the whole file name
+ mimeType := pu.MimeType
+ if mimeType == "" {
+ mimeType = http.DetectContentType(pu.Data)
+ }
+ // println("detected mimetype to", pu.MimeType)
+ if mimeType == "application/octet-stream" {
+ mimeType = ""
+ }
+ if shouldBeCompressed, iAmSure := util.IsCompressableFileType(ext, mimeType); mimeType == "" && !iAmSure || shouldBeCompressed && iAmSure {
+ // println("ext", ext, "iAmSure", iAmSure, "shouldGzip", shouldGzip, "mimeType", pu.MimeType)
+ if compressedData, err := util.GzipData(pu.Data); err == nil {
+ if len(compressedData)*10 < len(pu.Data)*9 {
+ pu.Data = compressedData
+ pu.IsGzipped = true
+ }
+ // println("gzipped data size", len(compressedData))
+ }
+ }
+ }
+
+ if expectedChecksum := r.Header.Get("Content-MD5"); expectedChecksum != "" {
+ h := md5.New()
+ h.Write(pu.UncompressedData)
+ if receivedChecksum := base64.StdEncoding.EncodeToString(h.Sum(nil)); expectedChecksum != receivedChecksum {
+ e = fmt.Errorf("Content-MD5 did not match md5 of file data [%s] != [%s]", expectedChecksum, receivedChecksum)
+ return
+ }
+ }
+
+ return
+}
+
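+// parsePut reads up to sizeLimit+1 bytes of the raw body and drains any
+// remainder so the connection can be reused; callers detect an oversized
+// upload by seeing sizeLimit+1 bytes.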
+func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+ pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip"
+ pu.IsZstd = r.Header.Get("Content-Encoding") == "zstd"
+ pu.MimeType = r.Header.Get("Content-Type")
+ pu.FileName = ""
+ pu.Data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1))
+ if e == io.EOF || int64(len(pu.Data)) == sizeLimit+1 {
+ io.Copy(ioutil.Discard, r.Body)
+ }
+ r.Body.Close()
+ return nil
+}
+
+func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+ defer func() {
+ if e != nil && r.Body != nil {
+ io.Copy(ioutil.Discard, r.Body)
+ r.Body.Close()
+ }
+ }()
+ form, fe := r.MultipartReader()
+ if fe != nil {
+ glog.V(0).Infoln("MultipartReader [ERROR]", fe)
+ e = fe
+ return
+ }
+
+ // first multi-part item
+ part, fe := form.NextPart()
+ if fe != nil {
+ glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
+ e = fe
+ return
+ }
+
+ pu.FileName = part.FileName()
+ if pu.FileName != "" {
+ pu.FileName = path.Base(pu.FileName)
+ }
+
+ pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
+ if e != nil {
+ glog.V(0).Infoln("Reading Content [ERROR]", e)
+ return
+ }
+ if len(pu.Data) == int(sizeLimit)+1 {
+ e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+ return
+ }
+
+ // if the file name is empty, search the remaining multi-part items
+ for pu.FileName == "" {
+ part2, fe := form.NextPart()
+ if fe != nil {
+ break // no more or on error, just safely break
+ }
+
+ fName := part2.FileName()
+
+ // found the first multi-part item that has a file name
+ if fName != "" {
+ data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
+ if fe2 != nil {
+ glog.V(0).Infoln("Reading Content [ERROR]", fe2)
+ e = fe2
+ return
+ }
+ if len(data2) == int(sizeLimit)+1 {
+ e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+ return
+ }
+
+ // update
+ pu.Data = data2
+ pu.FileName = path.Base(fName)
+ break
+ }
+ }
+
+ pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
+
+ if !pu.IsChunkedFile {
+
+ dotIndex := strings.LastIndex(pu.FileName, ".")
+ ext, mtype := "", ""
+ if dotIndex > 0 {
+ ext = strings.ToLower(pu.FileName[dotIndex:])
+ mtype = mime.TypeByExtension(ext)
+ }
+ contentType := part.Header.Get("Content-Type")
+ if contentType != "" && contentType != "application/octet-stream" && mtype != contentType {
+ pu.MimeType = contentType // only store the mime type when it cannot be deduced from the file extension
+ mtype = contentType
+ }
+
+ pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip"
+ pu.IsZstd = part.Header.Get("Content-Encoding") == "zstd"
+ }
+
+ return
+}
diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle/needle_read_write.go
similarity index 53%
rename from weed/storage/needle_read_write.go
rename to weed/storage/needle/needle_read_write.go
index c99395f8b..9702cf939 100644
--- a/weed/storage/needle_read_write.go
+++ b/weed/storage/needle/needle_read_write.go
@@ -1,19 +1,19 @@
-package storage
+package needle
import (
"errors"
"fmt"
"io"
- "os"
+ "math"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
- "math"
)
const (
- FlagGzip = 0x01
+ FlagIsCompressed = 0x01
FlagHasName = 0x02
FlagHasMime = 0x04
FlagHasLastModifiedDate = 0x08
@@ -25,44 +25,30 @@ const (
)
func (n *Needle) DiskSize(version Version) int64 {
- return getActualSize(n.Size, version)
+ return GetActualSize(n.Size, version)
}
-func (n *Needle) Append(w *os.File, version Version) (offset uint64, size uint32, actualSize int64, err error) {
- if end, e := w.Seek(0, io.SeekEnd); e == nil {
- defer func(w *os.File, off int64) {
- if err != nil {
- if te := w.Truncate(end); te != nil {
- glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
- }
- }
- }(w, end)
- offset = uint64(end)
- } else {
- err = fmt.Errorf("Cannot Read Current Volume Position: %v", e)
- return
- }
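+// prepareWriteBuffer serializes the needle into one contiguous buffer so that
+// Append can issue a single WriteAt; it returns the buffer, the reported size,
+// and the padded on-disk size.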
+func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, error) {
+
+ writeBytes := make([]byte, 0)
+
switch version {
case Version1:
- header := make([]byte, NeedleEntrySize)
+ header := make([]byte, NeedleHeaderSize)
CookieToBytes(header[0:CookieSize], n.Cookie)
NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
n.Size = uint32(len(n.Data))
- size = n.Size
util.Uint32toBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)
- if _, err = w.Write(header); err != nil {
- return
- }
- if _, err = w.Write(n.Data); err != nil {
- return
- }
- actualSize = NeedleEntrySize + int64(n.Size)
+ size := n.Size
+ actualSize := NeedleHeaderSize + int64(n.Size)
+ writeBytes = append(writeBytes, header...)
+ writeBytes = append(writeBytes, n.Data...)
padding := PaddingLength(n.Size, version)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
- _, err = w.Write(header[0 : NeedleChecksumSize+padding])
- return
+ writeBytes = append(writeBytes, header[0:NeedleChecksumSize+padding]...)
+ return writeBytes, size, actualSize, nil
case Version2, Version3:
- header := make([]byte, NeedleEntrySize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
+ header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
CookieToBytes(header[0:CookieSize], n.Cookie)
NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
if len(n.Name) >= math.MaxUint8 {
@@ -91,130 +77,145 @@ func (n *Needle) Append(w *os.File, version Version) (offset uint64, size uint32
} else {
n.Size = 0
}
- size = n.DataSize
util.Uint32toBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)
- if _, err = w.Write(header[0:NeedleEntrySize]); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:NeedleHeaderSize]...)
if n.DataSize > 0 {
util.Uint32toBytes(header[0:4], n.DataSize)
- if _, err = w.Write(header[0:4]); err != nil {
- return
- }
- if _, err = w.Write(n.Data); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:4]...)
+ writeBytes = append(writeBytes, n.Data...)
util.Uint8toBytes(header[0:1], n.Flags)
- if _, err = w.Write(header[0:1]); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:1]...)
if n.HasName() {
util.Uint8toBytes(header[0:1], n.NameSize)
- if _, err = w.Write(header[0:1]); err != nil {
- return
- }
- if _, err = w.Write(n.Name[:n.NameSize]); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:1]...)
+ writeBytes = append(writeBytes, n.Name[:n.NameSize]...)
}
if n.HasMime() {
util.Uint8toBytes(header[0:1], n.MimeSize)
- if _, err = w.Write(header[0:1]); err != nil {
- return
- }
- if _, err = w.Write(n.Mime); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:1]...)
+ writeBytes = append(writeBytes, n.Mime...)
}
if n.HasLastModifiedDate() {
util.Uint64toBytes(header[0:8], n.LastModified)
- if _, err = w.Write(header[8-LastModifiedBytesLength : 8]); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[8-LastModifiedBytesLength:8]...)
}
if n.HasTtl() && n.Ttl != nil {
n.Ttl.ToBytes(header[0:TtlBytesLength])
- if _, err = w.Write(header[0:TtlBytesLength]); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:TtlBytesLength]...)
}
if n.HasPairs() {
util.Uint16toBytes(header[0:2], n.PairsSize)
- if _, err = w.Write(header[0:2]); err != nil {
- return
- }
- if _, err = w.Write(n.Pairs); err != nil {
- return
- }
+ writeBytes = append(writeBytes, header[0:2]...)
+ writeBytes = append(writeBytes, n.Pairs...)
}
}
padding := PaddingLength(n.Size, version)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
if version == Version2 {
- _, err = w.Write(header[0 : NeedleChecksumSize+padding])
+ writeBytes = append(writeBytes, header[0:NeedleChecksumSize+padding]...)
} else {
// version3
util.Uint64toBytes(header[NeedleChecksumSize:NeedleChecksumSize+TimestampSize], n.AppendAtNs)
- _, err = w.Write(header[0 : NeedleChecksumSize+TimestampSize+padding])
+ writeBytes = append(writeBytes, header[0:NeedleChecksumSize+TimestampSize+padding]...)
}
- return offset, n.DataSize, getActualSize(n.Size, version), err
+ return writeBytes, n.DataSize, GetActualSize(n.Size, version), nil
}
- return 0, 0, 0, fmt.Errorf("Unsupported Version! (%d)", version)
+
+ return writeBytes, 0, 0, fmt.Errorf("Unsupported Version! (%d)", version)
}
-func ReadNeedleBlob(r *os.File, offset int64, size uint32, version Version) (dataSlice []byte, err error) {
- dataSlice = make([]byte, int(getActualSize(size, version)))
+func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset uint64, size uint32, actualSize int64, err error) {
+
+ if end, _, e := w.GetStat(); e == nil {
+ defer func(w backend.BackendStorageFile, off int64) {
+ if err != nil {
+ if te := w.Truncate(end); te != nil {
+ glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
+ }
+ }
+ }(w, end)
+ offset = uint64(end)
+ } else {
+ err = fmt.Errorf("Cannot Read Current Volume Position: %v", e)
+ return
+ }
+ if offset >= MaxPossibleVolumeSize {
+ err = fmt.Errorf("Volume Size %d Exeededs %d", offset, MaxPossibleVolumeSize)
+ return
+ }
+
+ bytesToWrite, size, actualSize, err := n.prepareWriteBuffer(version)
+
+ if err == nil {
+ _, err = w.WriteAt(bytesToWrite, int64(offset))
+ }
+
+ return offset, size, actualSize, err
+}
+
+func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size uint32, version Version) (dataSlice []byte, err error) {
+
+ dataSize := GetActualSize(size, version)
+ dataSlice = make([]byte, int(dataSize))
+
_, err = r.ReadAt(dataSlice, offset)
return dataSlice, err
+
}
-func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version) (err error) {
- bytes, err := ReadNeedleBlob(r, offset, size, version)
- if err != nil {
- return err
- }
+// ReadBytes hydrates the needle from the bytes buffer; only n.Id is expected to be set on entry.
+func (n *Needle) ReadBytes(bytes []byte, offset int64, size uint32, version Version) (err error) {
n.ParseNeedleHeader(bytes)
if n.Size != size {
- return fmt.Errorf("File Entry Not Found. offset %d, Needle id %d expected size %d Memory %d", offset, n.Id, n.Size, size)
+ return fmt.Errorf("entry not found: offset %d found id %d size %d, expected size %d", offset, n.Id, n.Size, size)
}
switch version {
case Version1:
- n.Data = bytes[NeedleEntrySize : NeedleEntrySize+size]
+ n.Data = bytes[NeedleHeaderSize : NeedleHeaderSize+size]
case Version2, Version3:
- n.readNeedleDataVersion2(bytes[NeedleEntrySize : NeedleEntrySize+int(n.Size)])
+ err = n.readNeedleDataVersion2(bytes[NeedleHeaderSize : NeedleHeaderSize+int(n.Size)])
}
- if size == 0 {
- return nil
+ if err != nil && err != io.EOF {
+ return err
}
- checksum := util.BytesToUint32(bytes[NeedleEntrySize+size : NeedleEntrySize+size+NeedleChecksumSize])
- newChecksum := NewCRC(n.Data)
- if checksum != newChecksum.Value() {
- return errors.New("CRC error! Data On Disk Corrupted")
+ if size > 0 {
+ checksum := util.BytesToUint32(bytes[NeedleHeaderSize+size : NeedleHeaderSize+size+NeedleChecksumSize])
+ newChecksum := NewCRC(n.Data)
+ if checksum != newChecksum.Value() {
+ return errors.New("CRC error! Data On Disk Corrupted")
+ }
+ n.Checksum = newChecksum
}
- n.Checksum = newChecksum
if version == Version3 {
- tsOffset := NeedleEntrySize + size + NeedleChecksumSize
+ tsOffset := NeedleHeaderSize + size + NeedleChecksumSize
n.AppendAtNs = util.BytesToUint64(bytes[tsOffset : tsOffset+TimestampSize])
}
return nil
}
+// ReadData hydrates the needle from the file; only n.Id is expected to be set on entry.
+func (n *Needle) ReadData(r backend.BackendStorageFile, offset int64, size uint32, version Version) (err error) {
+ bytes, err := ReadNeedleBlob(r, offset, size, version)
+ if err != nil {
+ return err
+ }
+ return n.ReadBytes(bytes, offset, size, version)
+}
+
func (n *Needle) ParseNeedleHeader(bytes []byte) {
n.Cookie = BytesToCookie(bytes[0:CookieSize])
n.Id = BytesToNeedleId(bytes[CookieSize : CookieSize+NeedleIdSize])
- n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleEntrySize])
+ n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleHeaderSize])
}
-func (n *Needle) readNeedleDataVersion2(bytes []byte) {
+func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) {
index, lenBytes := 0, len(bytes)
if index < lenBytes {
n.DataSize = util.BytesToUint32(bytes[index : index+4])
index = index + 4
if int(n.DataSize)+index > lenBytes {
- // this if clause is due to bug #87 and #93, fixed in v0.69
- // remove this clause later
- return
+ return fmt.Errorf("index out of range %d", 1)
}
n.Data = bytes[index : index+int(n.DataSize)]
index = index + int(n.DataSize)
@@ -224,53 +225,75 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) {
if index < lenBytes && n.HasName() {
n.NameSize = uint8(bytes[index])
index = index + 1
+ if int(n.NameSize)+index > lenBytes {
+ return fmt.Errorf("index out of range %d", 2)
+ }
n.Name = bytes[index : index+int(n.NameSize)]
index = index + int(n.NameSize)
}
if index < lenBytes && n.HasMime() {
n.MimeSize = uint8(bytes[index])
index = index + 1
+ if int(n.MimeSize)+index > lenBytes {
+ return fmt.Errorf("index out of range %d", 3)
+ }
n.Mime = bytes[index : index+int(n.MimeSize)]
index = index + int(n.MimeSize)
}
if index < lenBytes && n.HasLastModifiedDate() {
+ if LastModifiedBytesLength+index > lenBytes {
+ return fmt.Errorf("index out of range %d", 4)
+ }
n.LastModified = util.BytesToUint64(bytes[index : index+LastModifiedBytesLength])
index = index + LastModifiedBytesLength
}
if index < lenBytes && n.HasTtl() {
+ if TtlBytesLength+index > lenBytes {
+ return fmt.Errorf("index out of range %d", 5)
+ }
n.Ttl = LoadTTLFromBytes(bytes[index : index+TtlBytesLength])
index = index + TtlBytesLength
}
if index < lenBytes && n.HasPairs() {
+ if 2+index > lenBytes {
+ return fmt.Errorf("index out of range %d", 6)
+ }
n.PairsSize = util.BytesToUint16(bytes[index : index+2])
index += 2
+ if int(n.PairsSize)+index > lenBytes {
+ return fmt.Errorf("index out of range %d", 7)
+ }
end := index + int(n.PairsSize)
n.Pairs = bytes[index:end]
index = end
}
+ return nil
}
-func ReadNeedleHeader(r *os.File, version Version, offset int64) (n *Needle, bodyLength int64, err error) {
+func ReadNeedleHeader(r backend.BackendStorageFile, version Version, offset int64) (n *Needle, bytes []byte, bodyLength int64, err error) {
n = new(Needle)
if version == Version1 || version == Version2 || version == Version3 {
- bytes := make([]byte, NeedleEntrySize)
+ bytes = make([]byte, NeedleHeaderSize)
+
var count int
count, err = r.ReadAt(bytes, offset)
if count <= 0 || err != nil {
- return nil, 0, err
+ return nil, bytes, 0, err
}
+
n.ParseNeedleHeader(bytes)
bodyLength = NeedleBodyLength(n.Size, version)
}
+
return
}
func PaddingLength(needleSize uint32, version Version) uint32 {
if version == Version3 {
// this is same value as version2, but just listed here for clarity
- return NeedlePaddingSize - ((NeedleEntrySize + needleSize + NeedleChecksumSize + TimestampSize) % NeedlePaddingSize)
+ return NeedlePaddingSize - ((NeedleHeaderSize + needleSize + NeedleChecksumSize + TimestampSize) % NeedlePaddingSize)
}
- return NeedlePaddingSize - ((NeedleEntrySize + needleSize + NeedleChecksumSize) % NeedlePaddingSize)
+ return NeedlePaddingSize - ((NeedleHeaderSize + needleSize + NeedleChecksumSize) % NeedlePaddingSize)
}
func NeedleBodyLength(needleSize uint32, version Version) int64 {
@@ -282,36 +305,49 @@ func NeedleBodyLength(needleSize uint32, version Version) int64 {
//n should be a needle already read the header
//the input stream will read until next file entry
-func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyLength int64) (err error) {
+func (n *Needle) ReadNeedleBody(r backend.BackendStorageFile, version Version, offset int64, bodyLength int64) (bytes []byte, err error) {
+
if bodyLength <= 0 {
+ return nil, nil
+ }
+ bytes = make([]byte, bodyLength)
+ if _, err = r.ReadAt(bytes, offset); err != nil {
+ return
+ }
+
+ err = n.ReadNeedleBodyBytes(bytes, version)
+
+ return
+}
+
+func (n *Needle) ReadNeedleBodyBytes(needleBody []byte, version Version) (err error) {
+
+ if len(needleBody) <= 0 {
return nil
}
switch version {
case Version1:
- bytes := make([]byte, bodyLength)
- if _, err = r.ReadAt(bytes, offset); err != nil {
- return
- }
- n.Data = bytes[:n.Size]
+ n.Data = needleBody[:n.Size]
n.Checksum = NewCRC(n.Data)
case Version2, Version3:
- bytes := make([]byte, bodyLength)
- if _, err = r.ReadAt(bytes, offset); err != nil {
- return
- }
- n.readNeedleDataVersion2(bytes[0:n.Size])
+ err = n.readNeedleDataVersion2(needleBody[0:n.Size])
n.Checksum = NewCRC(n.Data)
+
+ if version == Version3 {
+ tsOffset := n.Size + NeedleChecksumSize
+ n.AppendAtNs = util.BytesToUint64(needleBody[tsOffset : tsOffset+TimestampSize])
+ }
default:
- err = fmt.Errorf("Unsupported Version! (%d)", version)
+ err = fmt.Errorf("unsupported version %d!", version)
}
return
}
-func (n *Needle) IsGzipped() bool {
- return n.Flags&FlagGzip > 0
+func (n *Needle) IsCompressed() bool {
+ return n.Flags&FlagIsCompressed > 0
}
-func (n *Needle) SetGzipped() {
- n.Flags = n.Flags | FlagGzip
+func (n *Needle) SetIsCompressed() {
+ n.Flags = n.Flags | FlagIsCompressed
}
func (n *Needle) HasName() bool {
return n.Flags&FlagHasName > 0
@@ -353,3 +389,7 @@ func (n *Needle) HasPairs() bool {
func (n *Needle) SetHasPairs() {
n.Flags = n.Flags | FlagHasPairs
}
+
+func GetActualSize(size uint32, version Version) int64 {
+ return NeedleHeaderSize + NeedleBodyLength(size, version)
+}
diff --git a/weed/storage/needle_read_write_test.go b/weed/storage/needle/needle_read_write_test.go
similarity index 91%
rename from weed/storage/needle_read_write_test.go
rename to weed/storage/needle/needle_read_write_test.go
index 8bd9205c1..47582dd26 100644
--- a/weed/storage/needle_read_write_test.go
+++ b/weed/storage/needle/needle_read_write_test.go
@@ -1,12 +1,12 @@
-package storage
+package needle
import (
- "crypto/rand"
- "github.com/chrislusf/seaweedfs/weed/storage/types"
- "io"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
func TestAppend(t *testing.T) {
@@ -49,13 +49,16 @@ func TestAppend(t *testing.T) {
*/
fileSize := int64(4294967295) + 10000
- io.CopyN(tempFile, rand.Reader, fileSize)
+ tempFile.Truncate(fileSize)
defer func() {
tempFile.Close()
os.Remove(tempFile.Name())
}()
- offset, _, _, _ := n.Append(tempFile, CurrentVersion)
+ datBackend := backend.NewDiskFile(tempFile)
+ defer datBackend.Close()
+
+ offset, _, _, _ := n.Append(datBackend, CurrentVersion)
if offset != uint64(fileSize) {
t.Errorf("Fail to Append Needle.")
}
diff --git a/weed/storage/needle_test.go b/weed/storage/needle/needle_test.go
similarity index 98%
rename from weed/storage/needle_test.go
rename to weed/storage/needle/needle_test.go
index 65036409c..0f2dde98e 100644
--- a/weed/storage/needle_test.go
+++ b/weed/storage/needle/needle_test.go
@@ -1,8 +1,9 @@
-package storage
+package needle
import (
- "github.com/chrislusf/seaweedfs/weed/storage/types"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
func TestParseKeyHash(t *testing.T) {
diff --git a/weed/storage/needle/needle_value.go b/weed/storage/needle/needle_value.go
deleted file mode 100644
index 96ee83009..000000000
--- a/weed/storage/needle/needle_value.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package needle
-
-import (
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/google/btree"
-)
-
-type NeedleValue struct {
- Key NeedleId
- Offset Offset `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
- Size uint32 `comment:"Size of the data portion"`
-}
-
-func (this NeedleValue) Less(than btree.Item) bool {
- that := than.(NeedleValue)
- return this.Key < that.Key
-}
diff --git a/weed/storage/volume_id.go b/weed/storage/needle/volume_id.go
similarity index 51%
rename from weed/storage/volume_id.go
rename to weed/storage/needle/volume_id.go
index 0333c6cf0..3366c14bf 100644
--- a/weed/storage/volume_id.go
+++ b/weed/storage/needle/volume_id.go
@@ -1,4 +1,4 @@
-package storage
+package needle
import (
"strconv"
@@ -10,9 +10,9 @@ func NewVolumeId(vid string) (VolumeId, error) {
volumeId, err := strconv.ParseUint(vid, 10, 64)
return VolumeId(volumeId), err
}
-func (vid *VolumeId) String() string {
- return strconv.FormatUint(uint64(*vid), 10)
+func (vid VolumeId) String() string {
+ return strconv.FormatUint(uint64(vid), 10)
}
-func (vid *VolumeId) Next() VolumeId {
- return VolumeId(uint32(*vid) + 1)
+func (vid VolumeId) Next() VolumeId {
+ return VolumeId(uint32(vid) + 1)
}
diff --git a/weed/storage/needle/volume_id_test.go b/weed/storage/needle/volume_id_test.go
new file mode 100644
index 000000000..5ffcd90f6
--- /dev/null
+++ b/weed/storage/needle/volume_id_test.go
@@ -0,0 +1,45 @@
+package needle
+
+import "testing"
+
+func TestNewVolumeId(t *testing.T) {
+ if _, err := NewVolumeId("1"); err != nil {
+ t.Error(err)
+ }
+
+ if _, err := NewVolumeId("a"); err != nil {
+ t.Logf("a is not legal volume id, %v", err)
+ }
+}
+
+func TestVolumeId_String(t *testing.T) {
+ if str := VolumeId(10).String(); str != "10" {
+ t.Errorf("to string failed")
+ }
+
+ vid := VolumeId(11)
+ if str := vid.String(); str != "11" {
+ t.Errorf("to string failed")
+ }
+
+ pvid := &vid
+ if str := pvid.String(); str != "11" {
+ t.Errorf("to string failed")
+ }
+}
+
+func TestVolumeId_Next(t *testing.T) {
+ if vid := VolumeId(10).Next(); vid != VolumeId(11) {
+ t.Errorf("get next volume id failed")
+ }
+
+ vid := VolumeId(11)
+ if next := vid.Next(); next != 12 {
+ t.Errorf("get next volume id failed")
+ }
+
+ pvid := &vid
+ if next := pvid.Next(); next != 12 {
+ t.Errorf("get next volume id failed")
+ }
+}
diff --git a/weed/storage/volume_ttl.go b/weed/storage/needle/volume_ttl.go
similarity index 72%
rename from weed/storage/volume_ttl.go
rename to weed/storage/needle/volume_ttl.go
index 4318bb048..179057876 100644
--- a/weed/storage/volume_ttl.go
+++ b/weed/storage/needle/volume_ttl.go
@@ -1,4 +1,4 @@
-package storage
+package needle
import (
"strconv"
@@ -16,8 +16,8 @@ const (
)
type TTL struct {
- count byte
- unit byte
+ Count byte
+ Unit byte
}
var EMPTY_TTL = &TTL{}
@@ -43,12 +43,15 @@ func ReadTTL(ttlString string) (*TTL, error) {
}
count, err := strconv.Atoi(string(countBytes))
unit := toStoredByte(unitByte)
- return &TTL{count: byte(count), unit: unit}, err
+ return &TTL{Count: byte(count), Unit: unit}, err
}
// read stored bytes to a ttl
func LoadTTLFromBytes(input []byte) (t *TTL) {
- return &TTL{count: input[0], unit: input[1]}
+ if input[0] == 0 && input[1] == 0 {
+ return EMPTY_TTL
+ }
+ return &TTL{Count: input[0], Unit: input[1]}
}
// read stored bytes to a ttl
@@ -61,25 +64,28 @@ func LoadTTLFromUint32(ttl uint32) (t *TTL) {
// save stored bytes to an output with 2 bytes
func (t *TTL) ToBytes(output []byte) {
- output[0] = t.count
- output[1] = t.unit
+ output[0] = t.Count
+ output[1] = t.Unit
}
func (t *TTL) ToUint32() (output uint32) {
- output = uint32(t.count) << 8
- output += uint32(t.unit)
+ if t == nil || t.Count == 0 {
+ return 0
+ }
+ output = uint32(t.Count) << 8
+ output += uint32(t.Unit)
return output
}
func (t *TTL) String() string {
- if t == nil || t.count == 0 {
+ if t == nil || t.Count == 0 {
return ""
}
- if t.unit == Empty {
+ if t.Unit == Empty {
return ""
}
- countString := strconv.Itoa(int(t.count))
- switch t.unit {
+ countString := strconv.Itoa(int(t.Count))
+ switch t.Unit {
case Minute:
return countString + "m"
case Hour:
@@ -115,21 +121,21 @@ func toStoredByte(readableUnitByte byte) byte {
}
func (t TTL) Minutes() uint32 {
- switch t.unit {
+ switch t.Unit {
case Empty:
return 0
case Minute:
- return uint32(t.count)
+ return uint32(t.Count)
case Hour:
- return uint32(t.count) * 60
+ return uint32(t.Count) * 60
case Day:
- return uint32(t.count) * 60 * 24
+ return uint32(t.Count) * 60 * 24
case Week:
- return uint32(t.count) * 60 * 24 * 7
+ return uint32(t.Count) * 60 * 24 * 7
case Month:
- return uint32(t.count) * 60 * 24 * 31
+ return uint32(t.Count) * 60 * 24 * 31
case Year:
- return uint32(t.count) * 60 * 24 * 365
+ return uint32(t.Count) * 60 * 24 * 365
}
return 0
}
diff --git a/weed/storage/volume_ttl_test.go b/weed/storage/needle/volume_ttl_test.go
similarity index 98%
rename from weed/storage/volume_ttl_test.go
rename to weed/storage/needle/volume_ttl_test.go
index 216469a4c..0afebebf5 100644
--- a/weed/storage/volume_ttl_test.go
+++ b/weed/storage/needle/volume_ttl_test.go
@@ -1,4 +1,4 @@
-package storage
+package needle
import (
"testing"
diff --git a/weed/storage/volume_version.go b/weed/storage/needle/volume_version.go
similarity index 90%
rename from weed/storage/volume_version.go
rename to weed/storage/needle/volume_version.go
index fc0270c03..54daac77f 100644
--- a/weed/storage/volume_version.go
+++ b/weed/storage/needle/volume_version.go
@@ -1,4 +1,4 @@
-package storage
+package needle
type Version uint8
diff --git a/weed/storage/needle_byte_cache.go b/weed/storage/needle_byte_cache.go
deleted file mode 100644
index 78c1ea862..000000000
--- a/weed/storage/needle_byte_cache.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package storage
-
-import (
- "os"
-)
-
-func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, err error) {
- dataSlice = make([]byte, readSize)
- _, err = r.ReadAt(dataSlice, offset)
- return dataSlice, err
-}
diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go
index 6d815679b..8962e78cb 100644
--- a/weed/storage/needle_map.go
+++ b/weed/storage/needle_map.go
@@ -2,27 +2,25 @@ package storage
import (
"fmt"
- "io/ioutil"
"os"
"sync"
- "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/chrislusf/seaweedfs/weed/util"
)
type NeedleMapType int
const (
- NeedleMapInMemory NeedleMapType = iota
- NeedleMapLevelDb
- NeedleMapBoltDb
- NeedleMapBtree
+ NeedleMapInMemory NeedleMapType = iota
+ NeedleMapLevelDb // small memory footprint, 4MB total, 1 write buffer, 3 block buffer
+ NeedleMapLevelDbMedium // medium memory footprint, 8MB total, 3 write buffer, 5 block buffer
+ NeedleMapLevelDbLarge // large memory footprint, 12MB total, 4 write buffer, 8 block buffer
)
type NeedleMapper interface {
Put(key NeedleId, offset Offset, size uint32) error
- Get(key NeedleId) (element *needle.NeedleValue, ok bool)
+ Get(key NeedleId) (element *needle_map.NeedleValue, ok bool)
Delete(key NeedleId, offset Offset) error
Close()
Destroy() error
@@ -32,15 +30,14 @@ type NeedleMapper interface {
DeletedCount() int
MaxFileKey() NeedleId
IndexFileSize() uint64
- IndexFileContent() ([]byte, error)
- IndexFileName() string
+ Sync() error
}
type baseNeedleMapper struct {
+ mapMetric
+
indexFile *os.File
indexFileAccessLock sync.Mutex
-
- mapMetric
}
func (nm *baseNeedleMapper) IndexFileSize() uint64 {
@@ -51,21 +48,8 @@ func (nm *baseNeedleMapper) IndexFileSize() uint64 {
return 0
}
-func (nm *baseNeedleMapper) IndexFileName() string {
- return nm.indexFile.Name()
-}
-
-func IdxFileEntry(bytes []byte) (key NeedleId, offset Offset, size uint32) {
- key = BytesToNeedleId(bytes[:NeedleIdSize])
- offset = BytesToOffset(bytes[NeedleIdSize : NeedleIdSize+OffsetSize])
- size = util.BytesToUint32(bytes[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
- return
-}
func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size uint32) error {
- bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
- NeedleIdToBytes(bytes[0:NeedleIdSize], key)
- OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
- util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
+ bytes := needle_map.ToBytes(key, offset, size)
nm.indexFileAccessLock.Lock()
defer nm.indexFileAccessLock.Unlock()
@@ -76,8 +60,7 @@ func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size
_, err := nm.indexFile.Write(bytes)
return err
}
-func (nm *baseNeedleMapper) IndexFileContent() ([]byte, error) {
- nm.indexFileAccessLock.Lock()
- defer nm.indexFileAccessLock.Unlock()
- return ioutil.ReadFile(nm.indexFile.Name())
+
+func (nm *baseNeedleMapper) Sync() error {
+ return nm.indexFile.Sync()
}
diff --git a/weed/storage/needle_map/compact_map.go b/weed/storage/needle_map/compact_map.go
new file mode 100644
index 000000000..76783d0b0
--- /dev/null
+++ b/weed/storage/needle_map/compact_map.go
@@ -0,0 +1,302 @@
+package needle_map
+
+import (
+ "sort"
+ "sync"
+
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+const (
+ batch = 100000
+)
+
+type SectionalNeedleId uint32
+
+const SectionalNeedleIdLimit = 1<<32 - 1
+
+type SectionalNeedleValue struct {
+ Key SectionalNeedleId
+	OffsetLower OffsetLower `comment:"Volume offset"` // offsets are 8-byte aligned, so 4 bytes address 4G*8 = 32GB
+ Size uint32 `comment:"Size of the data portion"`
+}
+
+type SectionalNeedleValueExtra struct {
+ OffsetHigher OffsetHigher
+}
+
+type CompactSection struct {
+ sync.RWMutex
+ values []SectionalNeedleValue
+ valuesExtra []SectionalNeedleValueExtra
+ overflow Overflow
+ overflowExtra OverflowExtra
+ start NeedleId
+ end NeedleId
+ counter int
+}
+
+type Overflow []SectionalNeedleValue
+type OverflowExtra []SectionalNeedleValueExtra
+
+func NewCompactSection(start NeedleId) *CompactSection {
+ return &CompactSection{
+ values: make([]SectionalNeedleValue, batch),
+ valuesExtra: make([]SectionalNeedleValueExtra, batch),
+ overflow: Overflow(make([]SectionalNeedleValue, 0)),
+ overflowExtra: OverflowExtra(make([]SectionalNeedleValueExtra, 0)),
+ start: start,
+ }
+}
+
+// Set inserts or updates an entry and returns the previous offset and size, if any.
+func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
+ cs.Lock()
+ if key > cs.end {
+ cs.end = key
+ }
+ skey := SectionalNeedleId(key - cs.start)
+ if i := cs.binarySearchValues(skey); i >= 0 {
+ oldOffset.OffsetHigher, oldOffset.OffsetLower, oldSize = cs.valuesExtra[i].OffsetHigher, cs.values[i].OffsetLower, cs.values[i].Size
+ //println("key", key, "old size", ret)
+ cs.valuesExtra[i].OffsetHigher, cs.values[i].OffsetLower, cs.values[i].Size = offset.OffsetHigher, offset.OffsetLower, size
+ } else {
+ needOverflow := cs.counter >= batch
+ needOverflow = needOverflow || cs.counter > 0 && cs.values[cs.counter-1].Key > skey
+ if needOverflow {
+ //println("start", cs.start, "counter", cs.counter, "key", key)
+ if oldValueExtra, oldValue, found := cs.findOverflowEntry(skey); found {
+ oldOffset.OffsetHigher, oldOffset.OffsetLower, oldSize = oldValueExtra.OffsetHigher, oldValue.OffsetLower, oldValue.Size
+ }
+ cs.setOverflowEntry(skey, offset, size)
+ } else {
+ p := &cs.values[cs.counter]
+ p.Key, cs.valuesExtra[cs.counter].OffsetHigher, p.OffsetLower, p.Size = skey, offset.OffsetHigher, offset.OffsetLower, size
+ //println("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
+ cs.counter++
+ }
+ }
+ cs.Unlock()
+ return
+}
+
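+// setOverflowEntry keeps cs.overflow and the parallel overflowExtra slice
+// sorted by key: it binary-searches the insertion point, overwrites an
+// existing entry in place, or appends and shifts entries right to make room.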
+func (cs *CompactSection) setOverflowEntry(skey SectionalNeedleId, offset Offset, size uint32) {
+ needleValue := SectionalNeedleValue{Key: skey, OffsetLower: offset.OffsetLower, Size: size}
+ needleValueExtra := SectionalNeedleValueExtra{OffsetHigher: offset.OffsetHigher}
+ insertCandidate := sort.Search(len(cs.overflow), func(i int) bool {
+ return cs.overflow[i].Key >= needleValue.Key
+ })
+ if insertCandidate != len(cs.overflow) && cs.overflow[insertCandidate].Key == needleValue.Key {
+ cs.overflow[insertCandidate] = needleValue
+ } else {
+ cs.overflow = append(cs.overflow, needleValue)
+ cs.overflowExtra = append(cs.overflowExtra, needleValueExtra)
+ for i := len(cs.overflow) - 1; i > insertCandidate; i-- {
+ cs.overflow[i] = cs.overflow[i-1]
+ cs.overflowExtra[i] = cs.overflowExtra[i-1]
+ }
+ cs.overflow[insertCandidate] = needleValue
+ }
+}
+
+func (cs *CompactSection) findOverflowEntry(key SectionalNeedleId) (nve SectionalNeedleValueExtra, nv SectionalNeedleValue, found bool) {
+ foundCandidate := sort.Search(len(cs.overflow), func(i int) bool {
+ return cs.overflow[i].Key >= key
+ })
+ if foundCandidate != len(cs.overflow) && cs.overflow[foundCandidate].Key == key {
+ return cs.overflowExtra[foundCandidate], cs.overflow[foundCandidate], true
+ }
+ return nve, nv, false
+}
+
+func (cs *CompactSection) deleteOverflowEntry(key SectionalNeedleId) {
+ length := len(cs.overflow)
+ deleteCandidate := sort.Search(length, func(i int) bool {
+ return cs.overflow[i].Key >= key
+ })
+ if deleteCandidate != length && cs.overflow[deleteCandidate].Key == key {
+ for i := deleteCandidate; i < length-1; i++ {
+ cs.overflow[i] = cs.overflow[i+1]
+ cs.overflowExtra[i] = cs.overflowExtra[i+1]
+ }
+ cs.overflow = cs.overflow[0 : length-1]
+ cs.overflowExtra = cs.overflowExtra[0 : length-1]
+ }
+}
+
+// Delete tombstones an entry and returns the size it previously occupied.
+func (cs *CompactSection) Delete(key NeedleId) uint32 {
+ skey := SectionalNeedleId(key - cs.start)
+ cs.Lock()
+ ret := uint32(0)
+ if i := cs.binarySearchValues(skey); i >= 0 {
+ if cs.values[i].Size > 0 && cs.values[i].Size != TombstoneFileSize {
+ ret = cs.values[i].Size
+ cs.values[i].Size = TombstoneFileSize
+ }
+ }
+ if _, v, found := cs.findOverflowEntry(skey); found {
+ cs.deleteOverflowEntry(skey)
+ ret = v.Size
+ }
+ cs.Unlock()
+ return ret
+}
+func (cs *CompactSection) Get(key NeedleId) (*NeedleValue, bool) {
+ cs.RLock()
+ skey := SectionalNeedleId(key - cs.start)
+ if ve, v, ok := cs.findOverflowEntry(skey); ok {
+ cs.RUnlock()
+ nv := toNeedleValue(ve, v, cs)
+ return &nv, true
+ }
+ if i := cs.binarySearchValues(skey); i >= 0 {
+ cs.RUnlock()
+ nv := toNeedleValue(cs.valuesExtra[i], cs.values[i], cs)
+ return &nv, true
+ }
+ cs.RUnlock()
+ return nil, false
+}
+func (cs *CompactSection) binarySearchValues(key SectionalNeedleId) int {
+ x := sort.Search(cs.counter, func(i int) bool {
+ return cs.values[i].Key >= key
+ })
+ if x == cs.counter {
+ return -1
+ }
+ if cs.values[x].Key > key {
+ return -2
+ }
+ return x
+}
+
+// CompactMap assumes keys are mostly inserted in increasing order: each
+// section holds up to `batch` presorted entries, and out-of-order or
+// overflowing keys spill into that section's sorted overflow slice.
+type CompactMap struct {
+ list []*CompactSection
+}
+
+func NewCompactMap() *CompactMap {
+ return &CompactMap{}
+}
+
+func (cm *CompactMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
+ x := cm.binarySearchCompactSection(key)
+ if x < 0 || (key-cm.list[x].start) > SectionalNeedleIdLimit {
+ // println(x, "adding to existing", len(cm.list), "sections, starting", key)
+ cs := NewCompactSection(key)
+ cm.list = append(cm.list, cs)
+ x = len(cm.list) - 1
+ //keep compact section sorted by start
+ for x >= 0 {
+ if x > 0 && cm.list[x-1].start > key {
+ cm.list[x] = cm.list[x-1]
+ // println("shift", x, "start", cs.start, "to", x-1)
+ x = x - 1
+ } else {
+ cm.list[x] = cs
+ // println("cs", x, "start", cs.start)
+ break
+ }
+ }
+ }
+ // println(key, "set to section[", x, "].start", cm.list[x].start)
+ return cm.list[x].Set(key, offset, size)
+}
+func (cm *CompactMap) Delete(key NeedleId) uint32 {
+ x := cm.binarySearchCompactSection(key)
+ if x < 0 {
+ return uint32(0)
+ }
+ return cm.list[x].Delete(key)
+}
+func (cm *CompactMap) Get(key NeedleId) (*NeedleValue, bool) {
+ x := cm.binarySearchCompactSection(key)
+ if x < 0 {
+ return nil, false
+ }
+ return cm.list[x].Get(key)
+}
+func (cm *CompactMap) binarySearchCompactSection(key NeedleId) int {
+ l, h := 0, len(cm.list)-1
+ if h < 0 {
+ return -5
+ }
+ if cm.list[h].start <= key {
+ if cm.list[h].counter < batch || key <= cm.list[h].end {
+ return h
+ }
+ return -4
+ }
+ for l <= h {
+ m := (l + h) / 2
+ if key < cm.list[m].start {
+ h = m - 1
+ } else { // cm.list[m].start <= key
+ if cm.list[m+1].start <= key {
+ l = m + 1
+ } else {
+ return m
+ }
+ }
+ }
+ return -3
+}
+
+// AscendingVisit visits all entries in ascending key order, stopping at the first error returned by visit.
+func (cm *CompactMap) AscendingVisit(visit func(NeedleValue) error) error {
+ for _, cs := range cm.list {
+ cs.RLock()
+ var i, j int
+ for i, j = 0, 0; i < len(cs.overflow) && j < len(cs.values) && j < cs.counter; {
+ if cs.overflow[i].Key < cs.values[j].Key {
+ if err := visit(toNeedleValue(cs.overflowExtra[i], cs.overflow[i], cs)); err != nil {
+ cs.RUnlock()
+ return err
+ }
+ i++
+ } else if cs.overflow[i].Key == cs.values[j].Key {
+ j++
+ } else {
+ if err := visit(toNeedleValue(cs.valuesExtra[j], cs.values[j], cs)); err != nil {
+ cs.RUnlock()
+ return err
+ }
+ j++
+ }
+ }
+ for ; i < len(cs.overflow); i++ {
+ if err := visit(toNeedleValue(cs.overflowExtra[i], cs.overflow[i], cs)); err != nil {
+ cs.RUnlock()
+ return err
+ }
+ }
+ for ; j < len(cs.values) && j < cs.counter; j++ {
+ if err := visit(toNeedleValue(cs.valuesExtra[j], cs.values[j], cs)); err != nil {
+ cs.RUnlock()
+ return err
+ }
+ }
+ cs.RUnlock()
+ }
+ return nil
+}
+
+func toNeedleValue(snve SectionalNeedleValueExtra, snv SectionalNeedleValue, cs *CompactSection) NeedleValue {
+ offset := Offset{
+ OffsetHigher: snve.OffsetHigher,
+ OffsetLower: snv.OffsetLower,
+ }
+ return NeedleValue{Key: NeedleId(snv.Key) + cs.start, Offset: offset, Size: snv.Size}
+}
+
+func (nv NeedleValue) toSectionalNeedleValue(cs *CompactSection) (SectionalNeedleValue, SectionalNeedleValueExtra) {
+ return SectionalNeedleValue{
+ SectionalNeedleId(nv.Key - cs.start),
+ nv.Offset.OffsetLower,
+ nv.Size,
+ }, SectionalNeedleValueExtra{
+ nv.Offset.OffsetHigher,
+ }
+}
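
A quick usage sketch of CompactMap, in package needle_map with the same dot-import of storage/types used above:

m := NewCompactMap()
m.Set(NeedleId(1), ToOffset(8), 100) // insert or update; returns the previous offset/size
if v, ok := m.Get(NeedleId(1)); ok {
	println("key:", uint64(v.Key), "size:", v.Size)
}
m.Delete(NeedleId(1)) // tombstones the entry, returning the freed size
_ = m.AscendingVisit(func(v NeedleValue) error {
	return nil // invoked in ascending key order, merging values and overflow
})
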
diff --git a/weed/storage/needle/compact_map_perf_test.go b/weed/storage/needle_map/compact_map_perf_test.go
similarity index 72%
rename from weed/storage/needle/compact_map_perf_test.go
rename to weed/storage/needle_map/compact_map_perf_test.go
index cd21cc184..3a3648641 100644
--- a/weed/storage/needle/compact_map_perf_test.go
+++ b/weed/storage/needle_map/compact_map_perf_test.go
@@ -1,4 +1,4 @@
-package needle
+package needle_map
import (
"fmt"
@@ -28,6 +28,7 @@ go tool pprof --alloc_space needle.test mem.out
func TestMemoryUsage(t *testing.T) {
var maps []*CompactMap
+ totalRowCount := uint64(0)
startTime := time.Now()
for i := 0; i < 10; i++ {
@@ -35,11 +36,13 @@ func TestMemoryUsage(t *testing.T) {
if ie != nil {
log.Fatalln(ie)
}
- maps = append(maps, loadNewNeedleMap(indexFile))
+ m, rowCount := loadNewNeedleMap(indexFile)
+ maps = append(maps, m)
+ totalRowCount += rowCount
indexFile.Close()
- PrintMemUsage()
+ PrintMemUsage(totalRowCount)
now := time.Now()
fmt.Printf("\tTaken = %v\n", now.Sub(startTime))
startTime = now
@@ -47,17 +50,19 @@ func TestMemoryUsage(t *testing.T) {
}
-func loadNewNeedleMap(file *os.File) *CompactMap {
+func loadNewNeedleMap(file *os.File) (*CompactMap, uint64) {
m := NewCompactMap()
- bytes := make([]byte, NeedleEntrySize)
+ bytes := make([]byte, NeedleMapEntrySize)
+ rowCount := uint64(0)
count, e := file.Read(bytes)
for count > 0 && e == nil {
- for i := 0; i < count; i += NeedleEntrySize {
+ for i := 0; i < count; i += NeedleMapEntrySize {
+ rowCount++
key := BytesToNeedleId(bytes[i : i+NeedleIdSize])
offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize])
size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize])
- if offset > 0 {
+ if !offset.IsZero() {
m.Set(NeedleId(key), offset, size)
} else {
m.Delete(key)
@@ -67,17 +72,18 @@ func loadNewNeedleMap(file *os.File) *CompactMap {
count, e = file.Read(bytes)
}
- return m
+ return m, rowCount
}
-func PrintMemUsage() {
+func PrintMemUsage(totalRowCount uint64) {
runtime.GC()
var m runtime.MemStats
runtime.ReadMemStats(&m)
// For info on each, see: https://golang.org/pkg/runtime/#MemStats
- fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
+	fmt.Printf("Per entry = %.2f bytes", float64(m.TotalAlloc)/float64(totalRowCount))
+ fmt.Printf("\tAlloc = %v MiB", bToMb(m.Alloc))
fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
fmt.Printf("\tNumGC = %v", m.NumGC)
diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go
new file mode 100644
index 000000000..7eea3969a
--- /dev/null
+++ b/weed/storage/needle_map/compact_map_test.go
@@ -0,0 +1,166 @@
+package needle_map
+
+import (
+ "fmt"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "testing"
+)
+
+func TestOverflow2(t *testing.T) {
+ m := NewCompactMap()
+ _, oldSize := m.Set(NeedleId(150088), ToOffset(8), 3000073)
+ if oldSize != 0 {
+ t.Fatalf("expecting no previous data")
+ }
+ _, oldSize = m.Set(NeedleId(150088), ToOffset(8), 3000073)
+ if oldSize != 3000073 {
+ t.Fatalf("expecting previous data size is %d, not %d", 3000073, oldSize)
+ }
+ m.Set(NeedleId(150073), ToOffset(8), 3000073)
+ m.Set(NeedleId(150089), ToOffset(8), 3000073)
+ m.Set(NeedleId(150076), ToOffset(8), 3000073)
+ m.Set(NeedleId(150124), ToOffset(8), 3000073)
+ m.Set(NeedleId(150137), ToOffset(8), 3000073)
+ m.Set(NeedleId(150147), ToOffset(8), 3000073)
+ m.Set(NeedleId(150145), ToOffset(8), 3000073)
+ m.Set(NeedleId(150158), ToOffset(8), 3000073)
+ m.Set(NeedleId(150162), ToOffset(8), 3000073)
+
+ m.AscendingVisit(func(value NeedleValue) error {
+ println("needle key:", value.Key)
+ return nil
+ })
+}
+
+func TestIssue52(t *testing.T) {
+ m := NewCompactMap()
+ m.Set(NeedleId(10002), ToOffset(10002), 10002)
+ if element, ok := m.Get(NeedleId(10002)); ok {
+ fmt.Printf("key %d ok %v %d, %v, %d\n", 10002, ok, element.Key, element.Offset, element.Size)
+ }
+ m.Set(NeedleId(10001), ToOffset(10001), 10001)
+ if element, ok := m.Get(NeedleId(10002)); ok {
+ fmt.Printf("key %d ok %v %d, %v, %d\n", 10002, ok, element.Key, element.Offset, element.Size)
+ } else {
+ t.Fatal("key 10002 missing after setting 10001")
+ }
+}
+
+func TestCompactMap(t *testing.T) {
+ m := NewCompactMap()
+ for i := uint32(0); i < 100*batch; i += 2 {
+ m.Set(NeedleId(i), ToOffset(int64(i)), i)
+ }
+
+ for i := uint32(0); i < 100*batch; i += 37 {
+ m.Delete(NeedleId(i))
+ }
+
+ for i := uint32(0); i < 10*batch; i += 3 {
+ m.Set(NeedleId(i), ToOffset(int64(i+11)), i+5)
+ }
+
+ // for i := uint32(0); i < 100; i++ {
+ // if v := m.Get(Key(i)); v != nil {
+ // glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
+ // }
+ // }
+
+ for i := uint32(0); i < 10*batch; i++ {
+ v, ok := m.Get(NeedleId(i))
+ if i%3 == 0 {
+ if !ok {
+ t.Fatal("key", i, "missing!")
+ }
+ if v.Size != i+5 {
+ t.Fatal("key", i, "size", v.Size)
+ }
+ } else if i%37 == 0 {
+ if ok && v.Size != TombstoneFileSize {
+ t.Fatal("key", i, "should have been deleted needle value", v)
+ }
+ } else if i%2 == 0 {
+ if v.Size != i {
+ t.Fatal("key", i, "size", v.Size)
+ }
+ }
+ }
+
+ for i := uint32(10 * batch); i < 100*batch; i++ {
+ v, ok := m.Get(NeedleId(i))
+ if i%37 == 0 {
+ if ok && v.Size != TombstoneFileSize {
+ t.Fatal("key", i, "should have been deleted needle value", v)
+ }
+ } else if i%2 == 0 {
+ if v == nil {
+ t.Fatal("key", i, "missing")
+ }
+ if v.Size != i {
+ t.Fatal("key", i, "size", v.Size)
+ }
+ }
+ }
+
+}
+
+func TestOverflow(t *testing.T) {
+ cs := NewCompactSection(1)
+
+ cs.setOverflowEntry(1, ToOffset(12), 12)
+ cs.setOverflowEntry(2, ToOffset(12), 12)
+ cs.setOverflowEntry(3, ToOffset(12), 12)
+ cs.setOverflowEntry(4, ToOffset(12), 12)
+ cs.setOverflowEntry(5, ToOffset(12), 12)
+
+ if cs.overflow[2].Key != 3 {
+ t.Fatalf("expecting o[2] has key 3: %+v", cs.overflow[2].Key)
+ }
+
+ cs.setOverflowEntry(3, ToOffset(24), 24)
+
+ if cs.overflow[2].Key != 3 {
+ t.Fatalf("expecting o[2] has key 3: %+v", cs.overflow[2].Key)
+ }
+
+ if cs.overflow[2].Size != 24 {
+ t.Fatalf("expecting o[2] has size 24: %+v", cs.overflow[2].Size)
+ }
+
+ cs.deleteOverflowEntry(4)
+
+ if len(cs.overflow) != 4 {
+ t.Fatalf("expecting 4 entries now: %+v", cs.overflow)
+ }
+
+ _, x, _ := cs.findOverflowEntry(5)
+ if x.Key != 5 {
+ t.Fatalf("expecting entry 5 now: %+v", x)
+ }
+
+ for i, x := range cs.overflow {
+ println("overflow[", i, "]:", x.Key)
+ }
+ println()
+
+ cs.deleteOverflowEntry(1)
+
+ for i, x := range cs.overflow {
+ println("overflow[", i, "]:", x.Key)
+ }
+ println()
+
+ cs.setOverflowEntry(4, ToOffset(44), 44)
+ for i, x := range cs.overflow {
+ println("overflow[", i, "]:", x.Key)
+ }
+ println()
+
+ cs.setOverflowEntry(1, ToOffset(11), 11)
+
+ for i, x := range cs.overflow {
+ println("overflow[", i, "]:", x.Key)
+ }
+ println()
+
+}
diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go
new file mode 100644
index 000000000..a52d52a10
--- /dev/null
+++ b/weed/storage/needle_map/memdb.go
@@ -0,0 +1,119 @@
+package needle_map
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// MemDb is a needle map backed by an in-memory LevelDB instance.
+type MemDb struct {
+ db *leveldb.DB
+}
+
+func NewMemDb() *MemDb {
+ opts := &opt.Options{}
+
+ var err error
+ t := &MemDb{}
+ if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil {
+ glog.V(0).Infof("MemDb fails to open: %v", err)
+ return nil
+ }
+
+ return t
+}
+
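+// Set stores one entry with the needle id bytes as the LevelDB key and the
+// offset+size bytes as the value, reusing the shared ToBytes layout.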
+func (cm *MemDb) Set(key NeedleId, offset Offset, size uint32) error {
+
+ bytes := ToBytes(key, offset, size)
+
+ if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
+ return fmt.Errorf("failed to write temp leveldb: %v", err)
+ }
+ return nil
+}
+
+func (cm *MemDb) Delete(key NeedleId) error {
+ bytes := make([]byte, NeedleIdSize)
+ NeedleIdToBytes(bytes, key)
+ return cm.db.Delete(bytes, nil)
+
+}
+func (cm *MemDb) Get(key NeedleId) (*NeedleValue, bool) {
+ bytes := make([]byte, NeedleIdSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ data, err := cm.db.Get(bytes, nil)
+ if err != nil || len(data) != OffsetSize+SizeSize {
+ return nil, false
+ }
+ offset := BytesToOffset(data[0:OffsetSize])
+ size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
+ return &NeedleValue{Key: key, Offset: offset, Size: size}, true
+}
+
+// AscendingVisit visits all entries in ascending key order, stopping at the first error returned by visit.
+func (cm *MemDb) AscendingVisit(visit func(NeedleValue) error) (ret error) {
+ iter := cm.db.NewIterator(nil, nil)
+ for iter.Next() {
+ key := BytesToNeedleId(iter.Key())
+ data := iter.Value()
+ offset := BytesToOffset(data[0:OffsetSize])
+ size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
+
+ needle := NeedleValue{Key: key, Offset: offset, Size: size}
+ ret = visit(needle)
+		if ret != nil {
+			iter.Release()
+			return
+		}
+ }
+ iter.Release()
+ ret = iter.Error()
+
+ return
+}
+
+func (cm *MemDb) SaveToIdx(idxName string) (ret error) {
+ idxFile, err := os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		return err
+	}
+ defer idxFile.Close()
+
+ return cm.AscendingVisit(func(value NeedleValue) error {
+ if value.Offset.IsZero() || value.Size == TombstoneFileSize {
+ return nil
+ }
+ _, err := idxFile.Write(value.ToBytes())
+ return err
+ })
+
+}
+
+func (cm *MemDb) LoadFromIdx(idxName string) (ret error) {
+ idxFile, err := os.OpenFile(idxName, os.O_RDONLY, 0644)
+	if err != nil {
+		return err
+	}
+ defer idxFile.Close()
+
+ return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size uint32) error {
+ if offset.IsZero() || size == TombstoneFileSize {
+ return cm.Delete(key)
+ }
+ return cm.Set(key, offset, size)
+ })
+
+}
+
+func (cm *MemDb) Close() {
+ cm.db.Close()
+}
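
The typical MemDb flow is load, mutate, then write back only the live entries; the paths below are illustrative only:

db := NewMemDb()
defer db.Close()
if err := db.LoadFromIdx("/data/1.idx"); err != nil { // replays puts and deletes
	glog.Fatalf("load idx: %v", err)
}
if err := db.SaveToIdx("/data/1.compact.idx"); err != nil { // skips tombstoned entries
	glog.Fatalf("save idx: %v", err)
}
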
diff --git a/weed/storage/needle_map/memdb_test.go b/weed/storage/needle_map/memdb_test.go
new file mode 100644
index 000000000..7b45d23f8
--- /dev/null
+++ b/weed/storage/needle_map/memdb_test.go
@@ -0,0 +1,23 @@
+package needle_map
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func BenchmarkMemDb(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ nm := NewMemDb()
+
+ nid := types.NeedleId(345)
+ offset := types.Offset{
+ OffsetHigher: types.OffsetHigher{},
+ OffsetLower: types.OffsetLower{},
+ }
+ nm.Set(nid, offset, 324)
+ nm.Close()
+ }
+
+}
diff --git a/weed/storage/needle_map/needle_value.go b/weed/storage/needle_map/needle_value.go
new file mode 100644
index 000000000..ef540b55e
--- /dev/null
+++ b/weed/storage/needle_map/needle_value.go
@@ -0,0 +1,30 @@
+package needle_map
+
+import (
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/google/btree"
+)
+
+type NeedleValue struct {
+ Key NeedleId
+	Offset Offset `comment:"Volume offset"` // offsets are 8-byte aligned, so 4 bytes address 4G*8 = 32GB
+ Size uint32 `comment:"Size of the data portion"`
+}
+
+func (nv NeedleValue) Less(than btree.Item) bool {
+	that := than.(NeedleValue)
+	return nv.Key < that.Key
+}
+
+func (nv NeedleValue) ToBytes() []byte {
+ return ToBytes(nv.Key, nv.Offset, nv.Size)
+}
+
+func ToBytes(key NeedleId, offset Offset, size uint32) []byte {
+ bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
+ util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
+ return bytes
+}
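
NeedleValue keeps its btree.Item implementation, so callers can still hold values in a github.com/google/btree ordered by needle id; a brief sketch:

t := btree.New(32) // degree 32
t.ReplaceOrInsert(NeedleValue{Key: NeedleId(7), Offset: ToOffset(8), Size: 100})
if item := t.Get(NeedleValue{Key: NeedleId(7)}); item != nil {
	v := item.(NeedleValue)
	_ = v.Size
}
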
diff --git a/weed/storage/needle/needle_value_map.go b/weed/storage/needle_map/needle_value_map.go
similarity index 77%
rename from weed/storage/needle/needle_value_map.go
rename to weed/storage/needle_map/needle_value_map.go
index 9da257443..0a5a00ef7 100644
--- a/weed/storage/needle/needle_value_map.go
+++ b/weed/storage/needle_map/needle_value_map.go
@@ -1,4 +1,4 @@
-package needle
+package needle_map
import (
. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -8,5 +8,5 @@ type NeedleValueMap interface {
Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32)
Delete(key NeedleId) uint32
Get(key NeedleId) (*NeedleValue, bool)
- Visit(visit func(NeedleValue) error) error
+ AscendingVisit(visit func(NeedleValue) error) error
}
diff --git a/weed/storage/needle_map_boltdb.go b/weed/storage/needle_map_boltdb.go
deleted file mode 100644
index a24c55a32..000000000
--- a/weed/storage/needle_map_boltdb.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package storage
-
-import (
- "fmt"
- "os"
-
- "github.com/boltdb/bolt"
-
- "errors"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage/needle"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-type BoltDbNeedleMap struct {
- dbFileName string
- db *bolt.DB
- baseNeedleMapper
-}
-
-var boltdbBucket = []byte("weed")
-
-var NotFound = errors.New("not found")
-
-func NewBoltDbNeedleMap(dbFileName string, indexFile *os.File) (m *BoltDbNeedleMap, err error) {
- m = &BoltDbNeedleMap{dbFileName: dbFileName}
- m.indexFile = indexFile
- if !isBoltDbFresh(dbFileName, indexFile) {
- glog.V(0).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name())
- generateBoltDbFile(dbFileName, indexFile)
- glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name())
- }
- glog.V(1).Infof("Opening %s...", dbFileName)
- if m.db, err = bolt.Open(dbFileName, 0644, nil); err != nil {
- return
- }
- glog.V(1).Infof("Loading %s...", indexFile.Name())
- mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
- if indexLoadError != nil {
- return nil, indexLoadError
- }
- m.mapMetric = *mm
- return
-}
-
-func isBoltDbFresh(dbFileName string, indexFile *os.File) bool {
- // normally we always write to index file first
- dbLogFile, err := os.Open(dbFileName)
- if err != nil {
- return false
- }
- defer dbLogFile.Close()
- dbStat, dbStatErr := dbLogFile.Stat()
- indexStat, indexStatErr := indexFile.Stat()
- if dbStatErr != nil || indexStatErr != nil {
- glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr)
- return false
- }
-
- return dbStat.ModTime().After(indexStat.ModTime())
-}
-
-func generateBoltDbFile(dbFileName string, indexFile *os.File) error {
- db, err := bolt.Open(dbFileName, 0644, nil)
- if err != nil {
- return err
- }
- defer db.Close()
- return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
- if offset > 0 && size != TombstoneFileSize {
- boltDbWrite(db, key, offset, size)
- } else {
- boltDbDelete(db, key)
- }
- return nil
- })
-}
-
-func (m *BoltDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
- var offset Offset
- var size uint32
- bytes := make([]byte, NeedleIdSize)
- NeedleIdToBytes(bytes, key)
- err := m.db.View(func(tx *bolt.Tx) error {
- bucket := tx.Bucket(boltdbBucket)
- if bucket == nil {
- return fmt.Errorf("Bucket %q not found!", boltdbBucket)
- }
-
- data := bucket.Get(bytes)
-
- if len(data) == 0 {
- return NotFound
- }
-
- if len(data) != OffsetSize+SizeSize {
- glog.V(0).Infof("key:%v has wrong data length: %d", key, len(data))
- return fmt.Errorf("key:%v has wrong data length: %d", key, len(data))
- }
-
- offset = BytesToOffset(data[0:OffsetSize])
- size = util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
-
- return nil
- })
-
- if err != nil {
- return nil, false
- }
- return &needle.NeedleValue{Key: key, Offset: offset, Size: size}, true
-}
-
-func (m *BoltDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
- var oldSize uint32
- if oldNeedle, ok := m.Get(key); ok {
- oldSize = oldNeedle.Size
- }
- m.logPut(key, oldSize, size)
- // write to index file first
- if err := m.appendToIndexFile(key, offset, size); err != nil {
- return fmt.Errorf("cannot write to indexfile %s: %v", m.indexFile.Name(), err)
- }
- return boltDbWrite(m.db, key, offset, size)
-}
-
-func boltDbWrite(db *bolt.DB,
- key NeedleId, offset Offset, size uint32) error {
-
- bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
- NeedleIdToBytes(bytes[0:NeedleIdSize], key)
- OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
- util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
-
- return db.Update(func(tx *bolt.Tx) error {
- bucket, err := tx.CreateBucketIfNotExists(boltdbBucket)
- if err != nil {
- return err
- }
-
- err = bucket.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize])
- if err != nil {
- return err
- }
- return nil
- })
-}
-func boltDbDelete(db *bolt.DB, key NeedleId) error {
- bytes := make([]byte, NeedleIdSize)
- NeedleIdToBytes(bytes, key)
- return db.Update(func(tx *bolt.Tx) error {
- bucket, err := tx.CreateBucketIfNotExists(boltdbBucket)
- if err != nil {
- return err
- }
-
- err = bucket.Delete(bytes)
- if err != nil {
- return err
- }
- return nil
- })
-}
-
-func (m *BoltDbNeedleMap) Delete(key NeedleId, offset Offset) error {
- if oldNeedle, ok := m.Get(key); ok {
- m.logDelete(oldNeedle.Size)
- }
- // write to index file first
- if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil {
- return err
- }
- return boltDbDelete(m.db, key)
-}
-
-func (m *BoltDbNeedleMap) Close() {
- m.indexFile.Close()
- m.db.Close()
-}
-
-func (m *BoltDbNeedleMap) Destroy() error {
- m.Close()
- os.Remove(m.indexFile.Name())
- return os.Remove(m.dbFileName)
-}
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index 77d29bd87..83589c231 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -5,30 +5,42 @@ import (
"os"
"path/filepath"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+
+ "github.com/syndtr/goleveldb/leveldb"
+
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/syndtr/goleveldb/leveldb"
)
type LevelDbNeedleMap struct {
+ baseNeedleMapper
dbFileName string
db *leveldb.DB
- baseNeedleMapper
}
-func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File) (m *LevelDbNeedleMap, err error) {
+func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Options) (m *LevelDbNeedleMap, err error) {
m = &LevelDbNeedleMap{dbFileName: dbFileName}
m.indexFile = indexFile
if !isLevelDbFresh(dbFileName, indexFile) {
- glog.V(0).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name())
+ glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name())
generateLevelDbFile(dbFileName, indexFile)
- glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name())
+ glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name())
}
glog.V(1).Infof("Opening %s...", dbFileName)
- if m.db, err = leveldb.OpenFile(dbFileName, nil); err != nil {
- return
+
+ if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil {
+ if errors.IsCorrupted(err) {
+ m.db, err = leveldb.RecoverFile(dbFileName, opts)
+ }
+ if err != nil {
+ return
+ }
}
glog.V(1).Infof("Loading %s...", indexFile.Name())
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
@@ -62,8 +74,8 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
return err
}
defer db.Close()
- return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
- if offset > 0 && size != TombstoneFileSize {
+ return idx.WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
+ if !offset.IsZero() && size != TombstoneFileSize {
levelDbWrite(db, key, offset, size)
} else {
levelDbDelete(db, key)
@@ -72,7 +84,7 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
})
}
-func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
+func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) {
bytes := make([]byte, NeedleIdSize)
NeedleIdToBytes(bytes[0:NeedleIdSize], key)
data, err := m.db.Get(bytes, nil)
@@ -81,7 +93,7 @@ func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bo
}
offset := BytesToOffset(data[0:OffsetSize])
size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
- return &needle.NeedleValue{Key: NeedleId(key), Offset: offset, Size: size}, true
+ return &needle_map.NeedleValue{Key: key, Offset: offset, Size: size}, true
}
func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
@@ -97,13 +109,9 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
return levelDbWrite(m.db, key, offset, size)
}
-func levelDbWrite(db *leveldb.DB,
- key NeedleId, offset Offset, size uint32) error {
+func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size uint32) error {
- bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
- NeedleIdToBytes(bytes[0:NeedleIdSize], key)
- OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
- util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
+ bytes := needle_map.ToBytes(key, offset, size)
if err := db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
return fmt.Errorf("failed to write leveldb: %v", err)
@@ -128,8 +136,17 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error {
}
func (m *LevelDbNeedleMap) Close() {
- m.indexFile.Close()
- m.db.Close()
+ indexFileName := m.indexFile.Name()
+ if err := m.indexFile.Sync(); err != nil {
+ glog.Warningf("sync file %s failed: %v", indexFileName, err)
+ }
+ if err := m.indexFile.Close(); err != nil {
+ glog.Warningf("close index file %s failed: %v", indexFileName, err)
+ }
+
+ if err := m.db.Close(); err != nil {
+ glog.Warningf("close levelDB failed: %v", err)
+ }
}
func (m *LevelDbNeedleMap) Destroy() error {
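
NewLevelDbNeedleMap now accepts *opt.Options, which is how the NeedleMapLevelDb/Medium/Large footprints are expressed; the sizes below mirror the "medium" constant's comment and are illustrative only:

opts := &opt.Options{
	WriteBuffer:        3 * 1024 * 1024, // 3MB write buffer
	BlockCacheCapacity: 5 * 1024 * 1024, // 5MB block buffer
}
nm, err := NewLevelDbNeedleMap(dbFileName, indexFile, opts)
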
diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go
index fa5576c2b..84197912f 100644
--- a/weed/storage/needle_map_memory.go
+++ b/weed/storage/needle_map_memory.go
@@ -1,114 +1,60 @@
package storage
import (
- "io"
"os"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
type NeedleMap struct {
- m needle.NeedleValueMap
-
baseNeedleMapper
+ m needle_map.NeedleValueMap
}
func NewCompactNeedleMap(file *os.File) *NeedleMap {
nm := &NeedleMap{
- m: needle.NewCompactMap(),
+ m: needle_map.NewCompactMap(),
}
nm.indexFile = file
return nm
}
-func NewBtreeNeedleMap(file *os.File) *NeedleMap {
- nm := &NeedleMap{
- m: needle.NewBtreeMap(),
- }
- nm.indexFile = file
- return nm
-}
-
-const (
- RowsToRead = 1024
-)
-
func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) {
nm := NewCompactNeedleMap(file)
return doLoading(file, nm)
}
-func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) {
- nm := NewBtreeNeedleMap(file)
- return doLoading(file, nm)
-}
-
func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
- e := WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
- if key > nm.MaximumFileKey {
- nm.MaximumFileKey = key
- }
- if offset > 0 && size != TombstoneFileSize {
+ e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
+ nm.MaybeSetMaxFileKey(key)
+ if !offset.IsZero() && size != TombstoneFileSize {
nm.FileCounter++
nm.FileByteCounter = nm.FileByteCounter + uint64(size)
oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size)
- // glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
- if oldOffset > 0 && oldSize != TombstoneFileSize {
+ if !oldOffset.IsZero() && oldSize != TombstoneFileSize {
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
} else {
oldSize := nm.m.Delete(NeedleId(key))
- // glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
return nil
})
- glog.V(1).Infof("max file key: %d for file: %s", nm.MaximumFileKey, file.Name())
+ glog.V(1).Infof("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
return nm, e
}
-// walks through the index file, calls fn function with each key, offset, size
-// stops with the error returned by the fn function
-func WalkIndexFile(r *os.File, fn func(key NeedleId, offset Offset, size uint32) error) error {
- var readerOffset int64
- bytes := make([]byte, NeedleEntrySize*RowsToRead)
- count, e := r.ReadAt(bytes, readerOffset)
- glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
- readerOffset += int64(count)
- var (
- key NeedleId
- offset Offset
- size uint32
- i int
- )
-
- for count > 0 && e == nil || e == io.EOF {
- for i = 0; i+NeedleEntrySize <= count; i += NeedleEntrySize {
- key, offset, size = IdxFileEntry(bytes[i : i+NeedleEntrySize])
- if e = fn(key, offset, size); e != nil {
- return e
- }
- }
- if e == io.EOF {
- return nil
- }
- count, e = r.ReadAt(bytes, readerOffset)
- glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
- readerOffset += int64(count)
- }
- return e
-}
-
func (nm *NeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
_, oldSize := nm.m.Set(NeedleId(key), offset, size)
nm.logPut(key, oldSize, size)
return nm.appendToIndexFile(key, offset, size)
}
-func (nm *NeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
+func (nm *NeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) {
element, ok = nm.m.Get(NeedleId(key))
return
}
@@ -118,6 +64,10 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error {
return nm.appendToIndexFile(key, offset, TombstoneFileSize)
}
func (nm *NeedleMap) Close() {
+ indexFileName := nm.indexFile.Name()
+ if err := nm.indexFile.Sync(); err != nil {
+		glog.Warningf("sync file %s failed: %v", indexFileName, err)
+ }
_ = nm.indexFile.Close()
}
func (nm *NeedleMap) Destroy() error {
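
WalkIndexFile moved from package storage into the new idx package; replaying an .idx file now reads as below, with f an already-open *os.File:

err := idx.WalkIndexFile(f, func(key NeedleId, offset Offset, size uint32) error {
	if offset.IsZero() || size == TombstoneFileSize {
		return nil // deletion record
	}
	// live entry: key maps to (offset, size) in the data file
	return nil
})
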
diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go
index cc3d9e028..823a04108 100644
--- a/weed/storage/needle_map_metric.go
+++ b/weed/storage/needle_map_metric.go
@@ -2,50 +2,93 @@ package storage
import (
"fmt"
+ "os"
+ "sync/atomic"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/willf/bloom"
- "os"
)
type mapMetric struct {
- DeletionCounter int `json:"DeletionCounter"`
- FileCounter int `json:"FileCounter"`
- DeletionByteCounter uint64 `json:"DeletionByteCounter"`
- FileByteCounter uint64 `json:"FileByteCounter"`
- MaximumFileKey NeedleId `json:"MaxFileKey"`
+ DeletionCounter uint32 `json:"DeletionCounter"`
+ FileCounter uint32 `json:"FileCounter"`
+ DeletionByteCounter uint64 `json:"DeletionByteCounter"`
+ FileByteCounter uint64 `json:"FileByteCounter"`
+ MaximumFileKey uint64 `json:"MaxFileKey"`
}
func (mm *mapMetric) logDelete(deletedByteCount uint32) {
- mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(deletedByteCount)
- mm.DeletionCounter++
+ if mm == nil {
+ return
+ }
+ mm.LogDeletionCounter(deletedByteCount)
}
func (mm *mapMetric) logPut(key NeedleId, oldSize uint32, newSize uint32) {
- if key > mm.MaximumFileKey {
- mm.MaximumFileKey = key
+ if mm == nil {
+ return
+ }
+ mm.MaybeSetMaxFileKey(key)
+ mm.LogFileCounter(newSize)
+ if oldSize > 0 && oldSize != TombstoneFileSize {
+ mm.LogDeletionCounter(oldSize)
+ }
+}
+func (mm *mapMetric) LogFileCounter(newSize uint32) {
+ if mm == nil {
+ return
+ }
+ atomic.AddUint32(&mm.FileCounter, 1)
+ atomic.AddUint64(&mm.FileByteCounter, uint64(newSize))
+}
+func (mm *mapMetric) LogDeletionCounter(oldSize uint32) {
+ if mm == nil {
+ return
}
- mm.FileCounter++
- mm.FileByteCounter = mm.FileByteCounter + uint64(newSize)
if oldSize > 0 {
- mm.DeletionCounter++
- mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(oldSize)
+ atomic.AddUint32(&mm.DeletionCounter, 1)
+ atomic.AddUint64(&mm.DeletionByteCounter, uint64(oldSize))
}
}
-
-func (mm mapMetric) ContentSize() uint64 {
- return mm.FileByteCounter
+func (mm *mapMetric) ContentSize() uint64 {
+ if mm == nil {
+ return 0
+ }
+ return atomic.LoadUint64(&mm.FileByteCounter)
}
-func (mm mapMetric) DeletedSize() uint64 {
- return mm.DeletionByteCounter
+func (mm *mapMetric) DeletedSize() uint64 {
+ if mm == nil {
+ return 0
+ }
+ return atomic.LoadUint64(&mm.DeletionByteCounter)
}
-func (mm mapMetric) FileCount() int {
- return mm.FileCounter
+func (mm *mapMetric) FileCount() int {
+ if mm == nil {
+ return 0
+ }
+ return int(atomic.LoadUint32(&mm.FileCounter))
}
-func (mm mapMetric) DeletedCount() int {
- return mm.DeletionCounter
+func (mm *mapMetric) DeletedCount() int {
+ if mm == nil {
+ return 0
+ }
+ return int(atomic.LoadUint32(&mm.DeletionCounter))
}
-func (mm mapMetric) MaxFileKey() NeedleId {
- return mm.MaximumFileKey
+func (mm *mapMetric) MaxFileKey() NeedleId {
+ if mm == nil {
+ return 0
+ }
+	t := atomic.LoadUint64(&mm.MaximumFileKey)
+ return Uint64ToNeedleId(t)
+}
+func (mm *mapMetric) MaybeSetMaxFileKey(key NeedleId) {
+ if mm == nil {
+ return
+ }
+ if key > mm.MaxFileKey() {
+ atomic.StoreUint64(&mm.MaximumFileKey, uint64(key))
+ }
}
func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
@@ -56,9 +99,7 @@ func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
bf = bloom.NewWithEstimates(uint(entryCount), 0.001)
}, func(key NeedleId, offset Offset, size uint32) error {
- if key > mm.MaximumFileKey {
- mm.MaximumFileKey = key
- }
+ mm.MaybeSetMaxFileKey(key)
NeedleIdToBytes(buf, key)
if size != TombstoneFileSize {
mm.FileByteCounter += uint64(size)
@@ -86,16 +127,16 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key
return fmt.Errorf("file %s stat error: %v", r.Name(), err)
}
fileSize := fi.Size()
- if fileSize%NeedleEntrySize != 0 {
+ if fileSize%NeedleMapEntrySize != 0 {
return fmt.Errorf("unexpected file %s size: %d", r.Name(), fileSize)
}
- entryCount := fileSize / NeedleEntrySize
+ entryCount := fileSize / NeedleMapEntrySize
initFn(entryCount)
batchSize := int64(1024 * 4)
- bytes := make([]byte, NeedleEntrySize*batchSize)
+ bytes := make([]byte, NeedleMapEntrySize*batchSize)
nextBatchSize := entryCount % batchSize
if nextBatchSize == 0 {
nextBatchSize = batchSize
@@ -103,13 +144,13 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key
remainingCount := entryCount - nextBatchSize
for remainingCount >= 0 {
- _, e := r.ReadAt(bytes[:NeedleEntrySize*nextBatchSize], NeedleEntrySize*remainingCount)
- // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleEntrySize*remainingCount, "count", count, "e", e)
+ _, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount)
+ // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
if e != nil {
return e
}
for i := int(nextBatchSize) - 1; i >= 0; i-- {
- key, offset, size := IdxFileEntry(bytes[i*NeedleEntrySize : i*NeedleEntrySize+NeedleEntrySize])
+ key, offset, size := idx.IdxFileEntry(bytes[i*NeedleMapEntrySize : i*NeedleMapEntrySize+NeedleMapEntrySize])
if e = fn(key, offset, size); e != nil {
return e
}
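
The counters are now maintained with sync/atomic, so concurrent Put and Delete paths can record metrics without taking the index lock; a package-internal sketch:

var mm mapMetric
mm.LogFileCounter(1024)    // atomically bumps FileCounter and FileByteCounter
mm.LogDeletionCounter(512) // atomically bumps the deletion counters
println(mm.FileCount(), mm.DeletedSize())
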
diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go
index 539f83a87..ae2177a30 100644
--- a/weed/storage/needle_map_metric_test.go
+++ b/weed/storage/needle_map_metric_test.go
@@ -1,17 +1,18 @@
package storage
import (
- "github.com/chrislusf/seaweedfs/weed/glog"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
"io/ioutil"
"math/rand"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
)
func TestFastLoadingNeedleMapMetrics(t *testing.T) {
idxFile, _ := ioutil.TempFile("", "tmp.idx")
- nm := NewBtreeNeedleMap(idxFile)
+ nm := NewCompactNeedleMap(idxFile)
for i := 0; i < 10000; i++ {
nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), uint32(1))
diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go
new file mode 100644
index 000000000..e6f9258f3
--- /dev/null
+++ b/weed/storage/needle_map_sorted_file.go
@@ -0,0 +1,105 @@
+package storage
+
+import (
+ "os"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+type SortedFileNeedleMap struct {
+ baseNeedleMapper
+ baseFileName string
+ dbFile *os.File
+ dbFileSize int64
+}
+
+func NewSortedFileNeedleMap(baseFileName string, indexFile *os.File) (m *SortedFileNeedleMap, err error) {
+ m = &SortedFileNeedleMap{baseFileName: baseFileName}
+ m.indexFile = indexFile
+ fileName := baseFileName + ".sdx"
+ if !isSortedFileFresh(fileName, indexFile) {
+ glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name())
+ erasure_coding.WriteSortedFileFromIdx(baseFileName, ".sdx")
+ glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name())
+ }
+ glog.V(1).Infof("Opening %s...", fileName)
+
+ if m.dbFile, err = os.Open(baseFileName + ".sdx"); err != nil {
+ return
+ }
+ dbStat, _ := m.dbFile.Stat()
+ m.dbFileSize = dbStat.Size()
+ glog.V(1).Infof("Loading %s...", indexFile.Name())
+ mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
+ if indexLoadError != nil {
+ return nil, indexLoadError
+ }
+ m.mapMetric = *mm
+ return
+}
+
+func isSortedFileFresh(dbFileName string, indexFile *os.File) bool {
+ // normally we always write to index file first
+ dbFile, err := os.Open(dbFileName)
+ if err != nil {
+ return false
+ }
+ defer dbFile.Close()
+ dbStat, dbStatErr := dbFile.Stat()
+ indexStat, indexStatErr := indexFile.Stat()
+ if dbStatErr != nil || indexStatErr != nil {
+		glog.V(0).Infof("Cannot stat file: %v and %v", dbStatErr, indexStatErr)
+ return false
+ }
+
+ return dbStat.ModTime().After(indexStat.ModTime())
+}
+
+func (m *SortedFileNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) {
+ offset, size, err := erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, nil)
+ ok = err == nil
+ return &needle_map.NeedleValue{Key: key, Offset: offset, Size: size}, ok
+
+}
+
+func (m *SortedFileNeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
+ return os.ErrInvalid
+}
+
+func (m *SortedFileNeedleMap) Delete(key NeedleId, offset Offset) error {
+
+ _, size, err := erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, nil)
+
+ if err != nil {
+ if err == erasure_coding.NotFoundError {
+ return nil
+ }
+ return err
+ }
+
+ if size == TombstoneFileSize {
+ return nil
+ }
+
+ // write to index file first
+ if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil {
+ return err
+ }
+ _, _, err = erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, erasure_coding.MarkNeedleDeleted)
+
+ return err
+}
+
+func (m *SortedFileNeedleMap) Close() {
+ m.indexFile.Close()
+ m.dbFile.Close()
+}
+
+func (m *SortedFileNeedleMap) Destroy() error {
+ m.Close()
+ os.Remove(m.indexFile.Name())
+ return os.Remove(m.baseFileName + ".sdx")
+	return os.Remove(m.baseFileName + ".sdx")
+}
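
The sorted-file map is effectively read-only: Get and Delete binary-search the .sdx file, while Put is rejected outright. A sketch, with the base file name assumed:

m, err := NewSortedFileNeedleMap("/data/1", idxFile)
if err != nil {
	glog.Fatalf("open sorted needle map: %v", err)
}
defer m.Close()
_ = m.Put(key, offset, size) // always returns os.ErrInvalid
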
diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go
deleted file mode 100644
index e8d57ee38..000000000
--- a/weed/storage/needle_parse_multipart.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package storage
-
-import (
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
- "io/ioutil"
- "mime"
- "net/http"
- "path"
- "strconv"
- "strings"
-)
-
-func parseMultipart(r *http.Request) (
- fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) {
- form, fe := r.MultipartReader()
- if fe != nil {
- glog.V(0).Infoln("MultipartReader [ERROR]", fe)
- e = fe
- return
- }
-
- //first multi-part item
- part, fe := form.NextPart()
- if fe != nil {
- glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
- e = fe
- return
- }
-
- fileName = part.FileName()
- if fileName != "" {
- fileName = path.Base(fileName)
- }
-
- data, e = ioutil.ReadAll(part)
- if e != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", e)
- return
- }
-
- //if the filename is empty string, do a search on the other multi-part items
- for fileName == "" {
- part2, fe := form.NextPart()
- if fe != nil {
- break // no more or on error, just safely break
- }
-
- fName := part2.FileName()
-
- //found the first multi-part has filename
- if fName != "" {
- data2, fe2 := ioutil.ReadAll(part2)
- if fe2 != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", fe2)
- e = fe2
- return
- }
-
- //update
- data = data2
- fileName = path.Base(fName)
- break
- }
- }
-
- originalDataSize = len(data)
-
- isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
-
- if !isChunkedFile {
-
- dotIndex := strings.LastIndex(fileName, ".")
- ext, mtype := "", ""
- if dotIndex > 0 {
- ext = strings.ToLower(fileName[dotIndex:])
- mtype = mime.TypeByExtension(ext)
- }
- contentType := part.Header.Get("Content-Type")
- if contentType != "" && mtype != contentType {
- mimeType = contentType //only return mime type if not deductable
- mtype = contentType
- }
-
- if part.Header.Get("Content-Encoding") == "gzip" {
- if unzipped, e := operation.UnGzipData(data); e == nil {
- originalDataSize = len(unzipped)
- }
- isGzipped = true
- } else if operation.IsGzippable(ext, mtype, data) {
- if data, e = operation.GzipData(data); e != nil {
- return
- }
- isGzipped = true
- }
- }
-
- return
-}
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 96c819666..0ac3381c5 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -2,8 +2,19 @@ package storage
import (
"fmt"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -15,6 +26,9 @@ const (
* A VolumeServer contains one Store
*/
type Store struct {
+ MasterAddress string
+ grpcDialOption grpc.DialOption
+ volumeSizeLimit uint64 //read from the master
Ip string
Port int
PublicUrl string
@@ -22,40 +36,45 @@ type Store struct {
dataCenter string //optional informaton, overwriting master setting if exists
rack string //optional information, overwriting master setting if exists
connected bool
- VolumeSizeLimit uint64 //read from the master
- Client master_pb.Seaweed_SendHeartbeatClient
NeedleMapType NeedleMapType
- NewVolumeIdChan chan VolumeId
- DeletedVolumeIdChan chan VolumeId
+ NewVolumesChan chan master_pb.VolumeShortInformationMessage
+ DeletedVolumesChan chan master_pb.VolumeShortInformationMessage
+ NewEcShardsChan chan master_pb.VolumeEcShardInformationMessage
+ DeletedEcShardsChan chan master_pb.VolumeEcShardInformationMessage
}
func (s *Store) String() (str string) {
- str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.VolumeSizeLimit)
+ str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.GetVolumeSizeLimit())
return
}
-func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) {
- s = &Store{Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
+func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, needleMapKind NeedleMapType) (s *Store) {
+ s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
s.Locations = make([]*DiskLocation, 0)
for i := 0; i < len(dirnames); i++ {
- location := NewDiskLocation(dirnames[i], maxVolumeCounts[i])
+ location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpacePercents[i])
location.loadExistingVolumes(needleMapKind)
s.Locations = append(s.Locations, location)
+ stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))
}
- s.NewVolumeIdChan = make(chan VolumeId, 3)
- s.DeletedVolumeIdChan = make(chan VolumeId, 3)
+ s.NewVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
+ s.DeletedVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
+
+ s.NewEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
+ s.DeletedEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
+
return
}
-func (s *Store) AddVolume(volumeId VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64) error {
- rt, e := NewReplicaPlacementFromString(replicaPlacement)
+func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32) error {
+ rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement)
if e != nil {
return e
}
- ttl, e := ReadTTL(ttlString)
+ ttl, e := needle.ReadTTL(ttlString)
if e != nil {
return e
}
- e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate)
+ e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb)
return e
}
func (s *Store) DeleteCollection(collection string) (e error) {
@@ -64,12 +83,12 @@ func (s *Store) DeleteCollection(collection string) (e error) {
if e != nil {
return
}
- // let the heartbeat send the list of volumes, instead of sending the deleted volume ids to DeletedVolumeIdChan
+ // let the heartbeat send the list of volumes, instead of sending the deleted volume ids to DeletedVolumesChan
}
return
}
-func (s *Store) findVolume(vid VolumeId) *Volume {
+func (s *Store) findVolume(vid needle.VolumeId) *Volume {
for _, location := range s.Locations {
if v, found := location.FindVolume(vid); found {
return v
@@ -77,10 +96,13 @@ func (s *Store) findVolume(vid VolumeId) *Volume {
}
return nil
}
-func (s *Store) findFreeLocation() (ret *DiskLocation) {
+func (s *Store) FindFreeLocation() (ret *DiskLocation) {
max := 0
for _, location := range s.Locations {
currentFreeCount := location.MaxVolumeCount - location.VolumesLen()
+ currentFreeCount *= erasure_coding.DataShardsCount
+ currentFreeCount -= location.EcVolumesLen()
+ currentFreeCount /= erasure_coding.DataShardsCount
if currentFreeCount > max {
max = currentFreeCount
ret = location
@@ -88,16 +110,23 @@ func (s *Store) findFreeLocation() (ret *DiskLocation) {
}
return ret
}
-func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL, preallocate int64) error {
+func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) error {
if s.findVolume(vid) != nil {
return fmt.Errorf("Volume Id %d already exists!", vid)
}
- if location := s.findFreeLocation(); location != nil {
+ if location := s.FindFreeLocation(); location != nil {
glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
location.Directory, vid, collection, replicaPlacement, ttl)
- if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate); err == nil {
+ if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil {
location.SetVolume(vid, volume)
- s.NewVolumeIdChan <- vid
+ glog.V(0).Infof("add volume %d", vid)
+ s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
+ Id: uint32(vid),
+ Collection: collection,
+ ReplicaPlacement: uint32(replicaPlacement.Byte()),
+ Version: uint32(volume.Version()),
+ Ttl: ttl.ToUint32(),
+ }
return nil
} else {
return err
@@ -106,30 +135,54 @@ func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleM
return fmt.Errorf("No more free space left")
}
-func (s *Store) Status() []*VolumeInfo {
- var stats []*VolumeInfo
+func (s *Store) VolumeInfos() (allStats []*VolumeInfo) {
for _, location := range s.Locations {
- location.RLock()
- for k, v := range location.volumes {
- s := &VolumeInfo{
- Id: VolumeId(k),
- Size: v.ContentSize(),
- Collection: v.Collection,
- ReplicaPlacement: v.ReplicaPlacement,
- Version: v.Version(),
- FileCount: v.nm.FileCount(),
- DeleteCount: v.nm.DeletedCount(),
- DeletedByteCount: v.nm.DeletedSize(),
- ReadOnly: v.readOnly,
- Ttl: v.Ttl}
- stats = append(stats, s)
- }
- location.RUnlock()
+ stats := collectStatsForOneLocation(location)
+ allStats = append(allStats, stats...)
+ }
+ sortVolumeInfos(allStats)
+ return allStats
+}
+
+func collectStatsForOneLocation(location *DiskLocation) (stats []*VolumeInfo) {
+ location.volumesLock.RLock()
+ defer location.volumesLock.RUnlock()
+
+ for k, v := range location.volumes {
+ s := collectStatForOneVolume(k, v)
+ stats = append(stats, s)
}
- sortVolumeInfos(stats)
return stats
}
+func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
+
+ s = &VolumeInfo{
+ Id: vid,
+ Collection: v.Collection,
+ ReplicaPlacement: v.ReplicaPlacement,
+ Version: v.Version(),
+ ReadOnly: v.IsReadOnly(),
+ Ttl: v.Ttl,
+ CompactRevision: uint32(v.CompactionRevision),
+ }
+ s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()
+
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+
+ if v.nm == nil {
+ return
+ }
+
+ s.FileCount = v.nm.FileCount()
+ s.DeleteCount = v.nm.DeletedCount()
+ s.DeletedByteCount = v.nm.DeletedSize()
+ s.Size = v.nm.ContentSize()
+
+ return
+}
+
func (s *Store) SetDataCenter(dataCenter string) {
s.dataCenter = dataCenter
}
@@ -141,37 +194,48 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
var volumeMessages []*master_pb.VolumeInformationMessage
maxVolumeCount := 0
var maxFileKey NeedleId
+ collectionVolumeSize := make(map[string]uint64)
for _, location := range s.Locations {
+ var deleteVids []needle.VolumeId
maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
- location.Lock()
- for k, v := range location.volumes {
- if maxFileKey < v.nm.MaxFileKey() {
- maxFileKey = v.nm.MaxFileKey()
+ location.volumesLock.RLock()
+ for _, v := range location.volumes {
+ if maxFileKey < v.MaxFileKey() {
+ maxFileKey = v.MaxFileKey()
}
- if !v.expired(s.VolumeSizeLimit) {
- volumeMessage := &master_pb.VolumeInformationMessage{
- Id: uint32(k),
- Size: uint64(v.Size()),
- Collection: v.Collection,
- FileCount: uint64(v.nm.FileCount()),
- DeleteCount: uint64(v.nm.DeletedCount()),
- DeletedByteCount: v.nm.DeletedSize(),
- ReadOnly: v.readOnly,
- ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
- Version: uint32(v.Version()),
- Ttl: v.Ttl.ToUint32(),
- }
- volumeMessages = append(volumeMessages, volumeMessage)
+ if !v.expired(s.GetVolumeSizeLimit()) {
+ volumeMessages = append(volumeMessages, v.ToVolumeInformationMessage())
} else {
if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
- location.deleteVolumeById(v.Id)
- glog.V(0).Infoln("volume", v.Id, "is deleted.")
+ deleteVids = append(deleteVids, v.Id)
} else {
glog.V(0).Infoln("volume", v.Id, "is expired.")
}
}
+ fileSize, _, _ := v.FileStat()
+ collectionVolumeSize[v.Collection] += fileSize
}
- location.Unlock()
+ location.volumesLock.RUnlock()
+
+ if len(deleteVids) > 0 {
+ // delete expired volumes.
+ location.volumesLock.Lock()
+ for _, vid := range deleteVids {
+ found, err := location.deleteVolumeById(vid)
+ if found {
+ if err == nil {
+ glog.V(0).Infof("volume %d is deleted", vid)
+ } else {
+ glog.V(0).Infof("delete volume %d: %v", vid, err)
+ }
+ }
+ }
+ location.volumesLock.Unlock()
+ }
+ }
+
+ for col, size := range collectionVolumeSize {
+ stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "normal").Set(float64(size))
}
return &master_pb.Heartbeat{
@@ -183,85 +247,181 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
DataCenter: s.dataCenter,
Rack: s.rack,
Volumes: volumeMessages,
+ HasNoVolumes: len(volumeMessages) == 0,
}
}
+
func (s *Store) Close() {
for _, location := range s.Locations {
location.Close()
}
}
-func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) {
+func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync bool) (isUnchanged bool, err error) {
if v := s.findVolume(i); v != nil {
- if v.readOnly {
- err = fmt.Errorf("Volume %d is read only", i)
+ if v.IsReadOnly() {
+ err = fmt.Errorf("volume %d is read only", i)
return
}
- // TODO: count needle size ahead
- if MaxPossibleVolumeSize >= v.ContentSize()+uint64(size) {
- _, size, err = v.writeNeedle(n)
- } else {
- err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.VolumeSizeLimit, v.ContentSize())
- }
+ _, _, isUnchanged, err = v.writeNeedle2(n, fsync)
return
}
glog.V(0).Infoln("volume", i, "not found!")
- err = fmt.Errorf("Volume %d not found!", i)
+ err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
return
}
-func (s *Store) Delete(i VolumeId, n *Needle) (uint32, error) {
- if v := s.findVolume(i); v != nil && !v.readOnly {
- return v.deleteNeedle(n)
+func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (uint32, error) {
+ if v := s.findVolume(i); v != nil {
+ if v.noWriteOrDelete {
+ return 0, fmt.Errorf("volume %d is read only", i)
+ }
+ return v.deleteNeedle2(n)
}
- return 0, nil
+ return 0, fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
}
-func (s *Store) ReadVolumeNeedle(i VolumeId, n *Needle) (int, error) {
+func (s *Store) ReadVolumeNeedle(i needle.VolumeId, n *needle.Needle) (int, error) {
if v := s.findVolume(i); v != nil {
return v.readNeedle(n)
}
- return 0, fmt.Errorf("Volume %d not found!", i)
+ return 0, fmt.Errorf("volume %d not found", i)
}
-func (s *Store) GetVolume(i VolumeId) *Volume {
+func (s *Store) GetVolume(i needle.VolumeId) *Volume {
return s.findVolume(i)
}
-func (s *Store) HasVolume(i VolumeId) bool {
+func (s *Store) HasVolume(i needle.VolumeId) bool {
v := s.findVolume(i)
return v != nil
}
-func (s *Store) MountVolume(i VolumeId) error {
+func (s *Store) MarkVolumeReadonly(i needle.VolumeId) error {
+ v := s.findVolume(i)
+ if v == nil {
+ return fmt.Errorf("volume %d not found", i)
+ }
+ v.noWriteOrDelete = true
+ return nil
+}
+
+func (s *Store) MountVolume(i needle.VolumeId) error {
for _, location := range s.Locations {
if found := location.LoadVolume(i, s.NeedleMapType); found == true {
- s.NewVolumeIdChan <- VolumeId(i)
+ glog.V(0).Infof("mount volume %d", i)
+ v := s.findVolume(i)
+ s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
+ Id: uint32(v.Id),
+ Collection: v.Collection,
+ ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
+ Version: uint32(v.Version()),
+ Ttl: v.Ttl.ToUint32(),
+ }
return nil
}
}
- return fmt.Errorf("Volume %d not found on disk", i)
+ return fmt.Errorf("volume %d not found on disk", i)
}
-func (s *Store) UnmountVolume(i VolumeId) error {
+func (s *Store) UnmountVolume(i needle.VolumeId) error {
+ v := s.findVolume(i)
+ if v == nil {
+ return nil
+ }
+ message := master_pb.VolumeShortInformationMessage{
+ Id: uint32(v.Id),
+ Collection: v.Collection,
+ ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
+ Version: uint32(v.Version()),
+ Ttl: v.Ttl.ToUint32(),
+ }
+
for _, location := range s.Locations {
if err := location.UnloadVolume(i); err == nil {
- s.DeletedVolumeIdChan <- VolumeId(i)
+ glog.V(0).Infof("UnmountVolume %d", i)
+ s.DeletedVolumesChan <- message
return nil
}
}
- return fmt.Errorf("Volume %d not found on disk", i)
+ return fmt.Errorf("volume %d not found on disk", i)
}
-func (s *Store) DeleteVolume(i VolumeId) error {
+func (s *Store) DeleteVolume(i needle.VolumeId) error {
+ v := s.findVolume(i)
+ if v == nil {
+ return fmt.Errorf("delete volume %d not found on disk", i)
+ }
+ message := master_pb.VolumeShortInformationMessage{
+ Id: uint32(v.Id),
+ Collection: v.Collection,
+ ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
+ Version: uint32(v.Version()),
+ Ttl: v.Ttl.ToUint32(),
+ }
for _, location := range s.Locations {
- if error := location.deleteVolumeById(i); error == nil {
- s.DeletedVolumeIdChan <- VolumeId(i)
+ if found, error := location.deleteVolumeById(i); found && error == nil {
+ glog.V(0).Infof("DeleteVolume %d", i)
+ s.DeletedVolumesChan <- message
return nil
}
}
- return fmt.Errorf("Volume %d not found on disk", i)
+ return fmt.Errorf("volume %d not found on disk", i)
+}
+
+func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error {
+
+ for _, location := range s.Locations {
+ fileInfo, found := location.LocateVolume(i)
+ if !found {
+ continue
+ }
+ // load, modify, save
+ baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
+ vifFile := filepath.Join(location.Directory, baseFileName+".vif")
+ volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile)
+ if err != nil {
+ return fmt.Errorf("volume %d fail to load vif", i)
+ }
+ volumeInfo.Replication = replication
+ err = pb.SaveVolumeInfo(vifFile, volumeInfo)
+ if err != nil {
+ return fmt.Errorf("volume %d fail to save vif", i)
+ }
+ return nil
+ }
+
+ return fmt.Errorf("volume %d not found on disk", i)
+}
+
+func (s *Store) SetVolumeSizeLimit(x uint64) {
+ atomic.StoreUint64(&s.volumeSizeLimit, x)
+}
+
+func (s *Store) GetVolumeSizeLimit() uint64 {
+ return atomic.LoadUint64(&s.volumeSizeLimit)
+}
+
+func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
+ volumeSizeLimit := s.GetVolumeSizeLimit()
+ for _, diskLocation := range s.Locations {
+ if diskLocation.MaxVolumeCount == 0 {
+ diskStatus := stats.NewDiskStatus(diskLocation.Directory)
+ unusedSpace := diskLocation.UnUsedSpace(volumeSizeLimit)
+ unclaimedSpaces := int64(diskStatus.Free) - int64(unusedSpace)
+ volCount := diskLocation.VolumesLen()
+ maxVolumeCount := volCount
+ if unclaimedSpaces > int64(volumeSizeLimit) {
+ maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
+ }
+ diskLocation.MaxVolumeCount = maxVolumeCount
+ glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
+ diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
+ hasChanges = true
+ }
+ }
+ return
}
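The MaxVolumeCount adjustment above budgets volumes per disk from free space: take what the OS reports as free, subtract the growth still reserved by existing volumes, divide by the volume size limit, and leave one volume of headroom. A standalone sketch of that arithmetic with invented numbers (the real code reads the free figure from stats.NewDiskStatus):

package main

import "fmt"

func main() {
	const volumeSizeLimit = uint64(30 * 1024 * 1024 * 1024) // 30GiB per volume
	diskFree := uint64(500 * 1024 * 1024 * 1024)            // free bytes reported by the OS (invented)
	unusedSpace := uint64(80 * 1024 * 1024 * 1024)          // growth still reserved by existing volumes (invented)
	volCount := 10                                          // volumes currently on this disk

	// space not yet claimed by any existing volume
	unclaimed := int64(diskFree) - int64(unusedSpace)

	maxVolumeCount := volCount
	if unclaimed > int64(volumeSizeLimit) {
		// keep one volume's worth of headroom, as MaybeAdjustVolumeMax does
		maxVolumeCount += int(uint64(unclaimed)/volumeSizeLimit) - 1
	}
	fmt.Println("estimated max volume count:", maxVolumeCount) // 10 + 14 - 1 = 23
}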
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
new file mode 100644
index 000000000..2b0df439c
--- /dev/null
+++ b/weed/storage/store_ec.go
@@ -0,0 +1,387 @@
+package storage
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/klauspost/reedsolomon"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
+ var ecShardMessages []*master_pb.VolumeEcShardInformationMessage
+ collectionEcShardSize := make(map[string]int64)
+ for _, location := range s.Locations {
+ location.ecVolumesLock.RLock()
+ for _, ecShards := range location.ecVolumes {
+ ecShardMessages = append(ecShardMessages, ecShards.ToVolumeEcShardInformationMessage()...)
+
+ for _, ecShard := range ecShards.Shards {
+ collectionEcShardSize[ecShards.Collection] += ecShard.Size()
+ }
+ }
+ location.ecVolumesLock.RUnlock()
+ }
+
+ for col, size := range collectionEcShardSize {
+ stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "ec").Set(float64(size))
+ }
+
+ return &master_pb.Heartbeat{
+ EcShards: ecShardMessages,
+ HasNoEcShards: len(ecShardMessages) == 0,
+ }
+
+}
+
+func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error {
+ for _, location := range s.Locations {
+ if err := location.LoadEcShard(collection, vid, shardId); err == nil {
+ glog.V(0).Infof("MountEcShards %d.%d", vid, shardId)
+
+ var shardBits erasure_coding.ShardBits
+
+ s.NewEcShardsChan <- master_pb.VolumeEcShardInformationMessage{
+ Id: uint32(vid),
+ Collection: collection,
+ EcIndexBits: uint32(shardBits.AddShardId(shardId)),
+ }
+ return nil
+ } else {
+ return fmt.Errorf("%s load ec shard %d.%d: %v", location.Directory, vid, shardId, err)
+ }
+ }
+
+ return fmt.Errorf("MountEcShards %d.%d not found on disk", vid, shardId)
+}
+
+func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.ShardId) error {
+
+ ecShard, found := s.findEcShard(vid, shardId)
+ if !found {
+ return nil
+ }
+
+ var shardBits erasure_coding.ShardBits
+ message := master_pb.VolumeEcShardInformationMessage{
+ Id: uint32(vid),
+ Collection: ecShard.Collection,
+ EcIndexBits: uint32(shardBits.AddShardId(shardId)),
+ }
+
+ for _, location := range s.Locations {
+ if deleted := location.UnloadEcShard(vid, shardId); deleted {
+ glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
+ s.DeletedEcShardsChan <- message
+ return nil
+ }
+ }
+
+ return fmt.Errorf("UnmountEcShards %d.%d not found on disk", vid, shardId)
+}
+
+func (s *Store) findEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) (*erasure_coding.EcVolumeShard, bool) {
+ for _, location := range s.Locations {
+ if v, found := location.FindEcShard(vid, shardId); found {
+ return v, found
+ }
+ }
+ return nil, false
+}
+
+func (s *Store) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) {
+ for _, location := range s.Locations {
+ if s, found := location.FindEcVolume(vid); found {
+ return s, true
+ }
+ }
+ return nil, false
+}
+
+func (s *Store) DestroyEcVolume(vid needle.VolumeId) {
+ for _, location := range s.Locations {
+ location.DestroyEcVolume(vid)
+ }
+}
+
+func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) {
+ for _, location := range s.Locations {
+ if localEcVolume, found := location.FindEcVolume(vid); found {
+
+ offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, localEcVolume.Version)
+ if err != nil {
+ return 0, fmt.Errorf("locate in local ec volume: %v", err)
+ }
+ if size == types.TombstoneFileSize {
+ return 0, fmt.Errorf("entry %s is deleted", n.Id)
+ }
+
+ glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
+
+ if len(intervals) > 1 {
+ glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
+ }
+ bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals)
+ if err != nil {
+ return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
+ }
+ if isDeleted {
+ return 0, fmt.Errorf("ec entry %s is deleted", n.Id)
+ }
+
+ err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, localEcVolume.Version)
+ if err != nil {
+ return 0, fmt.Errorf("readbytes: %v", err)
+ }
+
+ return len(bytes), nil
+ }
+ }
+ return 0, fmt.Errorf("ec shard %d not found", vid)
+}
+
+func (s *Store) readEcShardIntervals(vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) {
+
+ if err = s.cachedLookupEcShardLocations(ecVolume); err != nil {
+ return nil, false, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err)
+ }
+
+ for i, interval := range intervals {
+ if d, isDeleted, e := s.readOneEcShardInterval(needleId, ecVolume, interval); e != nil {
+ return nil, isDeleted, e
+ } else {
+ if isDeleted {
+ is_deleted = true
+ }
+ if i == 0 {
+ data = d
+ } else {
+ data = append(data, d...)
+ }
+ }
+ }
+ return
+}
+
+func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) {
+ shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
+ data = make([]byte, interval.Size)
+ if shard, found := ecVolume.FindEcVolumeShard(shardId); found {
+ if _, err = shard.ReadAt(data, actualOffset); err != nil {
+ glog.V(0).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err)
+ return
+ }
+ } else {
+ ecVolume.ShardLocationsLock.RLock()
+ sourceDataNodes, hasShardIdLocation := ecVolume.ShardLocations[shardId]
+ ecVolume.ShardLocationsLock.RUnlock()
+
+ // try reading directly
+ if hasShardIdLocation {
+ _, is_deleted, err = s.readRemoteEcShardInterval(sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset)
+ if err == nil {
+ return
+ }
+ glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err)
+ forgetShardId(ecVolume, shardId)
+ }
+
+ // try reading by recovering from other shards
+ _, is_deleted, err = s.recoverOneRemoteEcShardInterval(needleId, ecVolume, shardId, data, actualOffset)
+ if err == nil {
+ return
+ }
+ glog.V(0).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err)
+ }
+ return
+}
+
+func forgetShardId(ecVolume *erasure_coding.EcVolume, shardId erasure_coding.ShardId) {
+ // failed to access the source data nodes, clear it up
+ ecVolume.ShardLocationsLock.Lock()
+ delete(ecVolume.ShardLocations, shardId)
+ ecVolume.ShardLocationsLock.Unlock()
+}
+
+func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) (err error) {
+
+ shardCount := len(ecVolume.ShardLocations)
+ if shardCount < erasure_coding.DataShardsCount &&
+ ecVolume.ShardLocationsRefreshTime.Add(11*time.Second).After(time.Now()) ||
+ shardCount == erasure_coding.TotalShardsCount &&
+ ecVolume.ShardLocationsRefreshTime.Add(37*time.Minute).After(time.Now()) ||
+ shardCount >= erasure_coding.DataShardsCount &&
+ ecVolume.ShardLocationsRefreshTime.Add(7*time.Minute).After(time.Now()) {
+ // still fresh
+ return nil
+ }
+
+ glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId)
+
+ err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
+ req := &master_pb.LookupEcVolumeRequest{
+ VolumeId: uint32(ecVolume.VolumeId),
+ }
+ resp, err := masterClient.LookupEcVolume(context.Background(), req)
+ if err != nil {
+ return fmt.Errorf("lookup ec volume %d: %v", ecVolume.VolumeId, err)
+ }
+ if len(resp.ShardIdLocations) < erasure_coding.DataShardsCount {
+ return fmt.Errorf("only %d shards found but %d required", len(resp.ShardIdLocations), erasure_coding.DataShardsCount)
+ }
+
+ ecVolume.ShardLocationsLock.Lock()
+ for _, shardIdLocations := range resp.ShardIdLocations {
+ shardId := erasure_coding.ShardId(shardIdLocations.ShardId)
+ delete(ecVolume.ShardLocations, shardId)
+ for _, loc := range shardIdLocations.Locations {
+ ecVolume.ShardLocations[shardId] = append(ecVolume.ShardLocations[shardId], loc.Url)
+ }
+ }
+ ecVolume.ShardLocationsRefreshTime = time.Now()
+ ecVolume.ShardLocationsLock.Unlock()
+
+ return nil
+ })
+ return
+}
+
+func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
+
+ if len(sourceDataNodes) == 0 {
+ return 0, false, fmt.Errorf("failed to find ec shard %d.%d", vid, shardId)
+ }
+
+ for _, sourceDataNode := range sourceDataNodes {
+ glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
+ n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset)
+ if err == nil {
+ return
+ }
+ glog.V(1).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+ }
+
+ return
+}
+
+func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
+
+ err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+
+ // stream the requested byte range from the remote shard
+ shardReadClient, err := client.VolumeEcShardRead(context.Background(), &volume_server_pb.VolumeEcShardReadRequest{
+ VolumeId: uint32(vid),
+ ShardId: uint32(shardId),
+ Offset: offset,
+ Size: int64(len(buf)),
+ FileKey: uint64(needleId),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to start reading ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+ }
+
+ for {
+ resp, receiveErr := shardReadClient.Recv()
+ if receiveErr == io.EOF {
+ break
+ }
+ if receiveErr != nil {
+ return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+ }
+ if resp.IsDeleted {
+ is_deleted = true
+ }
+ copy(buf[n:n+len(resp.Data)], resp.Data)
+ n += len(resp.Data)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return 0, is_deleted, fmt.Errorf("read ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+ }
+
+ return
+}
+
+func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
+ glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+
+ enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
+ if err != nil {
+ return 0, false, fmt.Errorf("failed to create encoder: %v", err)
+ }
+
+ bufs := make([][]byte, erasure_coding.TotalShardsCount)
+
+ var wg sync.WaitGroup
+ ecVolume.ShardLocationsLock.RLock()
+ for shardId, locations := range ecVolume.ShardLocations {
+
+ // skip the current shard or empty shards
+ if shardId == shardIdToRecover {
+ continue
+ }
+ if len(locations) == 0 {
+ glog.V(3).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations)
+ continue
+ }
+
+ // read from remote locations
+ wg.Add(1)
+ go func(shardId erasure_coding.ShardId, locations []string) {
+ defer wg.Done()
+ data := make([]byte, len(buf))
+ nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset)
+ if readErr != nil {
+ glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr)
+ forgetShardId(ecVolume, shardId)
+ }
+ if isDeleted {
+ is_deleted = true
+ }
+ if nRead == len(buf) {
+ bufs[shardId] = data
+ }
+ }(shardId, locations)
+ }
+ ecVolume.ShardLocationsLock.RUnlock()
+
+ wg.Wait()
+
+ if err = enc.ReconstructData(bufs); err != nil {
+ glog.V(3).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err)
+ return 0, false, err
+ }
+ glog.V(4).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+
+ copy(buf, bufs[shardIdToRecover])
+
+ return len(buf), is_deleted, nil
+}
+
+func (s *Store) EcVolumes() (ecVolumes []*erasure_coding.EcVolume) {
+ for _, location := range s.Locations {
+ location.ecVolumesLock.RLock()
+ for _, v := range location.ecVolumes {
+ ecVolumes = append(ecVolumes, v)
+ }
+ location.ecVolumesLock.RUnlock()
+ }
+ sort.Slice(ecVolumes, func(i, j int) bool {
+ return ecVolumes[i].VolumeId > ecVolumes[j].VolumeId
+ })
+ return ecVolumes
+}
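The recovery path above hands whatever shards it managed to fetch to the klauspost/reedsolomon encoder. A minimal standalone sketch of that reconstruction call, using SeaweedFS's 10 data + 4 parity layout and made-up sample data:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// SeaweedFS erasure coding uses 10 data shards plus 4 parity shards.
	enc, err := reedsolomon.New(10, 4)
	if err != nil {
		panic(err)
	}

	// Split sample data into 10 data shards; Split also allocates the 4 parity shards.
	data := bytes.Repeat([]byte("0123456789"), 100)
	shards, err := enc.Split(data)
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil { // fill in the parity shards
		panic(err)
	}

	// Simulate a lost shard: a nil entry marks it as missing.
	shards[3] = nil

	// ReconstructData rebuilds only the data shards, which is all a read needs.
	if err = enc.ReconstructData(shards); err != nil {
		panic(err)
	}
	fmt.Println("recovered shard 3, length:", len(shards[3]))
}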
diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go
new file mode 100644
index 000000000..4a75fb20b
--- /dev/null
+++ b/weed/storage/store_ec_delete.go
@@ -0,0 +1,105 @@
+package storage
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func (s *Store) DeleteEcShardNeedle(ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) {
+
+ count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n)
+
+ if err != nil {
+ return 0, err
+ }
+
+ if cookie != n.Cookie {
+ return 0, fmt.Errorf("unexpected cookie %x", cookie)
+ }
+
+ if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume, n.Id); err != nil {
+ return 0, err
+ }
+
+ return int64(count), nil
+
+}
+
+func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error {
+
+ _, _, intervals, err := ecVolume.LocateEcShardNeedle(needleId, ecVolume.Version)
+ if err != nil {
+ return err
+ }
+
+ if len(intervals) == 0 {
+ return erasure_coding.NotFoundError
+ }
+
+ shardId, _ := intervals[0].ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
+
+ hasDeletionSuccess := false
+ err = s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId)
+ if err == nil {
+ hasDeletionSuccess = true
+ }
+
+ for shardId = erasure_coding.DataShardsCount; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId); parityDeletionError == nil {
+ hasDeletionSuccess = true
+ }
+ }
+
+ if hasDeletionSuccess {
+ return nil
+ }
+
+ return err
+
+}
+
+func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error {
+
+ ecVolume.ShardLocationsLock.RLock()
+ sourceDataNodes, hasShardLocations := ecVolume.ShardLocations[shardId]
+ ecVolume.ShardLocationsLock.RUnlock()
+
+ if !hasShardLocations {
+ return fmt.Errorf("ec shard %d.%d not located", ecVolume.VolumeId, shardId)
+ }
+
+ for _, sourceDataNode := range sourceDataNodes {
+ glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode)
+ err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId)
+ if err != nil {
+ return err
+ }
+ glog.V(1).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err)
+ }
+
+ return nil
+
+}
+
+func (s *Store) doDeleteNeedleFromRemoteEcShard(sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error {
+
+ return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+
+ // issue the blob delete against the remote shard
+ _, err := client.VolumeEcBlobDelete(context.Background(), &volume_server_pb.VolumeEcBlobDeleteRequest{
+ VolumeId: uint32(vid),
+ Collection: collection,
+ FileKey: uint64(needleId),
+ Version: uint32(version),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to delete from ec shard %d on %s: %v", vid, sourceDataNode, err)
+ }
+ return nil
+ })
+
+}
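The deletion above fans out: first to the data shard that holds the needle, then to every parity shard, and the operation counts as successful if any single delete goes through. That succeed-if-any shape in isolation, as a sketch:

package main

import (
	"errors"
	"fmt"
)

// deleteFromAny runs each attempt and succeeds if at least one does;
// otherwise it reports an error, mirroring the shape of
// doDeleteNeedleFromAtLeastOneRemoteEcShards above.
func deleteFromAny(attempts []func() error) error {
	var firstErr error
	ok := false
	for _, attempt := range attempts {
		if err := attempt(); err == nil {
			ok = true
		} else if firstErr == nil {
			firstErr = err
		}
	}
	if ok {
		return nil
	}
	return firstErr
}

func main() {
	err := deleteFromAny([]func() error{
		func() error { return errors.New("shard 0 unreachable") },
		func() error { return nil }, // one success is enough
	})
	fmt.Println(err) // <nil>
}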
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index cc0521491..38159496e 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -2,29 +2,31 @@ package storage
import (
"fmt"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
-func (s *Store) CheckCompactVolume(volumeId VolumeId) (float64, error) {
+func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
if v := s.findVolume(volumeId); v != nil {
glog.V(3).Infof("volumd %d garbage level: %f", volumeId, v.garbageLevel())
return v.garbageLevel(), nil
}
return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)
}
-func (s *Store) CompactVolume(vid VolumeId, preallocate int64) error {
+func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
if v := s.findVolume(vid); v != nil {
- return v.Compact(preallocate)
+ return v.Compact2(preallocate, compactionBytePerSecond)
}
return fmt.Errorf("volume id %d is not found during compact", vid)
}
-func (s *Store) CommitCompactVolume(vid VolumeId) error {
+func (s *Store) CommitCompactVolume(vid needle.VolumeId) error {
if v := s.findVolume(vid); v != nil {
- return v.commitCompact()
+ return v.CommitCompact()
}
return fmt.Errorf("volume id %d is not found during commit compact", vid)
}
-func (s *Store) CommitCleanupVolume(vid VolumeId) error {
+func (s *Store) CommitCleanupVolume(vid needle.VolumeId) error {
if v := s.findVolume(vid); v != nil {
return v.cleanupCompact()
}
diff --git a/weed/storage/replica_placement.go b/weed/storage/super_block/replica_placement.go
similarity index 98%
rename from weed/storage/replica_placement.go
rename to weed/storage/super_block/replica_placement.go
index c1aca52eb..fcccbba7d 100644
--- a/weed/storage/replica_placement.go
+++ b/weed/storage/super_block/replica_placement.go
@@ -1,4 +1,4 @@
-package storage
+package super_block
import (
"errors"
diff --git a/weed/storage/replica_placement_test.go b/weed/storage/super_block/replica_placement_test.go
similarity index 93%
rename from weed/storage/replica_placement_test.go
rename to weed/storage/super_block/replica_placement_test.go
index 7968af7cb..7742ba548 100644
--- a/weed/storage/replica_placement_test.go
+++ b/weed/storage/super_block/replica_placement_test.go
@@ -1,4 +1,4 @@
-package storage
+package super_block
import (
"testing"
diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go
new file mode 100644
index 000000000..f48cd0bdc
--- /dev/null
+++ b/weed/storage/super_block/super_block.go
@@ -0,0 +1,69 @@
+package super_block
+
+import (
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ SuperBlockSize = 8
+)
+
+/*
+* Super block currently has 8 bytes allocated for each volume.
+* Byte 0: version, 1 or 2
+* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc
+* Byte 2 and byte 3: Time to live. See TTL for definition
+* Byte 4 and byte 5: The number of times the volume has been compacted.
+* Remaining bytes: reserved
+ */
+type SuperBlock struct {
+ Version needle.Version
+ ReplicaPlacement *ReplicaPlacement
+ Ttl *needle.TTL
+ CompactionRevision uint16
+ Extra *master_pb.SuperBlockExtra
+ ExtraSize uint16
+}
+
+func (s *SuperBlock) BlockSize() int {
+ switch s.Version {
+ case needle.Version2, needle.Version3:
+ return SuperBlockSize + int(s.ExtraSize)
+ }
+ return SuperBlockSize
+}
+
+func (s *SuperBlock) Bytes() []byte {
+ header := make([]byte, SuperBlockSize)
+ header[0] = byte(s.Version)
+ header[1] = s.ReplicaPlacement.Byte()
+ s.Ttl.ToBytes(header[2:4])
+ util.Uint16toBytes(header[4:6], s.CompactionRevision)
+
+ if s.Extra != nil {
+ extraData, err := proto.Marshal(s.Extra)
+ if err != nil {
+ glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
+ }
+ extraSize := len(extraData)
+ if extraSize > 256*256-2 {
+ // reserve a couple of bits for future extension
+ glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
+ }
+ s.ExtraSize = uint16(extraSize)
+ util.Uint16toBytes(header[6:8], s.ExtraSize)
+
+ header = append(header, extraData...)
+ }
+
+ return header
+}
+
+func (s *SuperBlock) Initialized() bool {
+ return s.ReplicaPlacement != nil && s.Ttl != nil
+}
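The 8-byte layout documented above can be decoded with plain byte slicing; the util helpers write the two 16-bit fields big-endian, so encoding/binary serves as a stand-in. A sketch over an invented header (the TTL unit value for days is assumed, not taken from the needle package):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A hypothetical super block: version 2, replica placement 001,
	// TTL 15 days, compacted 3 times, no extra block.
	header := []byte{2, 0x01, 15, 3 /* assumed unit code for days */, 0, 3, 0, 0}

	version := header[0]
	replicaPlacement := header[1] // three placement digits packed into one byte
	ttlCount, ttlUnit := header[2], header[3]
	compactionRevision := binary.BigEndian.Uint16(header[4:6])
	extraSize := binary.BigEndian.Uint16(header[6:8])

	fmt.Println(version, replicaPlacement, ttlCount, ttlUnit, compactionRevision, extraSize)
}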
diff --git a/weed/storage/super_block/super_block_read.go.go b/weed/storage/super_block/super_block_read.go.go
new file mode 100644
index 000000000..9eb12e116
--- /dev/null
+++ b/weed/storage/super_block/super_block_read.go.go
@@ -0,0 +1,44 @@
+package super_block
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// ReadSuperBlock reads from the data file and loads it into the volume's super block
+func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBlock, err error) {
+
+ header := make([]byte, SuperBlockSize)
+ if _, e := datBackend.ReadAt(header, 0); e != nil {
+ err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e)
+ return
+ }
+
+ superBlock.Version = needle.Version(header[0])
+ if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil {
+ err = fmt.Errorf("cannot read replica type: %s", err.Error())
+ return
+ }
+ superBlock.Ttl = needle.LoadTTLFromBytes(header[2:4])
+ superBlock.CompactionRevision = util.BytesToUint16(header[4:6])
+ superBlock.ExtraSize = util.BytesToUint16(header[6:8])
+
+ if superBlock.ExtraSize > 0 {
+ // read the extra block that follows the fixed 8-byte header
+ extraData := make([]byte, int(superBlock.ExtraSize))
+ if _, e := datBackend.ReadAt(extraData, SuperBlockSize); e != nil {
+ err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), e)
+ return
+ }
+ superBlock.Extra = &master_pb.SuperBlockExtra{}
+ err = proto.Unmarshal(extraData, superBlock.Extra)
+ if err != nil {
+ err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), err)
+ return
+ }
+ }
+
+ return
+}
diff --git a/weed/storage/volume_super_block_test.go b/weed/storage/super_block/super_block_test.go
similarity index 58%
rename from weed/storage/volume_super_block_test.go
rename to weed/storage/super_block/super_block_test.go
index 13db4b194..25699070d 100644
--- a/weed/storage/volume_super_block_test.go
+++ b/weed/storage/super_block/super_block_test.go
@@ -1,21 +1,23 @@
-package storage
+package super_block
import (
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func TestSuperBlockReadWrite(t *testing.T) {
rp, _ := NewReplicaPlacementFromByte(byte(001))
- ttl, _ := ReadTTL("15d")
+ ttl, _ := needle.ReadTTL("15d")
s := &SuperBlock{
- version: CurrentVersion,
+ Version: needle.CurrentVersion,
ReplicaPlacement: rp,
Ttl: ttl,
}
bytes := s.Bytes()
- if !(bytes[2] == 15 && bytes[3] == Day) {
+ if !(bytes[2] == 15 && bytes[3] == needle.Day) {
println("byte[2]:", bytes[2], "byte[3]:", bytes[3])
t.Fail()
}
diff --git a/weed/storage/types/needle_id_128bit.go b/weed/storage/types/needle_id_128bit.go
deleted file mode 100644
index 3af0b7285..000000000
--- a/weed/storage/types/needle_id_128bit.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build 128BitNeedleId
-
-package types
-
-import (
- "encoding/hex"
-)
-
-const (
- NeedleIdSize = 16
- NeedleIdEmpty = ""
-)
-
-// this is a 128 bit needle id implementation.
-// Usually a FileId has 32bit volume id, 64bit needle id, 32 bit cookie.
-// But when your system is using UUID, which is 128 bit, a custom 128-bit needle id can be easier to manage.
-// Caveat: In this mode, the fildId from master /dir/assign can not be directly used.
-// Only the volume id and cookie from the fileId are usuable.
-type NeedleId string
-
-func NeedleIdToBytes(bytes []byte, needleId NeedleId) {
- hex.Decode(bytes, []byte(needleId))
-}
-
-// NeedleIdToUint64 used to send max needle id to master
-func NeedleIdToUint64(needleId NeedleId) uint64 {
- return 0
-}
-
-func Uint64ToNeedleId(needleId uint64) NeedleId {
- return NeedleId("")
-}
-
-func BytesToNeedleId(bytes []byte) (needleId NeedleId) {
- return NeedleId(hex.EncodeToString(bytes))
-}
-
-func (k NeedleId) String() string {
- return string(k)
-}
-
-func ParseNeedleId(idString string) (NeedleId, error) {
- return NeedleId(idString), nil
-}
diff --git a/weed/storage/types/needle_id_type.go b/weed/storage/types/needle_id_type.go
index 4a890cd16..32a296613 100644
--- a/weed/storage/types/needle_id_type.go
+++ b/weed/storage/types/needle_id_type.go
@@ -1,5 +1,3 @@
-// +build !128BitNeedleId
-
package types
import (
diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go
index ce4e601e4..2ebb392db 100644
--- a/weed/storage/types/needle_types.go
+++ b/weed/storage/types/needle_types.go
@@ -7,18 +7,28 @@ import (
"strconv"
)
-type Offset uint32
+type Offset struct {
+ OffsetHigher
+ OffsetLower
+}
+
+type OffsetLower struct {
+ b3 byte
+ b2 byte
+ b1 byte
+ b0 byte // the least significant byte
+}
+
type Cookie uint32
const (
- OffsetSize = 4
- SizeSize = 4 // uint32 size
- NeedleEntrySize = NeedleIdSize + OffsetSize + SizeSize
- TimestampSize = 8 // int64 size
- NeedlePaddingSize = 8
- MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8
- TombstoneFileSize = math.MaxUint32
- CookieSize = 4
+ SizeSize = 4 // uint32 size
+ NeedleHeaderSize = CookieSize + NeedleIdSize + SizeSize
+ NeedleMapEntrySize = NeedleIdSize + OffsetSize + SizeSize
+ TimestampSize = 8 // int64 size
+ NeedlePaddingSize = 8
+ TombstoneFileSize = math.MaxUint32
+ CookieSize = 4
)
func CookieToBytes(bytes []byte, cookie Cookie) {
@@ -39,15 +49,3 @@ func ParseCookie(cookieString string) (Cookie, error) {
}
return Cookie(cookie), nil
}
-
-func OffsetToBytes(bytes []byte, offset Offset) {
- util.Uint32toBytes(bytes, uint32(offset))
-}
-
-func Uint32ToOffset(offset uint32) Offset {
- return Offset(offset)
-}
-
-func BytesToOffset(bytes []byte) Offset {
- return Offset(util.BytesToUint32(bytes[0:4]))
-}
diff --git a/weed/storage/types/offset_4bytes.go b/weed/storage/types/offset_4bytes.go
new file mode 100644
index 000000000..d53147e21
--- /dev/null
+++ b/weed/storage/types/offset_4bytes.go
@@ -0,0 +1,63 @@
+// +build !5BytesOffset
+
+package types
+
+import (
+ "fmt"
+)
+
+type OffsetHigher struct {
+ // b4 byte
+}
+
+const (
+ OffsetSize = 4
+ MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 // 32GB
+)
+
+func OffsetToBytes(bytes []byte, offset Offset) {
+ bytes[3] = offset.b0
+ bytes[2] = offset.b1
+ bytes[1] = offset.b2
+ bytes[0] = offset.b3
+}
+
+// only for testing, will be removed later.
+func Uint32ToOffset(offset uint32) Offset {
+ return Offset{
+ OffsetLower: OffsetLower{
+ b0: byte(offset),
+ b1: byte(offset >> 8),
+ b2: byte(offset >> 16),
+ b3: byte(offset >> 24),
+ },
+ }
+}
+
+func BytesToOffset(bytes []byte) Offset {
+ return Offset{
+ OffsetLower: OffsetLower{
+ b0: bytes[3],
+ b1: bytes[2],
+ b2: bytes[1],
+ b3: bytes[0],
+ },
+ }
+}
+
+func (offset Offset) IsZero() bool {
+ return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0
+}
+
+func ToOffset(offset int64) Offset {
+ smaller := uint32(offset / int64(NeedlePaddingSize))
+ return Uint32ToOffset(smaller)
+}
+
+func (offset Offset) ToAcutalOffset() (actualOffset int64) {
+ return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize)
+}
+
+func (offset Offset) String() string {
+ return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24)
+}
diff --git a/weed/storage/types/offset_5bytes.go b/weed/storage/types/offset_5bytes.go
new file mode 100644
index 000000000..05c6d2f39
--- /dev/null
+++ b/weed/storage/types/offset_5bytes.go
@@ -0,0 +1,80 @@
+// +build 5BytesOffset
+
+package types
+
+import (
+ "fmt"
+)
+
+type OffsetHigher struct {
+ b4 byte
+}
+
+const (
+ OffsetSize = 4 + 1
+ MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 * 256 /* 256 is from the extra byte */ // 8TB
+)
+
+func OffsetToBytes(bytes []byte, offset Offset) {
+ bytes[4] = offset.b4
+ bytes[3] = offset.b0
+ bytes[2] = offset.b1
+ bytes[1] = offset.b2
+ bytes[0] = offset.b3
+}
+
+// only for testing, will be removed later.
+func Uint32ToOffset(offset uint32) Offset {
+ return Offset{
+ OffsetHigher: OffsetHigher{
+ b4: byte(offset >> 32),
+ },
+ OffsetLower: OffsetLower{
+ b0: byte(offset),
+ b1: byte(offset >> 8),
+ b2: byte(offset >> 16),
+ b3: byte(offset >> 24),
+ },
+ }
+}
+
+func BytesToOffset(bytes []byte) Offset {
+ return Offset{
+ OffsetHigher: OffsetHigher{
+ b4: bytes[4],
+ },
+ OffsetLower: OffsetLower{
+ b0: bytes[3],
+ b1: bytes[2],
+ b2: bytes[1],
+ b3: bytes[0],
+ },
+ }
+}
+
+func (offset Offset) IsZero() bool {
+ return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0 && offset.b4 == 0
+}
+
+func ToOffset(offset int64) Offset {
+ smaller := offset / int64(NeedlePaddingSize)
+ return Offset{
+ OffsetHigher: OffsetHigher{
+ b4: byte(smaller >> 32),
+ },
+ OffsetLower: OffsetLower{
+ b0: byte(smaller),
+ b1: byte(smaller >> 8),
+ b2: byte(smaller >> 16),
+ b3: byte(smaller >> 24),
+ },
+ }
+}
+
+func (offset Offset) ToAcutalOffset() (actualOffset int64) {
+ return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24 + int64(offset.b4)<<32) * int64(NeedlePaddingSize)
+}
+
+func (offset Offset) String() string {
+ return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24+int64(offset.b4)<<32)
+}
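Both build variants store offsets in units of NeedlePaddingSize (8 bytes), so the extra byte in the 5BytesOffset build multiplies the addressable volume size by 256: 2^32 x 8 = 32GiB grows to 2^40 x 8 = 8TiB. The same arithmetic as a standalone check:

package main

import "fmt"

func main() {
	const needlePaddingSize = uint64(8) // offsets are stored divided by this

	fourByteMax := (uint64(1) << 32) * needlePaddingSize // 2^35 bytes = 32GiB
	fiveByteMax := (uint64(1) << 40) * needlePaddingSize // 2^43 bytes = 8TiB

	fmt.Println(fourByteMax == 4*1024*1024*1024*8)     // matches the 4-byte MaxPossibleVolumeSize
	fmt.Println(fiveByteMax == 4*1024*1024*1024*8*256) // matches the 5-byte MaxPossibleVolumeSize
}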
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index 07c72ecb4..73fdb417d 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -2,75 +2,154 @@ package storage
import (
"fmt"
- "os"
"path"
+ "strconv"
"sync"
"time"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+
"github.com/chrislusf/seaweedfs/weed/glog"
)
type Volume struct {
- Id VolumeId
- dir string
- Collection string
- dataFile *os.File
- nm NeedleMapper
- compactingWg sync.WaitGroup
- needleMapKind NeedleMapType
- readOnly bool
-
- SuperBlock
-
- dataFileAccessLock sync.Mutex
- lastModifiedTime uint64 //unix time in seconds
+ Id needle.VolumeId
+ dir string
+ Collection string
+ DataBackend backend.BackendStorageFile
+ nm NeedleMapper
+ needleMapKind NeedleMapType
+ noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
+ noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
+ hasRemoteFile bool // if the volume has a remote file
+ MemoryMapMaxSizeMb uint32
+
+ super_block.SuperBlock
+
+ dataFileAccessLock sync.RWMutex
+ asyncRequestsChan chan *needle.AsyncRequest
+ lastModifiedTsSeconds uint64 // unix time in seconds
+ lastAppendAtNs uint64 // unix time in nanoseconds
lastCompactIndexOffset uint64
lastCompactRevision uint16
+
+ isCompacting bool
+
+ volumeInfo *volume_server_pb.VolumeInfo
+ location *DiskLocation
}
-func NewVolume(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL, preallocate int64) (v *Volume, e error) {
+func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) {
// if replicaPlacement is nil, the superblock will be loaded from disk
- v = &Volume{dir: dirname, Collection: collection, Id: id}
- v.SuperBlock = SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
+ v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
+ asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
+ v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
v.needleMapKind = needleMapKind
e = v.load(true, true, needleMapKind, preallocate)
+ v.startWorker()
return
}
+
func (v *Volume) String() string {
- return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, readOnly:%v", v.Id, v.dir, v.Collection, v.dataFile, v.nm, v.readOnly)
+ return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, noWrite:%v canDelete:%v", v.Id, v.dir, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete)
}
-func (v *Volume) FileName() (fileName string) {
- if v.Collection == "" {
- fileName = path.Join(v.dir, v.Id.String())
+func VolumeFileName(dir string, collection string, id int) (fileName string) {
+ idString := strconv.Itoa(id)
+ if collection == "" {
+ fileName = path.Join(dir, idString)
} else {
- fileName = path.Join(v.dir, v.Collection+"_"+v.Id.String())
+ fileName = path.Join(dir, collection+"_"+idString)
}
return
}
-func (v *Volume) DataFile() *os.File {
- return v.dataFile
+
+func (v *Volume) FileName() (fileName string) {
+ return VolumeFileName(v.dir, v.Collection, int(v.Id))
}
-func (v *Volume) Version() Version {
- return v.SuperBlock.Version()
+func (v *Volume) Version() needle.Version {
+ if v.volumeInfo.Version != 0 {
+ v.SuperBlock.Version = needle.Version(v.volumeInfo.Version)
+ }
+ return v.SuperBlock.Version
}
-func (v *Volume) Size() int64 {
- v.dataFileAccessLock.Lock()
- defer v.dataFileAccessLock.Unlock()
+func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
- if v.dataFile == nil {
- return 0
+ if v.DataBackend == nil {
+ return
}
- stat, e := v.dataFile.Stat()
+ datFileSize, modTime, e := v.DataBackend.GetStat()
if e == nil {
- return stat.Size()
+ return uint64(datFileSize), v.nm.IndexFileSize(), modTime
+ }
+ glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
+ return // returning -1 would cause integer overflow and make the volume unwritable.
+}
+
+func (v *Volume) ContentSize() uint64 {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+ if v.nm == nil {
+ return 0
+ }
+ return v.nm.ContentSize()
+}
+
+func (v *Volume) DeletedSize() uint64 {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+ if v.nm == nil {
+ return 0
+ }
+ return v.nm.DeletedSize()
+}
+
+func (v *Volume) FileCount() uint64 {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+ if v.nm == nil {
+ return 0
}
- glog.V(0).Infof("Failed to read file size %s %v", v.dataFile.Name(), e)
- return 0 // -1 causes integer overflow and the volume to become unwritable.
+ return uint64(v.nm.FileCount())
+}
+
+func (v *Volume) DeletedCount() uint64 {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+ if v.nm == nil {
+ return 0
+ }
+ return uint64(v.nm.DeletedCount())
+}
+
+func (v *Volume) MaxFileKey() types.NeedleId {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+ if v.nm == nil {
+ return 0
+ }
+ return v.nm.MaxFileKey()
+}
+
+func (v *Volume) IndexFileSize() uint64 {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+ if v.nm == nil {
+ return 0
+ }
+ return v.nm.IndexFileSize()
}
// Close cleanly shuts down this volume
@@ -81,9 +160,10 @@ func (v *Volume) Close() {
v.nm.Close()
v.nm = nil
}
- if v.dataFile != nil {
- _ = v.dataFile.Close()
- v.dataFile = nil
+ if v.DataBackend != nil {
+ _ = v.DataBackend.Close()
+ v.DataBackend = nil
+ stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
}
}
@@ -91,17 +171,13 @@ func (v *Volume) NeedToReplicate() bool {
return v.ReplicaPlacement.GetCopyCount() > 1
}
-func (v *Volume) ContentSize() uint64 {
- return v.nm.ContentSize()
-}
-
// volume is expired if modified time + volume ttl < now
// except when volume is empty
// or when the volume does not have a ttl
// or when volumeSizeLimit is 0 when server just starts
func (v *Volume) expired(volumeSizeLimit uint64) bool {
if volumeSizeLimit == 0 {
- //skip if we don't know size limit
+ // skip if we don't know size limit
return false
}
if v.ContentSize() == 0 {
@@ -110,9 +186,9 @@ func (v *Volume) expired(volumeSizeLimit uint64) bool {
if v.Ttl == nil || v.Ttl.Minutes() == 0 {
return false
}
- glog.V(1).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTime)
- livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTime)) / 60
- glog.V(1).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes)
+ glog.V(2).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
+ livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
+ glog.V(2).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes)
if int64(v.Ttl.Minutes()) < livedMinutes {
return true
}
@@ -129,8 +205,45 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
removalDelay = maxDelayMinutes
}
- if uint64(v.Ttl.Minutes()+removalDelay)*60+v.lastModifiedTime < uint64(time.Now().Unix()) {
+ if uint64(v.Ttl.Minutes()+removalDelay)*60+v.lastModifiedTsSeconds < uint64(time.Now().Unix()) {
return true
}
return false
}
+
+func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage {
+ size, _, modTime := v.FileStat()
+
+ volumInfo := &master_pb.VolumeInformationMessage{
+ Id: uint32(v.Id),
+ Size: size,
+ Collection: v.Collection,
+ FileCount: v.FileCount(),
+ DeleteCount: v.DeletedCount(),
+ DeletedByteCount: v.DeletedSize(),
+ ReadOnly: v.IsReadOnly(),
+ ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
+ Version: uint32(v.Version()),
+ Ttl: v.Ttl.ToUint32(),
+ CompactRevision: uint32(v.SuperBlock.CompactionRevision),
+ ModifiedAtSecond: modTime.Unix(),
+ }
+
+ volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()
+
+ return volumeInfo
+}
+
+func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) {
+ if v.volumeInfo == nil {
+ return
+ }
+ if len(v.volumeInfo.GetFiles()) == 0 {
+ return
+ }
+ return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey()
+}
+
+func (v *Volume) IsReadOnly() bool {
+ return v.noWriteOrDelete || v.noWriteCanDelete || v.location.isDiskSpaceLow
+}
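With dataFileAccessLock now a sync.RWMutex, the stat accessors above can run concurrently while loads and closes still take the exclusive lock, and every reader tolerates a nil needle map. That guarded-getter pattern in isolation, with a hypothetical counter standing in for the needle map:

package main

import (
	"fmt"
	"sync"
)

// counter mimics the Volume accessor pattern: readers share an RLock
// and treat a nil backing structure as empty.
type counter struct {
	mu sync.RWMutex
	nm *int // stands in for the needle map, which may be nil
}

func (c *counter) FileCount() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.nm == nil { // not loaded yet, or already closed
		return 0
	}
	return *c.nm
}

func (c *counter) set(v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.nm = &v
}

func main() {
	var c counter
	fmt.Println(c.FileCount()) // 0: a nil map reads as empty
	c.set(42)
	fmt.Println(c.FileCount()) // 42
}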
diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go
new file mode 100644
index 000000000..f7075fe2b
--- /dev/null
+++ b/weed/storage/volume_backup.go
@@ -0,0 +1,260 @@
+package storage
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+
+ var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{}
+ if datSize, _, err := v.DataBackend.GetStat(); err == nil {
+ syncStatus.TailOffset = uint64(datSize)
+ }
+ syncStatus.Collection = v.Collection
+ syncStatus.IdxFileSize = v.nm.IndexFileSize()
+ syncStatus.CompactRevision = uint32(v.SuperBlock.CompactionRevision)
+ syncStatus.Ttl = v.SuperBlock.Ttl.String()
+ syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String()
+ return syncStatus
+}
+
+// The volume syncs with a master volume in 2 steps:
+// 1. The slave checks the master side to find the subscription checkpoint
+// to set up the replication.
+// 2. The slave receives the updates from the master
+
+/*
+Assume the slave volume needs to follow the master volume.
+
+The master volume could be compacted, and could be many files ahead of
+slave volume.
+
+Step 0: // implemented in command/backup.go, to avoid dat file size overflow.
+0.1 If the slave's compact version is less than the master's, do a local compaction, and set
+the local compact version to match the master's.
+0.2 If the slave's size is still bigger than the master's, discard the local copy and do a full copy.
+
+Step 1:
+The slave volume asks the master with the last modification time t.
+The master does a binary search in the volume (using .idx as an array, and checking the appendAtNs in the .dat file)
+to find the first entry with appendAtNs > t.
+
+Step 2:
+The master sends content bytes to the slave. The bytes are not chunked by needle.
+
+Step 3:
+The slave generates the needle map for the new bytes. (This may be optimized to incrementally
+update the needle map when receiving new .dat bytes, but that seems unnecessary for now.)
+
+*/
+
+func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.DialOption) error {
+
+ startFromOffset, _, _ := v.FileStat()
+ appendAtNs, err := v.findLastAppendAtNs()
+ if err != nil {
+ return err
+ }
+
+ writeOffset := int64(startFromOffset)
+
+ err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+
+ stream, err := client.VolumeIncrementalCopy(context.Background(), &volume_server_pb.VolumeIncrementalCopyRequest{
+ VolumeId: uint32(v.Id),
+ SinceNs: appendAtNs,
+ })
+ if err != nil {
+ return err
+ }
+
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ break
+ } else {
+ return recvErr
+ }
+ }
+
+ n, writeErr := v.DataBackend.WriteAt(resp.FileContent, writeOffset)
+ if writeErr != nil {
+ return writeErr
+ }
+ writeOffset += int64(n)
+ }
+
+ return nil
+
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // add to needle map
+ return ScanVolumeFileFrom(v.Version(), v.DataBackend, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v})
+
+}
+
+func (v *Volume) findLastAppendAtNs() (uint64, error) {
+ offset, err := v.locateLastAppendEntry()
+ if err != nil {
+ return 0, err
+ }
+ if offset.IsZero() {
+ return 0, nil
+ }
+ return v.readAppendAtNs(offset)
+}
+
+func (v *Volume) locateLastAppendEntry() (Offset, error) {
+ indexFile, e := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644)
+ if e != nil {
+ return Offset{}, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e)
+ }
+ defer indexFile.Close()
+
+ fi, err := indexFile.Stat()
+ if err != nil {
+ return Offset{}, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err)
+ }
+ fileSize := fi.Size()
+ if fileSize%NeedleMapEntrySize != 0 {
+ return Offset{}, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
+ }
+ if fileSize == 0 {
+ return Offset{}, nil
+ }
+
+ bytes := make([]byte, NeedleMapEntrySize)
+ n, e := indexFile.ReadAt(bytes, fileSize-NeedleMapEntrySize)
+ if n != NeedleMapEntrySize {
+ return Offset{}, fmt.Errorf("file %s read error: %v", indexFile.Name(), e)
+ }
+ _, offset, _ := idx.IdxFileEntry(bytes)
+
+ return offset, nil
+}
+
+func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) {
+
+ n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset())
+ if err != nil {
+ return 0, fmt.Errorf("ReadNeedleHeader: %v", err)
+ }
+ _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength)
+ if err != nil {
+ return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err)
+ }
+ return n.AppendAtNs, nil
+
+}
+
+// on server side
+func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast bool, err error) {
+ indexFile, openErr := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644)
+ if openErr != nil {
+ err = fmt.Errorf("cannot read %s.idx: %v", v.FileName(), openErr)
+ return
+ }
+ defer indexFile.Close()
+
+ fi, statErr := indexFile.Stat()
+ if statErr != nil {
+ err = fmt.Errorf("file %s stat error: %v", indexFile.Name(), statErr)
+ return
+ }
+ fileSize := fi.Size()
+ if fileSize%NeedleMapEntrySize != 0 {
+ err = fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
+ return
+ }
+
+ bytes := make([]byte, NeedleMapEntrySize)
+ entryCount := fileSize / NeedleMapEntrySize
+ l := int64(0)
+ h := entryCount
+
+ for l < h {
+
+ m := (l + h) / 2
+
+ if m == entryCount {
+ return Offset{}, true, nil
+ }
+
+ // read the appendAtNs for entry m
+ offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, m)
+ if err != nil {
+ return
+ }
+
+ mNs, nsReadErr := v.readAppendAtNs(offset)
+ if nsReadErr != nil {
+ err = nsReadErr
+ return
+ }
+
+ // move the boundary
+ if mNs <= sinceNs {
+ l = m + 1
+ } else {
+ h = m
+ }
+
+ }
+
+ if l == entryCount {
+ return Offset{}, true, nil
+ }
+
+ offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, l)
+
+ return offset, false, err
+
+}
+
+// bytes is of size NeedleMapEntrySize
+func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m int64) (Offset, error) {
+ if _, readErr := indexFile.ReadAt(bytes, m*NeedleMapEntrySize); readErr != nil && readErr != io.EOF {
+ return Offset{}, readErr
+ }
+ _, offset, _ := idx.IdxFileEntry(bytes)
+ return offset, nil
+}
+
+// VolumeFileScanner4GenIdx generates volume .idx entries by scanning the .dat file
+type VolumeFileScanner4GenIdx struct {
+ v *Volume
+}
+
+func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ return nil
+}
+func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool {
+ return false
+}
+
+func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
+ if n.Size > 0 && n.Size != TombstoneFileSize {
+ return scanner.v.nm.Put(n.Id, ToOffset(offset), n.Size)
+ }
+ return scanner.v.nm.Delete(n.Id, ToOffset(offset))
+}
diff --git a/weed/storage/volume_backup_test.go b/weed/storage/volume_backup_test.go
new file mode 100644
index 000000000..3291d203a
--- /dev/null
+++ b/weed/storage/volume_backup_test.go
@@ -0,0 +1,39 @@
+package storage
+
+import "testing"
+
+func TestBinarySearch(t *testing.T) {
+ testInput := []int{-1, 0, 3, 5, 9, 12}
+
+ if 3 != binarySearchForLargerThanTarget(testInput, 4) {
+ t.Errorf("failed to find target %d", 4)
+ }
+ if 3 != binarySearchForLargerThanTarget(testInput, 3) {
+ t.Errorf("failed to find target %d", 3)
+ }
+ if 6 != binarySearchForLargerThanTarget(testInput, 12) {
+ t.Errorf("failed to find target %d", 12)
+ }
+ if 1 != binarySearchForLargerThanTarget(testInput, -1) {
+ t.Errorf("failed to find target %d", -1)
+ }
+ if 0 != binarySearchForLargerThanTarget(testInput, -2) {
+ t.Errorf("failed to find target %d", -2)
+ }
+
+}
+
+func binarySearchForLargerThanTarget(nums []int, target int) int {
+ l := 0
+ h := len(nums)
+ for l < h {
+ m := (l + h) / 2
+ if nums[m] <= target {
+ l = m + 1
+ } else {
+ h = m
+ }
+ }
+ return l
+}
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 12c282be9..c33f0049a 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -4,41 +4,41 @@ import (
"fmt"
"os"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
-func getActualSize(size uint32, version Version) int64 {
- return NeedleEntrySize + NeedleBodyLength(size, version)
-}
-
-func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error {
+func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uint64, e error) {
var indexSize int64
- var e error
if indexSize, e = verifyIndexFileIntegrity(indexFile); e != nil {
- return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e)
+ return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e)
}
if indexSize == 0 {
- return nil
+ return 0, nil
}
var lastIdxEntry []byte
- if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleEntrySize); e != nil {
- return fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e)
+ if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleMapEntrySize); e != nil {
+ return 0, fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e)
}
- key, offset, size := IdxFileEntry(lastIdxEntry)
- if offset == 0 || size == TombstoneFileSize {
- return nil
+ key, offset, size := idx.IdxFileEntry(lastIdxEntry)
+ if offset.IsZero() {
+ return 0, nil
}
- if e = verifyNeedleIntegrity(v.dataFile, v.Version(), int64(offset)*NeedlePaddingSize, key, size); e != nil {
- return fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e)
+ if size == TombstoneFileSize {
+ size = 0
}
-
- return nil
+ if lastAppendAtNs, e = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToAcutalOffset(), key, size); e != nil {
+ return lastAppendAtNs, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e)
+ }
+ return
}
func verifyIndexFileIntegrity(indexFile *os.File) (indexSize int64, err error) {
if indexSize, err = util.GetFileSize(indexFile); err == nil {
- if indexSize%NeedleEntrySize != 0 {
+ if indexSize%NeedleMapEntrySize != 0 {
err = fmt.Errorf("index file's size is %d bytes, maybe corrupted", indexSize)
}
}
@@ -50,19 +50,18 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err
err = fmt.Errorf("offset %d for index file is invalid", offset)
return
}
- bytes = make([]byte, NeedleEntrySize)
+ bytes = make([]byte, NeedleMapEntrySize)
_, err = indexFile.ReadAt(bytes, offset)
return
}
-func verifyNeedleIntegrity(datFile *os.File, v Version, offset int64, key NeedleId, size uint32) error {
- n := new(Needle)
- err := n.ReadData(datFile, offset, size, v)
- if err != nil {
- return err
+func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) {
+ n := new(needle.Needle)
+ if err = n.ReadData(datFile, offset, size, v); err != nil {
+ return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
}
if n.Id != key {
- return fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
+ return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
}
- return nil
+ return n.AppendAtNs, err
}
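
CheckVolumeDataIntegrity now surfaces the append timestamp of the last needle, which the incremental backup path can compare against a peer. A hedged sketch of such a caller, as if inside the storage package; followerLastAppendAtNs is a hypothetical value obtained from the replication peer:

    func canTailSync(v *Volume, indexFile *os.File, followerLastAppendAtNs uint64) (bool, error) {
    	lastAppendAtNs, err := CheckVolumeDataIntegrity(v, indexFile)
    	if err != nil {
    		return false, err // the .dat/.idx pair itself is inconsistent
    	}
    	// tail-syncing only makes sense if the follower is not ahead of the source
    	return followerLastAppendAtNs <= lastAppendAtNs, nil
    }
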
diff --git a/weed/storage/volume_create.go b/weed/storage/volume_create.go
deleted file mode 100644
index 6b3a17439..000000000
--- a/weed/storage/volume_create.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !linux
-
-package storage
-
-import (
- "os"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
-)
-
-func createVolumeFile(fileName string, preallocate int64) (file *os.File, e error) {
- file, e = os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644)
- if preallocate > 0 {
- glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
- }
- return file, e
-}
diff --git a/weed/storage/volume_create_linux.go b/weed/storage/volume_create_linux.go
deleted file mode 100644
index 8f6bab2fe..000000000
--- a/weed/storage/volume_create_linux.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build linux
-
-package storage
-
-import (
- "os"
- "syscall"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
-)
-
-func createVolumeFile(fileName string, preallocate int64) (file *os.File, e error) {
- file, e = os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644)
- if preallocate != 0 {
- syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
- glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
- }
- return file, e
-}
diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go
index f6614a9de..313818cde 100644
--- a/weed/storage/volume_info.go
+++ b/weed/storage/volume_info.go
@@ -5,46 +5,94 @@ import (
"sort"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
type VolumeInfo struct {
- Id VolumeId
- Size uint64
- ReplicaPlacement *ReplicaPlacement
- Ttl *TTL
- Collection string
- Version Version
- FileCount int
- DeleteCount int
- DeletedByteCount uint64
- ReadOnly bool
+ Id needle.VolumeId
+ Size uint64
+ ReplicaPlacement *super_block.ReplicaPlacement
+ Ttl *needle.TTL
+ Collection string
+ Version needle.Version
+ FileCount int
+ DeleteCount int
+ DeletedByteCount uint64
+ ReadOnly bool
+ CompactRevision uint32
+ ModifiedAtSecond int64
+ RemoteStorageName string
+ RemoteStorageKey string
}
func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err error) {
vi = VolumeInfo{
- Id: VolumeId(m.Id),
- Size: m.Size,
- Collection: m.Collection,
- FileCount: int(m.FileCount),
- DeleteCount: int(m.DeleteCount),
- DeletedByteCount: m.DeletedByteCount,
- ReadOnly: m.ReadOnly,
- Version: Version(m.Version),
+ Id: needle.VolumeId(m.Id),
+ Size: m.Size,
+ Collection: m.Collection,
+ FileCount: int(m.FileCount),
+ DeleteCount: int(m.DeleteCount),
+ DeletedByteCount: m.DeletedByteCount,
+ ReadOnly: m.ReadOnly,
+ Version: needle.Version(m.Version),
+ CompactRevision: m.CompactRevision,
+ ModifiedAtSecond: m.ModifiedAtSecond,
+ RemoteStorageName: m.RemoteStorageName,
+ RemoteStorageKey: m.RemoteStorageKey,
}
- rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
+ rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
if e != nil {
return vi, e
}
vi.ReplicaPlacement = rp
- vi.Ttl = LoadTTLFromUint32(m.Ttl)
+ vi.Ttl = needle.LoadTTLFromUint32(m.Ttl)
return vi, nil
}
+func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi VolumeInfo, err error) {
+ vi = VolumeInfo{
+ Id: needle.VolumeId(m.Id),
+ Collection: m.Collection,
+ Version: needle.Version(m.Version),
+ }
+ rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
+ if e != nil {
+ return vi, e
+ }
+ vi.ReplicaPlacement = rp
+ vi.Ttl = needle.LoadTTLFromUint32(m.Ttl)
+ return vi, nil
+}
+
+func (vi VolumeInfo) IsRemote() bool {
+ return vi.RemoteStorageName != ""
+}
+
func (vi VolumeInfo) String() string {
return fmt.Sprintf("Id:%d, Size:%d, ReplicaPlacement:%s, Collection:%s, Version:%v, FileCount:%d, DeleteCount:%d, DeletedByteCount:%d, ReadOnly:%v",
vi.Id, vi.Size, vi.ReplicaPlacement, vi.Collection, vi.Version, vi.FileCount, vi.DeleteCount, vi.DeletedByteCount, vi.ReadOnly)
}
+func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage {
+ return &master_pb.VolumeInformationMessage{
+ Id: uint32(vi.Id),
+ Size: uint64(vi.Size),
+ Collection: vi.Collection,
+ FileCount: uint64(vi.FileCount),
+ DeleteCount: uint64(vi.DeleteCount),
+ DeletedByteCount: vi.DeletedByteCount,
+ ReadOnly: vi.ReadOnly,
+ ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()),
+ Version: uint32(vi.Version),
+ Ttl: vi.Ttl.ToUint32(),
+ CompactRevision: vi.CompactRevision,
+ ModifiedAtSecond: vi.ModifiedAtSecond,
+ RemoteStorageName: vi.RemoteStorageName,
+ RemoteStorageKey: vi.RemoteStorageKey,
+ }
+}
+
/*VolumesInfo sorting*/
type volumeInfos []*VolumeInfo
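
With ToVolumeInformationMessage added, VolumeInfo round-trips through the master heartbeat protobuf. A small illustrative check, not part of this diff:

    // roundTrip should preserve every field carried by the heartbeat message,
    // including the new CompactRevision and RemoteStorage* fields.
    func roundTrip(vi VolumeInfo) (VolumeInfo, error) {
    	return NewVolumeInfo(vi.ToVolumeInformationMessage())
    }
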
diff --git a/weed/storage/volume_info_test.go b/weed/storage/volume_info_test.go
index 9a9c43ad2..5b1bacb52 100644
--- a/weed/storage/volume_info_test.go
+++ b/weed/storage/volume_info_test.go
@@ -1,6 +1,10 @@
package storage
-import "testing"
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
func TestSortVolumeInfos(t *testing.T) {
vis := []*VolumeInfo{
@@ -16,7 +20,7 @@ func TestSortVolumeInfos(t *testing.T) {
}
sortVolumeInfos(vis)
for i := 0; i < len(vis); i++ {
- if vis[i].Id != VolumeId(i+1) {
+ if vis[i].Id != needle.VolumeId(i+1) {
t.Fatal()
}
}
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 37a6e07b2..73e2de02b 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -3,118 +3,148 @@ package storage
import (
"fmt"
"os"
- "time"
+
+ "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
-func loadVolumeWithoutIndex(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType) (v *Volume, e error) {
+func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, err error) {
v = &Volume{dir: dirname, Collection: collection, Id: id}
- v.SuperBlock = SuperBlock{}
+ v.SuperBlock = super_block.SuperBlock{}
v.needleMapKind = needleMapKind
- e = v.load(false, false, needleMapKind, 0)
+ err = v.load(false, false, needleMapKind, 0)
return
}
-func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) error {
- var e error
+func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) (err error) {
fileName := v.FileName()
alreadyHasSuperBlock := false
- if exists, canRead, canWrite, modifiedTime, fileSize := checkFile(fileName + ".dat"); exists {
+ hasVolumeInfoFile := v.maybeLoadVolumeInfo() && v.volumeInfo.Version != 0
+
+ if v.HasRemoteFile() {
+ v.noWriteCanDelete = true
+ v.noWriteOrDelete = false
+ glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files)
+ v.LoadRemoteFile()
+ alreadyHasSuperBlock = true
+ } else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(fileName + ".dat"); exists {
+ // open dat file
if !canRead {
return fmt.Errorf("cannot read Volume Data file %s.dat", fileName)
}
+ var dataFile *os.File
if canWrite {
- v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
- v.lastModifiedTime = uint64(modifiedTime.Unix())
+ dataFile, err = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
} else {
glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
- v.dataFile, e = os.Open(fileName + ".dat")
- v.readOnly = true
+ dataFile, err = os.Open(fileName + ".dat")
+ v.noWriteOrDelete = true
}
- if fileSize >= _SuperBlockSize {
+ v.lastModifiedTsSeconds = uint64(modifiedTime.Unix())
+ if fileSize >= super_block.SuperBlockSize {
alreadyHasSuperBlock = true
}
+ v.DataBackend = backend.NewDiskFile(dataFile)
} else {
if createDatIfMissing {
- v.dataFile, e = createVolumeFile(fileName+".dat", preallocate)
+ v.DataBackend, err = backend.CreateVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
} else {
return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName)
}
}
- if e != nil {
- if !os.IsPermission(e) {
- return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, e)
+ if err != nil {
+ if !os.IsPermission(err) {
+ return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, err)
} else {
- return fmt.Errorf("load data file %s.dat: %v", fileName, e)
+ return fmt.Errorf("load data file %s.dat: %v", fileName, err)
}
}
if alreadyHasSuperBlock {
- e = v.readSuperBlock()
+ err = v.readSuperBlock()
} else {
- e = v.maybeWriteSuperBlock()
+ if !v.SuperBlock.Initialized() {
+ return fmt.Errorf("volume %s.dat not initialized", fileName)
+ }
+ err = v.maybeWriteSuperBlock()
}
- if e == nil && alsoLoadIndex {
+ if err == nil && alsoLoadIndex {
var indexFile *os.File
- if v.readOnly {
- glog.V(1).Infoln("open to read file", fileName+".idx")
- if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil {
- return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, e)
+ if v.noWriteOrDelete {
+ glog.V(0).Infoln("open to read file", fileName+".idx")
+ if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); err != nil {
+ return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, err)
}
} else {
glog.V(1).Infoln("open to write file", fileName+".idx")
- if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil {
- return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, e)
+ if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+ return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, err)
}
}
- if e = CheckVolumeDataIntegrity(v, indexFile); e != nil {
- v.readOnly = true
- glog.V(0).Infof("volumeDataIntegrityChecking failed %v", e)
+ if v.lastAppendAtNs, err = CheckVolumeDataIntegrity(v, indexFile); err != nil {
+ v.noWriteOrDelete = true
+ glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
}
- switch needleMapKind {
- case NeedleMapInMemory:
- glog.V(0).Infoln("loading index", fileName+".idx", "to memory readonly", v.readOnly)
- if v.nm, e = LoadCompactNeedleMap(indexFile); e != nil {
- glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", e)
- }
- case NeedleMapLevelDb:
- glog.V(0).Infoln("loading leveldb", fileName+".ldb")
- if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile); e != nil {
- glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e)
- }
- case NeedleMapBoltDb:
- glog.V(0).Infoln("loading boltdb", fileName+".bdb")
- if v.nm, e = NewBoltDbNeedleMap(fileName+".bdb", indexFile); e != nil {
- glog.V(0).Infof("loading boltdb %s error: %v", fileName+".bdb", e)
+
+ if v.noWriteOrDelete || v.noWriteCanDelete {
+ if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil {
+ glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err)
}
- case NeedleMapBtree:
- glog.V(0).Infoln("loading index", fileName+".idx", "to btree readonly", v.readOnly)
- if v.nm, e = LoadBtreeNeedleMap(indexFile); e != nil {
- glog.V(0).Infof("loading index %s to btree error: %v", fileName+".idx", e)
+ } else {
+ switch needleMapKind {
+ case NeedleMapInMemory:
+ glog.V(0).Infoln("loading index", fileName+".idx", "to memory")
+ if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil {
+ glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", err)
+ }
+ case NeedleMapLevelDb:
+ glog.V(0).Infoln("loading leveldb", fileName+".ldb")
+ opts := &opt.Options{
+ BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10, // default value is 1
+ }
+ if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
+ glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ }
+ case NeedleMapLevelDbMedium:
+ glog.V(0).Infoln("loading leveldb medium", fileName+".ldb")
+ opts := &opt.Options{
+ BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10, // default value is 1
+ }
+ if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
+ glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ }
+ case NeedleMapLevelDbLarge:
+ glog.V(0).Infoln("loading leveldb large", fileName+".ldb")
+ opts := &opt.Options{
+ BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10, // default value is 1
+ }
+ if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil {
+ glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err)
+ }
}
}
}
- return e
-}
-
-func checkFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) {
- exists = true
- fi, err := os.Stat(filename)
- if os.IsNotExist(err) {
- exists = false
- return
- }
- if fi.Mode()&0400 != 0 {
- canRead = true
- }
- if fi.Mode()&0200 != 0 {
- canWrite = true
+ if !hasVolumeInfoFile {
+ v.volumeInfo.Version = uint32(v.SuperBlock.Version)
+ v.SaveVolumeInfo()
}
- modTime = fi.ModTime()
- fileSize = fi.Size()
- return
+
+ stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc()
+
+ return err
}
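
The three leveldb needle-map kinds above differ only in their block cache and write buffer sizes. A hypothetical helper expressing the same table, assuming goleveldb's opt package:

    func levelDbOptions(kind NeedleMapType) *opt.Options {
    	blockCacheMb, writeBufferMb := 2, 1 // NeedleMapLevelDb
    	switch kind {
    	case NeedleMapLevelDbMedium:
    		blockCacheMb, writeBufferMb = 4, 2
    	case NeedleMapLevelDbLarge:
    		blockCacheMb, writeBufferMb = 8, 4
    	}
    	return &opt.Options{
    		BlockCacheCapacity:            blockCacheMb * 1024 * 1024,
    		WriteBuffer:                   writeBufferMb * 1024 * 1024,
    		CompactionTableSizeMultiplier: 10,
    	}
    }
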
diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go
index ed9729c84..edb5f48d8 100644
--- a/weed/storage/volume_read_write.go
+++ b/weed/storage/volume_read_write.go
@@ -9,6 +9,9 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
@@ -16,19 +19,20 @@ var ErrorNotFound = errors.New("not found")
 // isFileUnchanged checks whether the needle to write is the same as the last one.
// It requires serialized access in the same volume.
-func (v *Volume) isFileUnchanged(n *Needle) bool {
+func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
if v.Ttl.String() != "" {
return false
}
+
nv, ok := v.nm.Get(n.Id)
- if ok && nv.Offset > 0 {
- oldNeedle := new(Needle)
- err := oldNeedle.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version())
+ if ok && !nv.Offset.IsZero() && nv.Size != TombstoneFileSize {
+ oldNeedle := new(needle.Needle)
+ err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
if err != nil {
- glog.V(0).Infof("Failed to check updated file %v", err)
+ glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
return false
}
- if oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
+ if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
n.DataSize = oldNeedle.DataSize
return true
}
@@ -38,93 +42,170 @@ func (v *Volume) isFileUnchanged(n *Needle) bool {
// Destroy removes everything related to this volume
func (v *Volume) Destroy() (err error) {
- if v.readOnly {
- err = fmt.Errorf("%s is read-only", v.dataFile.Name())
+ if v.isCompacting {
+ err = fmt.Errorf("volume %d is compacting", v.Id)
return
}
+ close(v.asyncRequestsChan)
+ storageName, storageKey := v.RemoteStorageNameKey()
+ if v.HasRemoteFile() && storageName != "" && storageKey != "" {
+ if backendStorage, found := backend.BackendStorages[storageName]; found {
+ backendStorage.DeleteFile(storageKey)
+ }
+ }
v.Close()
os.Remove(v.FileName() + ".dat")
os.Remove(v.FileName() + ".idx")
+ os.Remove(v.FileName() + ".vif")
+ os.Remove(v.FileName() + ".sdx")
os.Remove(v.FileName() + ".cpd")
os.Remove(v.FileName() + ".cpx")
- os.Remove(v.FileName() + ".ldb")
- os.Remove(v.FileName() + ".bdb")
+ os.RemoveAll(v.FileName() + ".ldb")
return
}
-// AppendBlob append a blob to end of the data file, used in replication
-func (v *Volume) AppendBlob(b []byte) (offset int64, err error) {
- if v.readOnly {
- err = fmt.Errorf("%s is read-only", v.dataFile.Name())
- return
- }
+func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
+ v.asyncRequestsChan <- request
+}
+
+func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
+ // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ actualSize := needle.GetActualSize(uint32(len(n.Data)), v.Version())
+
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
- if offset, err = v.dataFile.Seek(0, 2); err != nil {
- glog.V(0).Infof("failed to seek the end of file: %v", err)
+
+ if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
+ err = fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
+ return
+ }
+ if v.isFileUnchanged(n) {
+ size = n.DataSize
+ isUnchanged = true
return
}
- //ensure file writing starting from aligned positions
- if offset%NeedlePaddingSize != 0 {
- offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
- if offset, err = v.dataFile.Seek(offset, 0); err != nil {
- glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err)
+
+ // check whether existing needle cookie matches
+ nv, ok := v.nm.Get(n.Id)
+ if ok {
+ existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
+ if existingNeedleReadErr != nil {
+ err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
return
}
+ if existingNeedle.Cookie != n.Cookie {
+ glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ err = fmt.Errorf("mismatching cookie %x", n.Cookie)
+ return
+ }
+ }
+
+ // append to dat file
+ n.AppendAtNs = uint64(time.Now().UnixNano())
+ if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
+ return
+ }
+
+ v.lastAppendAtNs = n.AppendAtNs
+
+ // add to needle map
+ if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
+ if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
+ glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+ }
+ }
+ if v.lastModifiedTsSeconds < n.LastModified {
+ v.lastModifiedTsSeconds = n.LastModified
}
- _, err = v.dataFile.Write(b)
return
}
-func (v *Volume) writeNeedle(n *Needle) (offset uint64, size uint32, err error) {
- glog.V(4).Infof("writing needle %s", NewFileIdFromNeedle(v.Id, n).String())
- if v.readOnly {
- err = fmt.Errorf("%s is read-only", v.dataFile.Name())
+func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size uint32, isUnchanged bool, err error) {
+ // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
+ n.SetHasTtl()
+ n.Ttl = v.Ttl
+ }
+
+ if !fsync {
+ return v.syncWrite(n)
+ } else {
+ asyncRequest := needle.NewAsyncRequest(n, true)
+ // use len(n.Data) here instead of n.Size, because n.Size is not populated until n.Append()
+ asyncRequest.ActualSize = needle.GetActualSize(uint32(len(n.Data)), v.Version())
+
+ v.asyncRequestAppend(asyncRequest)
+ offset, _, isUnchanged, err = asyncRequest.WaitComplete()
+
return
}
- v.dataFileAccessLock.Lock()
- defer v.dataFileAccessLock.Unlock()
+}
+
+func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
+ // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if v.isFileUnchanged(n) {
size = n.DataSize
- glog.V(4).Infof("needle is unchanged!")
+ isUnchanged = true
return
}
+ // check whether existing needle cookie matches
+ nv, ok := v.nm.Get(n.Id)
+ if ok {
+ existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
+ if existingNeedleReadErr != nil {
+ err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
+ return
+ }
+ if existingNeedle.Cookie != n.Cookie {
+ glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ err = fmt.Errorf("mismatching cookie %x", n.Cookie)
+ return
+ }
+ }
+
+ // append to dat file
n.AppendAtNs = uint64(time.Now().UnixNano())
- if offset, size, _, err = n.Append(v.dataFile, v.Version()); err != nil {
+ if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
return
}
+ v.lastAppendAtNs = n.AppendAtNs
- nv, ok := v.nm.Get(n.Id)
- if !ok || uint64(nv.Offset)*NeedlePaddingSize < offset {
- if err = v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size); err != nil {
+ // add to needle map
+ if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
+ if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
}
}
- if v.lastModifiedTime < n.LastModified {
- v.lastModifiedTime = n.LastModified
+ if v.lastModifiedTsSeconds < n.LastModified {
+ v.lastModifiedTsSeconds = n.LastModified
}
return
}
-func (v *Volume) deleteNeedle(n *Needle) (uint32, error) {
- glog.V(4).Infof("delete needle %s", NewFileIdFromNeedle(v.Id, n).String())
- if v.readOnly {
- return 0, fmt.Errorf("%s is read-only", v.dataFile.Name())
- }
+func (v *Volume) syncDelete(n *needle.Needle) (uint32, error) {
+ glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ actualSize := needle.GetActualSize(0, v.Version())
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
+
+ if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
+ err := fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
+ return 0, err
+ }
+
nv, ok := v.nm.Get(n.Id)
//fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
if ok && nv.Size != TombstoneFileSize {
size := nv.Size
n.Data = nil
n.AppendAtNs = uint64(time.Now().UnixNano())
- offset, _, _, err := n.Append(v.dataFile, v.Version())
+ offset, _, _, err := n.Append(v.DataBackend, v.Version())
if err != nil {
return size, err
}
- if err = v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize)); err != nil {
+ v.lastAppendAtNs = n.AppendAtNs
+ if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
return size, err
}
return size, err
@@ -132,15 +213,52 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) {
return 0, nil
}
-// read fills in Needle content by looking up n.Id from NeedleMapper
-func (v *Volume) readNeedle(n *Needle) (int, error) {
+func (v *Volume) deleteNeedle2(n *needle.Needle) (uint32, error) {
+ // todo: the delete record is currently always appended without fsync; it may need fsync in the future
+ fsync := false
+
+ if !fsync {
+ return v.syncDelete(n)
+ } else {
+ asyncRequest := needle.NewAsyncRequest(n, false)
+ asyncRequest.ActualSize = needle.GetActualSize(0, v.Version())
+
+ v.asyncRequestAppend(asyncRequest)
+ _, size, _, err := asyncRequest.WaitComplete()
+
+ return uint32(size), err
+ }
+}
+
+func (v *Volume) doDeleteRequest(n *needle.Needle) (uint32, error) {
+ glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
nv, ok := v.nm.Get(n.Id)
- if !ok || nv.Offset == 0 {
- v.compactingWg.Wait()
- nv, ok = v.nm.Get(n.Id)
- if !ok || nv.Offset == 0 {
- return -1, ErrorNotFound
+ //fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
+ if ok && nv.Size != TombstoneFileSize {
+ size := nv.Size
+ n.Data = nil
+ n.AppendAtNs = uint64(time.Now().UnixNano())
+ offset, _, _, err := n.Append(v.DataBackend, v.Version())
+ if err != nil {
+ return size, err
}
+ v.lastAppendAtNs = n.AppendAtNs
+ if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
+ return size, err
+ }
+ return size, err
+ }
+ return 0, nil
+}
+
+// read fills in Needle content by looking up n.Id from NeedleMapper
+func (v *Volume) readNeedle(n *needle.Needle) (int, error) {
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+
+ nv, ok := v.nm.Get(n.Id)
+ if !ok || nv.Offset.IsZero() {
+ return -1, ErrorNotFound
}
if nv.Size == TombstoneFileSize {
return -1, errors.New("already deleted")
@@ -148,7 +266,7 @@ func (v *Volume) readNeedle(n *Needle) (int, error) {
if nv.Size == 0 {
return 0, nil
}
- err := n.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version())
+ err := n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
if err != nil {
return 0, err
}
@@ -169,57 +287,142 @@ func (v *Volume) readNeedle(n *Needle) (int, error) {
return -1, ErrorNotFound
}
+func (v *Volume) startWorker() {
+ go func() {
+ chanClosed := false
+ for {
+ // the channel was closed; exit the goroutine
+ if chanClosed {
+ break
+ }
+ currentRequests := make([]*needle.AsyncRequest, 0, 128)
+ currentBytesToWrite := int64(0)
+ for {
+ request, ok := <-v.asyncRequestsChan
+ // the volume may have been closed
+ if !ok {
+ chanClosed = true
+ break
+ }
+ if MaxPossibleVolumeSize < v.ContentSize()+uint64(currentBytesToWrite+request.ActualSize) {
+ request.Complete(0, 0, false,
+ fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize()))
+ break
+ }
+ currentRequests = append(currentRequests, request)
+ currentBytesToWrite += request.ActualSize
+ // submit at most 4MB or 128 requests at a time, to reduce request latency.
+ // also break when the channel is empty, to avoid an I/O hang.
+ if currentBytesToWrite >= 4*1024*1024 || len(currentRequests) >= 128 || len(v.asyncRequestsChan) == 0 {
+ break
+ }
+ }
+ if len(currentRequests) == 0 {
+ continue
+ }
+ v.dataFileAccessLock.Lock()
+ end, _, e := v.DataBackend.GetStat()
+ if e != nil {
+ for i := 0; i < len(currentRequests); i++ {
+ currentRequests[i].Complete(0, 0, false,
+ fmt.Errorf("cannot read current volume position: %v", e))
+ }
+ v.dataFileAccessLock.Unlock()
+ continue
+ }
+
+ for i := 0; i < len(currentRequests); i++ {
+ if currentRequests[i].IsWriteRequest {
+ offset, size, isUnchanged, err := v.doWriteRequest(currentRequests[i].N)
+ currentRequests[i].UpdateResult(offset, uint64(size), isUnchanged, err)
+ } else {
+ size, err := v.doDeleteRequest(currentRequests[i].N)
+ currentRequests[i].UpdateResult(0, uint64(size), false, err)
+ }
+ }
+
+ // if the sync fails, the data is not reliable; mark the completed requests as failed and roll back
+ if err := v.DataBackend.Sync(); err != nil {
+ // todo: this may leave dirty data or cause inconsistency; maybe weed needs to panic here?
+ if te := v.DataBackend.Truncate(end); te != nil {
+ glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
+ }
+ for i := 0; i < len(currentRequests); i++ {
+ if currentRequests[i].IsSucceed() {
+ currentRequests[i].UpdateResult(0, 0, false, err)
+ }
+ }
+ }
+
+ for i := 0; i < len(currentRequests); i++ {
+ currentRequests[i].Submit()
+ }
+ v.dataFileAccessLock.Unlock()
+ }
+ }()
+}
+
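+// A VolumeFileScanner visits the super block (unless the version is already
+// known from a .vif file), then every needle in .dat offset order;
+// ReadNeedleBody controls whether the needle body is read and passed along.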
type VolumeFileScanner interface {
- VisitSuperBlock(SuperBlock) error
+ VisitSuperBlock(super_block.SuperBlock) error
ReadNeedleBody() bool
- VisitNeedle(n *Needle, offset int64) error
+ VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error
}
-func ScanVolumeFile(dirname string, collection string, id VolumeId,
+func ScanVolumeFile(dirname string, collection string, id needle.VolumeId,
needleMapKind NeedleMapType,
volumeFileScanner VolumeFileScanner) (err error) {
var v *Volume
if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil {
- return fmt.Errorf("Failed to load volume %d: %v", id, err)
+ return fmt.Errorf("failed to load volume %d: %v", id, err)
}
- if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil {
- return fmt.Errorf("Failed to process volume %d super block: %v", id, err)
+ if v.volumeInfo.Version == 0 {
+ if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil {
+ return fmt.Errorf("failed to process volume %d super block: %v", id, err)
+ }
}
defer v.Close()
version := v.Version()
offset := int64(v.SuperBlock.BlockSize())
- n, rest, e := ReadNeedleHeader(v.dataFile, version, offset)
+
+ return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner)
+}
+
+func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) {
+ n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset)
if e != nil {
- err = fmt.Errorf("cannot read needle header: %v", e)
- return
+ if e == io.EOF {
+ return nil
+ }
+ return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e)
}
for n != nil {
+ var needleBody []byte
if volumeFileScanner.ReadNeedleBody() {
- if err = n.ReadNeedleBody(v.dataFile, version, offset+NeedleEntrySize, rest); err != nil {
+ if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
glog.V(0).Infof("cannot read needle body: %v", err)
//err = fmt.Errorf("cannot read needle body: %v", err)
//return
}
}
- err = volumeFileScanner.VisitNeedle(n, offset)
+ err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody)
if err == io.EOF {
return nil
}
if err != nil {
glog.V(0).Infof("visit needle error: %v", err)
+ return fmt.Errorf("visit needle error: %v", err)
}
- offset += NeedleEntrySize + rest
+ offset += NeedleHeaderSize + rest
glog.V(4).Infof("==> new entry offset %d", offset)
- if n, rest, err = ReadNeedleHeader(v.dataFile, version, offset); err != nil {
+ if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
if err == io.EOF {
return nil
}
- return fmt.Errorf("cannot read needle header: %v", err)
+ return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
}
glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
}
-
- return
+ return nil
}
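
startWorker above is a group-commit loop: it batches queued requests (up to 4MB or 128 at a time), applies them, then fsyncs once; if the fsync fails it truncates the unsynced tail and fails the whole batch. A condensed, non-verbatim restatement of the commit step:

    func groupCommit(v *Volume, batch []*needle.AsyncRequest) {
    	end, _, _ := v.DataBackend.GetStat() // remember the pre-batch file size
    	for _, r := range batch {
    		if r.IsWriteRequest {
    			offset, size, unchanged, err := v.doWriteRequest(r.N)
    			r.UpdateResult(offset, uint64(size), unchanged, err)
    		} else {
    			size, err := v.doDeleteRequest(r.N)
    			r.UpdateResult(0, uint64(size), false, err)
    		}
    	}
    	if err := v.DataBackend.Sync(); err != nil {
    		// nothing in this batch is durable: drop the unsynced tail and
    		// fail every request that thought it succeeded
    		v.DataBackend.Truncate(end)
    		for _, r := range batch {
    			if r.IsSucceed() {
    				r.UpdateResult(0, 0, false, err)
    			}
    		}
    	}
    	for _, r := range batch {
    		r.Submit() // wake the waiting caller
    	}
    }
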
diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go
index 6435a051f..5e913e062 100644
--- a/weed/storage/volume_super_block.go
+++ b/weed/storage/volume_super_block.go
@@ -5,83 +5,29 @@ import (
"os"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/golang/protobuf/proto"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
-const (
- _SuperBlockSize = 8
-)
-
-/*
-* Super block currently has 8 bytes allocated for each volume.
-* Byte 0: version, 1 or 2
-* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc
-* Byte 2 and byte 3: Time to live. See TTL for definition
-* Byte 4 and byte 5: The number of times the volume has been compacted.
-* Rest bytes: Reserved
- */
-type SuperBlock struct {
- version Version
- ReplicaPlacement *ReplicaPlacement
- Ttl *TTL
- CompactRevision uint16
- Extra *master_pb.SuperBlockExtra
- extraSize uint16
-}
-
-func (s *SuperBlock) BlockSize() int {
- switch s.version {
- case Version2, Version3:
- return _SuperBlockSize + int(s.extraSize)
- }
- return _SuperBlockSize
-}
-
-func (s *SuperBlock) Version() Version {
- return s.version
-}
-func (s *SuperBlock) Bytes() []byte {
- header := make([]byte, _SuperBlockSize)
- header[0] = byte(s.version)
- header[1] = s.ReplicaPlacement.Byte()
- s.Ttl.ToBytes(header[2:4])
- util.Uint16toBytes(header[4:6], s.CompactRevision)
-
- if s.Extra != nil {
- extraData, err := proto.Marshal(s.Extra)
- if err != nil {
- glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
- }
- extraSize := len(extraData)
- if extraSize > 256*256-2 {
- // reserve a couple of bits for future extension
- glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
- }
- s.extraSize = uint16(extraSize)
- util.Uint16toBytes(header[6:8], s.extraSize)
-
- header = append(header, extraData...)
- }
-
- return header
-}
-
func (v *Volume) maybeWriteSuperBlock() error {
- stat, e := v.dataFile.Stat()
+
+ datSize, _, e := v.DataBackend.GetStat()
if e != nil {
- glog.V(0).Infof("failed to stat datafile %s: %v", v.dataFile.Name(), e)
+ glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
return e
}
- if stat.Size() == 0 {
- v.SuperBlock.version = CurrentVersion
- _, e = v.dataFile.Write(v.SuperBlock.Bytes())
+ if datSize == 0 {
+ v.SuperBlock.Version = needle.CurrentVersion
+ _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0)
if e != nil && os.IsPermission(e) {
//read-only, but zero length - recreate it!
- if v.dataFile, e = os.Create(v.dataFile.Name()); e == nil {
- if _, e = v.dataFile.Write(v.SuperBlock.Bytes()); e == nil {
- v.readOnly = false
+ var dataFile *os.File
+ if dataFile, e = os.Create(v.DataBackend.Name()); e == nil {
+ v.DataBackend = backend.NewDiskFile(dataFile)
+ if _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0); e == nil {
+ v.noWriteOrDelete = false
+ v.noWriteCanDelete = false
}
}
}
@@ -90,40 +36,13 @@ func (v *Volume) maybeWriteSuperBlock() error {
}
func (v *Volume) readSuperBlock() (err error) {
- v.SuperBlock, err = ReadSuperBlock(v.dataFile)
- return err
-}
-
-// ReadSuperBlock reads from data file and load it into volume's super block
-func ReadSuperBlock(dataFile *os.File) (superBlock SuperBlock, err error) {
- if _, err = dataFile.Seek(0, 0); err != nil {
- err = fmt.Errorf("cannot seek to the beginning of %s: %v", dataFile.Name(), err)
- return
- }
- header := make([]byte, _SuperBlockSize)
- if _, e := dataFile.Read(header); e != nil {
- err = fmt.Errorf("cannot read volume %s super block: %v", dataFile.Name(), e)
- return
- }
- superBlock.version = Version(header[0])
- if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil {
- err = fmt.Errorf("cannot read replica type: %s", err.Error())
- return
- }
- superBlock.Ttl = LoadTTLFromBytes(header[2:4])
- superBlock.CompactRevision = util.BytesToUint16(header[4:6])
- superBlock.extraSize = util.BytesToUint16(header[6:8])
-
- if superBlock.extraSize > 0 {
- // read more
- extraData := make([]byte, int(superBlock.extraSize))
- superBlock.Extra = &master_pb.SuperBlockExtra{}
- err = proto.Unmarshal(extraData, superBlock.Extra)
- if err != nil {
- err = fmt.Errorf("cannot read volume %s super block extra: %v", dataFile.Name(), err)
- return
+ v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend)
+ if v.volumeInfo != nil && v.volumeInfo.Replication != "" {
+ if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil {
+ return fmt.Errorf("Error parse volume %d replication %s : %v", v.Id, v.volumeInfo.Replication, err)
+ } else {
+ v.SuperBlock.ReplicaPlacement = replication
}
}
-
- return
+ return err
}
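
ReadSuperBlock and the layout comment moved into the new super_block package; the 8-byte header itself is unchanged. A sketch of the encoding for reference, assuming util.Uint16toBytes as used in the removed code:

    // header layout: version | replica placement | TTL (2) | compaction revision (2) | extra size (2)
    func encodeSuperBlockHeader(version, replicaPlacement byte, ttl [2]byte, compactionRevision, extraSize uint16) []byte {
    	header := make([]byte, 8)
    	header[0] = version
    	header[1] = replicaPlacement
    	copy(header[2:4], ttl[:])
    	util.Uint16toBytes(header[4:6], compactionRevision)
    	util.Uint16toBytes(header[6:8], extraSize)
    	return header // a protobuf-encoded extra block of extraSize bytes follows
    }
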
diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go
deleted file mode 100644
index 137a9b4ca..000000000
--- a/weed/storage/volume_sync.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package storage
-
-import (
- "context"
- "fmt"
- "io"
- "os"
- "sort"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage/needle"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
-)
-
-// The volume sync with a master volume via 2 steps:
-// 1. The slave checks master side to find subscription checkpoint
-// to setup the replication.
-// 2. The slave receives the updates from master
-
-/*
-Assume the slave volume needs to follow the master volume.
-
-The master volume could be compacted, and could be many files ahead of
-slave volume.
-
-Step 1:
-The slave volume will ask the master volume for a snapshot
-of (existing file entries, last offset, number of compacted times).
-
-For each entry x in master existing file entries:
- if x does not exist locally:
- add x locally
-
-For each entry y in local slave existing file entries:
- if y does not exist on master:
- delete y locally
-
-Step 2:
-After this, use the last offset and number of compacted times to request
-the master volume to send a new file, and keep looping. If the number of
-compacted times is changed, go back to step 1 (very likely this can be
-optimized more later).
-
-*/
-
-func (v *Volume) Synchronize(volumeServer string) (err error) {
- var lastCompactRevision uint16 = 0
- var compactRevision uint16 = 0
- var masterMap *needle.CompactMap
- for i := 0; i < 3; i++ {
- if masterMap, _, compactRevision, err = fetchVolumeFileEntries(volumeServer, v.Id); err != nil {
- return fmt.Errorf("Failed to sync volume %d entries with %s: %v", v.Id, volumeServer, err)
- }
- if lastCompactRevision != compactRevision && lastCompactRevision != 0 {
- if err = v.Compact(0); err != nil {
- return fmt.Errorf("Compact Volume before synchronizing %v", err)
- }
- if err = v.commitCompact(); err != nil {
- return fmt.Errorf("Commit Compact before synchronizing %v", err)
- }
- }
- lastCompactRevision = compactRevision
- if err = v.trySynchronizing(volumeServer, masterMap, compactRevision); err == nil {
- return
- }
- }
- return
-}
-
-type ByOffset []needle.NeedleValue
-
-func (a ByOffset) Len() int { return len(a) }
-func (a ByOffset) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByOffset) Less(i, j int) bool { return a[i].Offset < a[j].Offset }
-
-// trySynchronizing sync with remote volume server incrementally by
-// make up the local and remote delta.
-func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.CompactMap, compactRevision uint16) error {
- slaveIdxFile, err := os.Open(v.nm.IndexFileName())
- if err != nil {
- return fmt.Errorf("Open volume %d index file: %v", v.Id, err)
- }
- defer slaveIdxFile.Close()
- slaveMap, err := LoadBtreeNeedleMap(slaveIdxFile)
- if err != nil {
- return fmt.Errorf("Load volume %d index file: %v", v.Id, err)
- }
- var delta []needle.NeedleValue
- if err := masterMap.Visit(func(needleValue needle.NeedleValue) error {
- if needleValue.Key == NeedleIdEmpty {
- return nil
- }
- if _, ok := slaveMap.Get(needleValue.Key); ok {
- return nil // skip intersection
- }
- delta = append(delta, needleValue)
- return nil
- }); err != nil {
- return fmt.Errorf("Add master entry: %v", err)
- }
- if err := slaveMap.m.Visit(func(needleValue needle.NeedleValue) error {
- if needleValue.Key == NeedleIdEmpty {
- return nil
- }
- if _, ok := masterMap.Get(needleValue.Key); ok {
- return nil // skip intersection
- }
- needleValue.Size = 0
- delta = append(delta, needleValue)
- return nil
- }); err != nil {
- return fmt.Errorf("Remove local entry: %v", err)
- }
-
- // simulate to same ordering of remote .dat file needle entries
- sort.Sort(ByOffset(delta))
-
- // make up the delta
- fetchCount := 0
- for _, needleValue := range delta {
- if needleValue.Size == 0 {
- // remove file entry from local
- v.removeNeedle(needleValue.Key)
- continue
- }
- // add master file entry to local data file
- if err := v.fetchNeedle(volumeServer, needleValue, compactRevision); err != nil {
- glog.V(0).Infof("Fetch needle %v from %s: %v", needleValue, volumeServer, err)
- return err
- }
- fetchCount++
- }
- glog.V(1).Infof("Fetched %d needles from %s", fetchCount, volumeServer)
- return nil
-}
-
-func fetchVolumeFileEntries(volumeServer string, vid VolumeId) (m *needle.CompactMap, lastOffset uint64, compactRevision uint16, err error) {
- m = needle.NewCompactMap()
-
- syncStatus, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid))
- if err != nil {
- return m, 0, 0, err
- }
-
- total := 0
- err = operation.GetVolumeIdxEntries(volumeServer, uint32(vid), func(key NeedleId, offset Offset, size uint32) {
- // println("remote key", key, "offset", offset*NeedlePaddingSize, "size", size)
- if offset > 0 && size != TombstoneFileSize {
- m.Set(NeedleId(key), offset, size)
- } else {
- m.Delete(NeedleId(key))
- }
- total++
- })
-
- glog.V(2).Infof("server %s volume %d, entries %d, last offset %d, revision %d", volumeServer, vid, total, syncStatus.TailOffset, syncStatus.CompactRevision)
- return m, syncStatus.TailOffset, uint16(syncStatus.CompactRevision), err
-
-}
-
-func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse {
- var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{}
- if stat, err := v.dataFile.Stat(); err == nil {
- syncStatus.TailOffset = uint64(stat.Size())
- }
- syncStatus.Collection = v.Collection
- syncStatus.IdxFileSize = v.nm.IndexFileSize()
- syncStatus.CompactRevision = uint32(v.SuperBlock.CompactRevision)
- syncStatus.Ttl = v.SuperBlock.Ttl.String()
- syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String()
- return syncStatus
-}
-
-func (v *Volume) IndexFileContent() ([]byte, error) {
- return v.nm.IndexFileContent()
-}
-
-// removeNeedle removes one needle by needle key
-func (v *Volume) removeNeedle(key NeedleId) {
- n := new(Needle)
- n.Id = key
- v.deleteNeedle(n)
-}
-
-// fetchNeedle fetches a remote volume needle by vid, id, offset
-// The compact revision is checked first in case the remote volume
-// is compacted and the offset is invalid any more.
-func (v *Volume) fetchNeedle(volumeServer string, needleValue needle.NeedleValue, compactRevision uint16) error {
-
- return operation.WithVolumeServerClient(volumeServer, func(client volume_server_pb.VolumeServerClient) error {
- stream, err := client.VolumeSyncData(context.Background(), &volume_server_pb.VolumeSyncDataRequest{
- VolumdId: uint32(v.Id),
- Revision: uint32(compactRevision),
- Offset: uint32(needleValue.Offset),
- Size: uint32(needleValue.Size),
- NeedleId: needleValue.Key.String(),
- })
- if err != nil {
- return err
- }
- var fileContent []byte
- for {
- resp, err := stream.Recv()
- if err == io.EOF {
- break
- }
- if err != nil {
- return fmt.Errorf("read needle %v: %v", needleValue.Key.String(), err)
- }
- fileContent = append(fileContent, resp.FileContent...)
- }
-
- offset, err := v.AppendBlob(fileContent)
- if err != nil {
- return fmt.Errorf("Appending volume %d error: %v", v.Id, err)
- }
- // println("add key", needleValue.Key, "offset", offset, "size", needleValue.Size)
- v.nm.Put(needleValue.Key, Offset(offset/NeedlePaddingSize), needleValue.Size)
- return nil
- })
-
-}
diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go
new file mode 100644
index 000000000..fd7b08654
--- /dev/null
+++ b/weed/storage/volume_tier.go
@@ -0,0 +1,50 @@
+package storage
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
+)
+
+func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
+ return v.volumeInfo
+}
+
+func (v *Volume) maybeLoadVolumeInfo() (found bool) {
+
+ v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif")
+
+ if v.hasRemoteFile {
+ glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id,
+ v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key)
+ }
+
+ return
+
+}
+
+func (v *Volume) HasRemoteFile() bool {
+ return v.hasRemoteFile
+}
+
+func (v *Volume) LoadRemoteFile() error {
+ tierFile := v.volumeInfo.GetFiles()[0]
+ backendStorage := backend.BackendStorages[tierFile.BackendName()]
+
+ if v.DataBackend != nil {
+ v.DataBackend.Close()
+ }
+
+ v.DataBackend = backendStorage.NewStorageFile(tierFile.Key, v.volumeInfo)
+ return nil
+}
+
+func (v *Volume) SaveVolumeInfo() error {
+
+ tierFileName := v.FileName() + ".vif"
+
+ return pb.SaveVolumeInfo(tierFileName, v.volumeInfo)
+
+}
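
The .vif sidecar written here is what load() consults first (see volume_loading.go above): if it lists remote files, the volume swaps its DataBackend for a tiered one and stops accepting writes. A condensed sketch of that flow, not verbatim:

    func openPossiblyTiered(v *Volume) error {
    	if v.maybeLoadVolumeInfo() && v.HasRemoteFile() {
    		// data lives in a remote backend (e.g. S3); reads go through the
    		// remote BackendStorageFile, new writes are disallowed
    		return v.LoadRemoteFile()
    	}
    	return nil // otherwise fall through to the local .dat path
    }
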
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index b4870423c..ed8172909 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -3,9 +3,16 @@ package storage
import (
"fmt"
"os"
+ "runtime"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ idx2 "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -14,46 +21,100 @@ func (v *Volume) garbageLevel() float64 {
if v.ContentSize() == 0 {
return 0
}
- return float64(v.nm.DeletedSize()) / float64(v.ContentSize())
+ deletedSize := v.DeletedSize()
+ fileSize := v.ContentSize()
+ if v.DeletedCount() > 0 && v.DeletedSize() == 0 {
+ // this happens when a .sdx file is converted back to a normal .idx file,
+ // where the deleted entry sizes are missing
+ datFileSize, _, _ := v.FileStat()
+ deletedSize = datFileSize - fileSize - super_block.SuperBlockSize
+ fileSize = datFileSize
+ }
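+ // e.g. 30MB of deleted needles in a 100MB volume yields a garbage level of 0.3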
+ return float64(deletedSize) / float64(fileSize)
}
-func (v *Volume) Compact(preallocate int64) error {
+// compact a volume based on deletions in .dat files
+func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error {
+
+ if v.MemoryMapMaxSizeMb != 0 { // it makes no sense to compact an in-memory volume
+ return nil
+ }
glog.V(3).Infof("Compacting volume %d ...", v.Id)
//no need to lock for copy on write
//v.accessLock.Lock()
//defer v.accessLock.Unlock()
//glog.V(3).Infof("Got Compaction lock...")
+ v.isCompacting = true
+ defer func() {
+ v.isCompacting = false
+ }()
filePath := v.FileName()
- v.lastCompactIndexOffset = v.nm.IndexFileSize()
- v.lastCompactRevision = v.SuperBlock.CompactRevision
+ v.lastCompactIndexOffset = v.IndexFileSize()
+ v.lastCompactRevision = v.SuperBlock.CompactionRevision
glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
- return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate)
+ if err := v.DataBackend.Sync(); err != nil {
+ glog.V(0).Infof("compact fail to sync volume %d", v.Id)
+ }
+ if err := v.nm.Sync(); err != nil {
+ glog.V(0).Infof("compact fail to sync volume idx %d", v.Id)
+ }
+ return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
}
-func (v *Volume) Compact2() error {
+// compact a volume based on deletions in .idx files
+func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) error {
+
+ if v.MemoryMapMaxSizeMb != 0 { // it makes no sense to compact an in-memory volume
+ return nil
+ }
glog.V(3).Infof("Compact2 volume %d ...", v.Id)
+
+ v.isCompacting = true
+ defer func() {
+ v.isCompacting = false
+ }()
+
filePath := v.FileName()
+ v.lastCompactIndexOffset = v.IndexFileSize()
+ v.lastCompactRevision = v.SuperBlock.CompactionRevision
glog.V(3).Infof("creating copies for volume %d ...", v.Id)
- return v.copyDataBasedOnIndexFile(filePath+".cpd", filePath+".cpx")
+ if err := v.DataBackend.Sync(); err != nil {
+ glog.V(0).Infof("compact2 fail to sync volume dat %d: %v", v.Id, err)
+ }
+ if err := v.nm.Sync(); err != nil {
+ glog.V(0).Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
+ }
+ return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond)
}
-func (v *Volume) commitCompact() error {
+func (v *Volume) CommitCompact() error {
+ if v.MemoryMapMaxSizeMb != 0 { // it makes no sense to compact an in-memory volume
+ return nil
+ }
glog.V(0).Infof("Committing volume %d vacuuming...", v.Id)
+
+ v.isCompacting = true
+ defer func() {
+ v.isCompacting = false
+ }()
+
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
+
glog.V(3).Infof("Got volume %d committing lock...", v.Id)
- v.compactingWg.Add(1)
- defer v.compactingWg.Done()
v.nm.Close()
- if err := v.dataFile.Close(); err != nil {
- glog.V(0).Infof("fail to close volume %d", v.Id)
+ if v.DataBackend != nil {
+ if err := v.DataBackend.Close(); err != nil {
+ glog.V(0).Infof("fail to close volume %d", v.Id)
+ }
}
- v.dataFile = nil
+ v.DataBackend = nil
+ stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
var e error
if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil {
- glog.V(0).Infof("makeupDiff in commitCompact volume %d failed %v", v.Id, e)
+ glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e)
e = os.Remove(v.FileName() + ".cpd")
if e != nil {
return e
@@ -63,6 +124,16 @@ func (v *Volume) commitCompact() error {
return e
}
} else {
+ if runtime.GOOS == "windows" {
+ e = os.RemoveAll(v.FileName() + ".dat")
+ if e != nil {
+ return e
+ }
+ e = os.RemoveAll(v.FileName() + ".idx")
+ if e != nil {
+ return e
+ }
+ }
var e error
if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil {
return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e)
@@ -76,7 +147,6 @@ func (v *Volume) commitCompact() error {
//time.Sleep(20 * time.Second)
os.RemoveAll(v.FileName() + ".ldb")
- os.RemoveAll(v.FileName() + ".bdb")
glog.V(3).Infof("Loading volume %d commit file...", v.Id)
if e = v.load(true, false, v.needleMapKind, 0); e != nil {
@@ -99,14 +169,15 @@ func (v *Volume) cleanupCompact() error {
return nil
}
-func fetchCompactRevisionFromDatFile(file *os.File) (compactRevision uint16, err error) {
- superBlock, err := ReadSuperBlock(file)
+func fetchCompactRevisionFromDatFile(datBackend backend.BackendStorageFile) (compactRevision uint16, err error) {
+ superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
return 0, err
}
- return superBlock.CompactRevision, nil
+ return superBlock.CompactionRevision, nil
}
+// if the old .dat and .idx files were updated while compacting, this func applies the same changes to the new files
func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldIdxFileName string) (err error) {
var indexSize int64
@@ -114,8 +185,10 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
defer oldIdxFile.Close()
oldDatFile, err := os.Open(oldDatFileName)
- defer oldDatFile.Close()
+ oldDatBackend := backend.NewDiskFile(oldDatFile)
+ defer oldDatBackend.Close()
+ // skip if the old .idx file has not changed
if indexSize, err = verifyIndexFileIntegrity(oldIdxFile); err != nil {
return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err)
}
@@ -123,7 +196,8 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
return nil
}
- oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatFile)
+ // fail if the old .dat file has changed to a new revision
+ oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatBackend)
if err != nil {
return fmt.Errorf("fetchCompactRevisionFromDatFile src %s failed: %v", oldDatFile.Name(), err)
}
@@ -137,12 +211,12 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
}
incrementedHasUpdatedIndexEntry := make(map[NeedleId]keyField)
- for idxOffset := indexSize - NeedleEntrySize; uint64(idxOffset) >= v.lastCompactIndexOffset; idxOffset -= NeedleEntrySize {
+ for idxOffset := indexSize - NeedleMapEntrySize; uint64(idxOffset) >= v.lastCompactIndexOffset; idxOffset -= NeedleMapEntrySize {
var IdxEntry []byte
if IdxEntry, err = readIndexEntryAtOffset(oldIdxFile, idxOffset); err != nil {
return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idxOffset, err)
}
- key, offset, size := IdxFileEntry(IdxEntry)
+ key, offset, size := idx2.IdxFileEntry(IdxEntry)
glog.V(4).Infof("key %d offset %d size %d", key, offset, size)
if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
incrementedHasUpdatedIndexEntry[key] = keyField{
@@ -164,7 +238,8 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
if dst, err = os.OpenFile(newDatFileName, os.O_RDWR, 0644); err != nil {
return fmt.Errorf("open dat file %s failed: %v", newDatFileName, err)
}
- defer dst.Close()
+ dstDatBackend := backend.NewDiskFile(dst)
+ defer dstDatBackend.Close()
if idx, err = os.OpenFile(newIdxFileName, os.O_RDWR, 0644); err != nil {
return fmt.Errorf("open idx file %s failed: %v", newIdxFileName, err)
@@ -172,7 +247,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
defer idx.Close()
var newDatCompactRevision uint16
- newDatCompactRevision, err = fetchCompactRevisionFromDatFile(dst)
+ newDatCompactRevision, err = fetchCompactRevisionFromDatFile(dstDatBackend)
if err != nil {
return fmt.Errorf("fetchCompactRevisionFromDatFile dst %s failed: %v", dst.Name(), err)
}
@@ -180,11 +255,9 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
return fmt.Errorf("oldDatFile %s 's compact revision is %d while newDatFile %s 's compact revision is %d", oldDatFileName, oldDatCompactRevision, newDatFileName, newDatCompactRevision)
}
- idxEntryBytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
for key, increIdxEntry := range incrementedHasUpdatedIndexEntry {
- NeedleIdToBytes(idxEntryBytes[0:NeedleIdSize], key)
- OffsetToBytes(idxEntryBytes[NeedleIdSize:NeedleIdSize+OffsetSize], increIdxEntry.offset)
- util.Uint32toBytes(idxEntryBytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], increIdxEntry.size)
+
+ idxEntryBytes := needle_map.ToBytes(key, increIdxEntry.offset, increIdxEntry.size)
var offset int64
if offset, err = dst.Seek(0, 2); err != nil {
@@ -194,30 +267,30 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
 //ensure writes start from an aligned position
if offset%NeedlePaddingSize != 0 {
offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
- if offset, err = v.dataFile.Seek(offset, 0); err != nil {
- glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err)
+ if offset, err = dst.Seek(offset, 0); err != nil {
+ glog.V(0).Infof("failed to align in datafile %s: %v", dst.Name(), err)
return
}
}
//updated needle
- if increIdxEntry.offset != 0 && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize {
+ if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize {
 //even if the needle cache in memory is hit, the needle bytes are correct
- glog.V(4).Infof("file %d offset %d size %d", key, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size)
+ glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
var needleBytes []byte
- needleBytes, err = ReadNeedleBlob(oldDatFile, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size, v.Version())
+ needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version())
if err != nil {
- return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size, err)
+ return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, err)
}
dst.Write(needleBytes)
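+ //patch the offset portion of the idx entry (bytes 8..12: needle id is 8 bytes, offset 4) to point at the needle's new aligned location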
util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize))
} else { //deleted needle
//fakeDelNeedle's default Data field is nil
- fakeDelNeedle := new(Needle)
+ fakeDelNeedle := new(needle.Needle)
fakeDelNeedle.Id = key
fakeDelNeedle.Cookie = 0x12345678
fakeDelNeedle.AppendAtNs = uint64(time.Now().UnixNano())
- _, _, _, err = fakeDelNeedle.Append(dst, v.Version())
+ _, _, _, err = fakeDelNeedle.Append(dstDatBackend, v.Version())
if err != nil {
return fmt.Errorf("append deleted %d failed: %v", key, err)
}
@@ -235,18 +308,19 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
}
type VolumeFileScanner4Vacuum struct {
- version Version
- v *Volume
- dst *os.File
- nm *NeedleMap
- newOffset int64
- now uint64
+ version needle.Version
+ v *Volume
+ dstBackend backend.BackendStorageFile
+ nm *needle_map.MemDb
+ newOffset int64
+ now uint64
+ writeThrottler *util.WriteThrottler
}
-func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock SuperBlock) error {
- scanner.version = superBlock.Version()
- superBlock.CompactRevision++
- _, err := scanner.dst.Write(superBlock.Bytes())
+func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock super_block.SuperBlock) error {
+ scanner.version = superBlock.Version
+ superBlock.CompactionRevision++
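+ // rewrite the superblock in place at offset 0, with the compaction revision bumped for the new .dat file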
+ _, err := scanner.dstBackend.WriteAt(superBlock.Bytes(), 0)
scanner.newOffset = int64(superBlock.BlockSize())
return err
@@ -255,105 +329,119 @@ func (scanner *VolumeFileScanner4Vacuum) ReadNeedleBody() bool {
return true
}
-func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *Needle, offset int64) error {
+func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
if n.HasTtl() && scanner.now >= n.LastModified+uint64(scanner.v.Ttl.Minutes()*60) {
return nil
}
nv, ok := scanner.v.nm.Get(n.Id)
glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
- if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 {
- if err := scanner.nm.Put(n.Id, Offset(scanner.newOffset/NeedlePaddingSize), n.Size); err != nil {
+ if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size != TombstoneFileSize {
+ if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
}
- if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil {
+ if _, _, _, err := n.Append(scanner.dstBackend, scanner.v.Version()); err != nil {
return fmt.Errorf("cannot append needle: %s", err)
}
- scanner.newOffset += n.DiskSize(scanner.version)
+ delta := n.DiskSize(scanner.version)
+ scanner.newOffset += delta
+ scanner.writeThrottler.MaybeSlowdown(delta)
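+ // MaybeSlowdown sleeps as needed so compaction stays near compactionBytePerSecond (0 presumably disables throttling)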
glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
}
return nil
}
-func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64) (err error) {
+func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64, compactionBytePerSecond int64) (err error) {
var (
- dst, idx *os.File
+ dst backend.BackendStorageFile
)
- if dst, err = createVolumeFile(dstName, preallocate); err != nil {
+ if dst, err = backend.CreateVolumeFile(dstName, preallocate, 0); err != nil {
return
}
defer dst.Close()
- if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
- return
- }
- defer idx.Close()
+ nm := needle_map.NewMemDb()
+ defer nm.Close()
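+ // the needle map is built in memory during the scan and only persisted to the .idx file via SaveToIdx at the end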
scanner := &VolumeFileScanner4Vacuum{
- v: v,
- now: uint64(time.Now().Unix()),
- nm: NewBtreeNeedleMap(idx),
- dst: dst,
+ v: v,
+ now: uint64(time.Now().Unix()),
+ nm: nm,
+ dstBackend: dst,
+ writeThrottler: util.NewWriteThrottler(compactionBytePerSecond),
}
err = ScanVolumeFile(v.dir, v.Collection, v.Id, v.needleMapKind, scanner)
+ if err != nil {
+ return err
+ }
+
+ err = nm.SaveToIdx(idxName)
return
}
-func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
+func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64, compactionBytePerSecond int64) (err error) {
var (
- dst, idx, oldIndexFile *os.File
+ srcDatBackend, dstDatBackend backend.BackendStorageFile
+ dataFile *os.File
)
- if dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
+ if dstDatBackend, err = backend.CreateVolumeFile(dstDatName, preallocate, 0); err != nil {
return
}
- defer dst.Close()
+ defer dstDatBackend.Close()
- if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
+ oldNm := needle_map.NewMemDb()
+ defer oldNm.Close()
+ newNm := needle_map.NewMemDb()
+ defer newNm.Close()
+ if err = oldNm.LoadFromIdx(srcIdxName); err != nil {
return
}
- defer idx.Close()
-
- if oldIndexFile, err = os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644); err != nil {
- return
+ if dataFile, err = os.Open(srcDatName); err != nil {
+ return err
}
- defer oldIndexFile.Close()
+ srcDatBackend = backend.NewDiskFile(dataFile)
+ defer srcDatBackend.Close()
- nm := NewBtreeNeedleMap(idx)
now := uint64(time.Now().Unix())
- v.SuperBlock.CompactRevision++
- dst.Write(v.SuperBlock.Bytes())
- newOffset := int64(v.SuperBlock.BlockSize())
+ sb.CompactionRevision++
+ dstDatBackend.WriteAt(sb.Bytes(), 0)
+ newOffset := int64(sb.BlockSize())
- WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error {
- if offset == 0 || size == TombstoneFileSize {
+ writeThrottler := util.NewWriteThrottler(compactionBytePerSecond)
+
+ oldNm.AscendingVisit(func(value needle_map.NeedleValue) error {
+
+ offset, size := value.Offset, value.Size
+
+ if offset.IsZero() || size == TombstoneFileSize {
return nil
}
- nv, ok := v.nm.Get(key)
- if !ok {
+ n := new(needle.Needle)
+ err := n.ReadData(srcDatBackend, offset.ToAcutalOffset(), size, version)
+ if err != nil {
return nil
}
- n := new(Needle)
- n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version())
-
- if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) {
+ if n.HasTtl() && now >= n.LastModified+uint64(sb.Ttl.Minutes()*60) {
return nil
}
- glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
- if nv.Offset == offset && nv.Size > 0 {
- if err = nm.Put(n.Id, Offset(newOffset/NeedlePaddingSize), n.Size); err != nil {
- return fmt.Errorf("cannot put needle: %s", err)
- }
- if _, _, _, err = n.Append(dst, v.Version()); err != nil {
- return fmt.Errorf("cannot append needle: %s", err)
- }
- newOffset += n.DiskSize(v.Version())
- glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
+ if err = newNm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil {
+ return fmt.Errorf("cannot put needle: %s", err)
+ }
+ if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil {
+ return fmt.Errorf("cannot append needle: %s", err)
}
+ delta := n.DiskSize(version)
+ newOffset += delta
+ writeThrottler.MaybeSlowdown(delta)
+ glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
+
return nil
})
+ newNm.SaveToIdx(datIdxName)
+
return
}
diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 0bc24037d..1b5161e63 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -1,11 +1,15 @@
package storage
import (
- "github.com/chrislusf/seaweedfs/weed/storage/types"
"io/ioutil"
"math/rand"
"os"
"testing"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
/*
@@ -43,7 +47,7 @@ func TestMakeDiff(t *testing.T) {
v := new(Volume)
//lastCompactIndexOffset value is the index file size before step 4
v.lastCompactIndexOffset = 96
- v.SuperBlock.version = 0x2
+ v.SuperBlock.Version = 0x2
/*
err := v.makeupDiff(
"/yourpath/1.cpd",
@@ -65,13 +69,13 @@ func TestCompaction(t *testing.T) {
}
defer os.RemoveAll(dir) // clean up
- v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &ReplicaPlacement{}, &TTL{}, 0)
+ v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
if err != nil {
t.Fatalf("volume creation: %v", err)
}
- beforeCommitFileCount := 1000
- afterCommitFileCount := 1000
+ beforeCommitFileCount := 10000
+ afterCommitFileCount := 10000
infos := make([]*needleInfo, beforeCommitFileCount+afterCommitFileCount)
@@ -79,17 +83,20 @@ func TestCompaction(t *testing.T) {
doSomeWritesDeletes(i, v, t, infos)
}
- v.Compact(0)
+ startTime := time.Now()
+ v.Compact2(0, 0)
+ speed := float64(v.ContentSize()) / time.Since(startTime).Seconds()
+ t.Logf("compaction speed: %.2f bytes/s", speed)
for i := 1; i <= afterCommitFileCount; i++ {
doSomeWritesDeletes(i+beforeCommitFileCount, v, t, infos)
}
- v.commitCompact()
+ v.CommitCompact()
v.Close()
- v, err = NewVolume(dir, "", 1, NeedleMapInMemory, nil, nil, 0)
+ v, err = NewVolume(dir, "", 1, NeedleMapInMemory, nil, nil, 0, 0)
if err != nil {
t.Fatalf("volume reloading: %v", err)
}
@@ -122,7 +129,7 @@ func TestCompaction(t *testing.T) {
}
func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
n := newRandomNeedle(uint64(i))
- _, size, err := v.writeNeedle(n)
+ _, size, _, err := v.writeNeedle2(n, false)
if err != nil {
t.Fatalf("write file %d: %v", i, err)
}
@@ -134,7 +141,7 @@ func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
if rand.Float64() < 0.03 {
toBeDeleted := rand.Intn(i) + 1
oldNeedle := newEmptyNeedle(uint64(toBeDeleted))
- v.deleteNeedle(oldNeedle)
+ v.deleteNeedle2(oldNeedle)
// println("deleted file", toBeDeleted)
infos[toBeDeleted-1] = &needleInfo{
size: 0,
@@ -145,21 +152,21 @@ func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
type needleInfo struct {
size uint32
- crc CRC
+ crc needle.CRC
}
-func newRandomNeedle(id uint64) *Needle {
- n := new(Needle)
+func newRandomNeedle(id uint64) *needle.Needle {
+ n := new(needle.Needle)
n.Data = make([]byte, rand.Intn(1024))
rand.Read(n.Data)
- n.Checksum = NewCRC(n.Data)
+ n.Checksum = needle.NewCRC(n.Data)
n.Id = types.Uint64ToNeedleId(id)
return n
}
-func newEmptyNeedle(id uint64) *Needle {
- n := new(Needle)
+func newEmptyNeedle(id uint64) *needle.Needle {
+ n := new(needle.Needle)
n.Id = types.Uint64ToNeedleId(id)
return n
}
diff --git a/weed/tools/read_index.go b/weed/tools/read_index.go
deleted file mode 100644
index d53f489ea..000000000
--- a/weed/tools/read_index.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "log"
- "os"
-
- "github.com/chrislusf/seaweedfs/weed/storage"
- "github.com/chrislusf/seaweedfs/weed/storage/types"
-)
-
-var (
- indexFileName = flag.String("file", "", ".idx file to analyze")
-)
-
-func main() {
- flag.Parse()
- indexFile, err := os.OpenFile(*indexFileName, os.O_RDONLY, 0644)
- if err != nil {
- log.Fatalf("Create Volume Index [ERROR] %s\n", err)
- }
- defer indexFile.Close()
-
- storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
- fmt.Printf("key %d, offset %d, size %d, nextOffset %d\n", key, offset*8, size, int64(offset)*types.NeedlePaddingSize+int64(size))
- return nil
- })
-}
diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go
index 55796ab43..e5dc48652 100644
--- a/weed/topology/allocate_volume.go
+++ b/weed/topology/allocate_volume.go
@@ -2,29 +2,28 @@ package topology
import (
"context"
- "time"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
)
type AllocateVolumeResult struct {
Error string
}
-func AllocateVolume(dn *DataNode, vid storage.VolumeId, option *VolumeGrowOption) error {
+func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.VolumeId, option *VolumeGrowOption) error {
- return operation.WithVolumeServerClient(dn.Url(), func(client volume_server_pb.VolumeServerClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
+ return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
- _, deleteErr := client.AssignVolume(ctx, &volume_server_pb.AssignVolumeRequest{
- VolumdId: uint32(vid),
- Collection: option.Collection,
- Replication: option.ReplicaPlacement.String(),
- Ttl: option.Ttl.String(),
- Preallocate: option.Prealloacte,
+ _, deleteErr := client.AllocateVolume(context.Background(), &volume_server_pb.AllocateVolumeRequest{
+ VolumeId: uint32(vid),
+ Collection: option.Collection,
+ Replication: option.ReplicaPlacement.String(),
+ Ttl: option.Ttl.String(),
+ Preallocate: option.Prealloacte,
+ MemoryMapMaxSizeMb: option.MemoryMapMaxSizeMb,
})
return deleteErr
})
diff --git a/weed/topology/cluster_commands.go b/weed/topology/cluster_commands.go
index 7a36c25ec..152691ccb 100644
--- a/weed/topology/cluster_commands.go
+++ b/weed/topology/cluster_commands.go
@@ -3,14 +3,14 @@ package topology
import (
"github.com/chrislusf/raft"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
type MaxVolumeIdCommand struct {
- MaxVolumeId storage.VolumeId `json:"maxVolumeId"`
+ MaxVolumeId needle.VolumeId `json:"maxVolumeId"`
}
-func NewMaxVolumeIdCommand(value storage.VolumeId) *MaxVolumeIdCommand {
+func NewMaxVolumeIdCommand(value needle.VolumeId) *MaxVolumeIdCommand {
return &MaxVolumeIdCommand{
MaxVolumeId: value,
}
diff --git a/weed/topology/collection.go b/weed/topology/collection.go
index a17f0c961..5b410d1eb 100644
--- a/weed/topology/collection.go
+++ b/weed/topology/collection.go
@@ -3,18 +3,24 @@ package topology
import (
"fmt"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/util"
)
type Collection struct {
Name string
volumeSizeLimit uint64
+ replicationAsMin bool
storageType2VolumeLayout *util.ConcurrentReadMap
}
-func NewCollection(name string, volumeSizeLimit uint64) *Collection {
- c := &Collection{Name: name, volumeSizeLimit: volumeSizeLimit}
+func NewCollection(name string, volumeSizeLimit uint64, replicationAsMin bool) *Collection {
+ c := &Collection{
+ Name: name,
+ volumeSizeLimit: volumeSizeLimit,
+ replicationAsMin: replicationAsMin,
+ }
c.storageType2VolumeLayout = util.NewConcurrentReadMap()
return c
}
@@ -23,18 +29,18 @@ func (c *Collection) String() string {
return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
}
-func (c *Collection) GetOrCreateVolumeLayout(rp *storage.ReplicaPlacement, ttl *storage.TTL) *VolumeLayout {
+func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
keyString := rp.String()
if ttl != nil {
keyString += ttl.String()
}
vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
- return NewVolumeLayout(rp, ttl, c.volumeSizeLimit)
+ return NewVolumeLayout(rp, ttl, c.volumeSizeLimit, c.replicationAsMin)
})
return vl.(*VolumeLayout)
}
-func (c *Collection) Lookup(vid storage.VolumeId) []*DataNode {
+func (c *Collection) Lookup(vid needle.VolumeId) []*DataNode {
for _, vl := range c.storageType2VolumeLayout.Items() {
if vl != nil {
if list := vl.(*VolumeLayout).Lookup(vid); list != nil {
diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go
index bcf2dfd31..dc3accb71 100644
--- a/weed/topology/data_center.go
+++ b/weed/topology/data_center.go
@@ -1,5 +1,7 @@
package topology
+import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+
type DataCenter struct {
NodeImpl
}
@@ -38,3 +40,19 @@ func (dc *DataCenter) ToMap() interface{} {
m["Racks"] = racks
return m
}
+
+func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo {
+ m := &master_pb.DataCenterInfo{
+ Id: string(dc.Id()),
+ VolumeCount: uint64(dc.GetVolumeCount()),
+ MaxVolumeCount: uint64(dc.GetMaxVolumeCount()),
+ FreeVolumeCount: uint64(dc.FreeSpace()),
+ ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()),
+ RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()),
+ }
+ for _, c := range dc.Children() {
+ rack := c.(*Rack)
+ m.RackInfos = append(m.RackInfos, rack.ToRackInfo())
+ }
+ return m
+}
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 6ea6d3938..efdf5285b 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -2,7 +2,13 @@ package topology
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"strconv"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
@@ -10,18 +16,21 @@ import (
type DataNode struct {
NodeImpl
- volumes map[storage.VolumeId]storage.VolumeInfo
- Ip string
- Port int
- PublicUrl string
- LastSeen int64 // unix time in seconds
+ volumes map[needle.VolumeId]storage.VolumeInfo
+ Ip string
+ Port int
+ PublicUrl string
+ LastSeen int64 // unix time in seconds
+ ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo
+ ecShardsLock sync.RWMutex
}
func NewDataNode(id string) *DataNode {
s := &DataNode{}
s.id = NodeId(id)
s.nodeType = "DataNode"
- s.volumes = make(map[storage.VolumeId]storage.VolumeInfo)
+ s.volumes = make(map[needle.VolumeId]storage.VolumeInfo)
+ s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
s.NodeImpl.value = s
return s
}
@@ -32,25 +41,37 @@ func (dn *DataNode) String() string {
return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl)
}
-func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew bool) {
+func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
dn.Lock()
defer dn.Unlock()
- if _, ok := dn.volumes[v.Id]; !ok {
+ if oldV, ok := dn.volumes[v.Id]; !ok {
dn.volumes[v.Id] = v
dn.UpAdjustVolumeCountDelta(1)
+ if v.IsRemote() {
+ dn.UpAdjustRemoteVolumeCountDelta(1)
+ }
if !v.ReadOnly {
dn.UpAdjustActiveVolumeCountDelta(1)
}
dn.UpAdjustMaxVolumeId(v.Id)
isNew = true
} else {
+ if oldV.IsRemote() != v.IsRemote() {
+ if v.IsRemote() {
+ dn.UpAdjustRemoteVolumeCountDelta(1)
+ }
+ if oldV.IsRemote() {
+ dn.UpAdjustRemoteVolumeCountDelta(-1)
+ }
+ }
+ isChangedRO = dn.volumes[v.Id].ReadOnly != v.ReadOnly
dn.volumes[v.Id] = v
}
return
}
-func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes []storage.VolumeInfo) {
- actualVolumeMap := make(map[storage.VolumeId]storage.VolumeInfo)
+func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changeRO []storage.VolumeInfo) {
+ actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo)
for _, v := range actualVolumes {
actualVolumeMap[v.Id] = v
}
@@ -61,15 +82,42 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
delete(dn.volumes, vid)
deletedVolumes = append(deletedVolumes, v)
dn.UpAdjustVolumeCountDelta(-1)
- dn.UpAdjustActiveVolumeCountDelta(-1)
+ if v.IsRemote() {
+ dn.UpAdjustRemoteVolumeCountDelta(-1)
+ }
+ if !v.ReadOnly {
+ dn.UpAdjustActiveVolumeCountDelta(-1)
+ }
}
}
dn.Unlock()
for _, v := range actualVolumes {
- isNew := dn.AddOrUpdateVolume(v)
+ isNew, isChangedRO := dn.AddOrUpdateVolume(v)
if isNew {
newVolumes = append(newVolumes, v)
}
+ if isChangedRO {
+ changeRO = append(changeRO, v)
+ }
+ }
+ return
+}
+
+func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.VolumeInfo) {
+ dn.Lock()
+ for _, v := range deletedVolumes {
+ delete(dn.volumes, v.Id)
+ dn.UpAdjustVolumeCountDelta(-1)
+ if v.IsRemote() {
+ dn.UpAdjustRemoteVolumeCountDelta(-1)
+ }
+ if !v.ReadOnly {
+ dn.UpAdjustActiveVolumeCountDelta(-1)
+ }
+ }
+ dn.Unlock()
+ for _, v := range newVolumes {
+ dn.AddOrUpdateVolume(v)
}
return
}
@@ -83,7 +131,7 @@ func (dn *DataNode) GetVolumes() (ret []storage.VolumeInfo) {
return ret
}
-func (dn *DataNode) GetVolumesById(id storage.VolumeId) (storage.VolumeInfo, error) {
+func (dn *DataNode) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) {
dn.RLock()
defer dn.RUnlock()
vInfo, ok := dn.volumes[id]
@@ -123,8 +171,41 @@ func (dn *DataNode) ToMap() interface{} {
ret := make(map[string]interface{})
ret["Url"] = dn.Url()
ret["Volumes"] = dn.GetVolumeCount()
+ ret["VolumeIds"] = dn.GetVolumeIds()
+ ret["EcShards"] = dn.GetEcShardCount()
ret["Max"] = dn.GetMaxVolumeCount()
ret["Free"] = dn.FreeSpace()
ret["PublicUrl"] = dn.PublicUrl
return ret
}
+
+func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
+ m := &master_pb.DataNodeInfo{
+ Id: string(dn.Id()),
+ VolumeCount: uint64(dn.GetVolumeCount()),
+ MaxVolumeCount: uint64(dn.GetMaxVolumeCount()),
+ FreeVolumeCount: uint64(dn.FreeSpace()),
+ ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()),
+ RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()),
+ }
+ for _, v := range dn.GetVolumes() {
+ m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
+ }
+ for _, ecv := range dn.GetEcShards() {
+ m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
+ }
+ return m
+}
+
+// GetVolumeIds returns the human-readable volume ids, limited to at most 100 entries.
+func (dn *DataNode) GetVolumeIds() string {
+ dn.RLock()
+ defer dn.RUnlock()
+ ids := make([]int, 0, len(dn.volumes))
+
+ for k := range dn.volumes {
+ ids = append(ids, int(k))
+ }
+
+ return util.HumanReadableIntsMax(100, ids...)
+}
diff --git a/weed/topology/data_node_ec.go b/weed/topology/data_node_ec.go
new file mode 100644
index 000000000..75c8784fe
--- /dev/null
+++ b/weed/topology/data_node_ec.go
@@ -0,0 +1,135 @@
+package topology
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func (dn *DataNode) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
+ dn.RLock()
+ for _, ecVolumeInfo := range dn.ecShards {
+ ret = append(ret, ecVolumeInfo)
+ }
+ dn.RUnlock()
+ return ret
+}
+
+func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
+ // prepare the new ec shard map
+ actualEcShardMap := make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
+ for _, ecShards := range actualShards {
+ actualEcShardMap[ecShards.VolumeId] = ecShards
+ }
+
+ // find the newShards and deletedShards
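+ // Minus computes a shard-bit set difference, so bits only in the actual set are additions and bits only in dn.ecShards are deletions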
+ var newShardCount, deletedShardCount int
+ dn.ecShardsLock.RLock()
+ for vid, ecShards := range dn.ecShards {
+ if actualEcShards, ok := actualEcShardMap[vid]; !ok {
+ // shards registered on dn but missing from the new set have been deleted
+ deletedShards = append(deletedShards, ecShards)
+ deletedShardCount += ecShards.ShardIdCount()
+ } else {
+ // found, but individual shards may still have been added or removed
+ a := actualEcShards.Minus(ecShards)
+ if a.ShardIdCount() > 0 {
+ newShards = append(newShards, a)
+ newShardCount += a.ShardIdCount()
+ }
+ d := ecShards.Minus(actualEcShards)
+ if d.ShardIdCount() > 0 {
+ deletedShards = append(deletedShards, d)
+ deletedShardCount += d.ShardIdCount()
+ }
+ }
+ }
+ for _, ecShards := range actualShards {
+ if _, found := dn.ecShards[ecShards.VolumeId]; !found {
+ newShards = append(newShards, ecShards)
+ newShardCount += ecShards.ShardIdCount()
+ }
+ }
+ dn.ecShardsLock.RUnlock()
+
+ if len(newShards) > 0 || len(deletedShards) > 0 {
+ // if changed, set to the new ec shard map
+ dn.ecShardsLock.Lock()
+ dn.ecShards = actualEcShardMap
+ dn.UpAdjustEcShardCountDelta(int64(newShardCount - deletedShardCount))
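+ // UpAdjustEcShardCountDelta propagates the delta up through rack, data center, and topology counters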
+ dn.ecShardsLock.Unlock()
+ }
+
+ return
+}
+
+func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
+
+ for _, newShard := range newShards {
+ dn.AddOrUpdateEcShard(newShard)
+ }
+
+ for _, deletedShard := range deletedShards {
+ dn.DeleteEcShard(deletedShard)
+ }
+
+}
+
+func (dn *DataNode) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
+ dn.ecShardsLock.Lock()
+ defer dn.ecShardsLock.Unlock()
+
+ delta := 0
+ if existing, ok := dn.ecShards[s.VolumeId]; !ok {
+ dn.ecShards[s.VolumeId] = s
+ delta = s.ShardBits.ShardIdCount()
+ } else {
+ oldCount := existing.ShardBits.ShardIdCount()
+ existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
+ delta = existing.ShardBits.ShardIdCount() - oldCount
+ }
+
+ dn.UpAdjustEcShardCountDelta(int64(delta))
+
+}
+
+func (dn *DataNode) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
+ dn.ecShardsLock.Lock()
+ defer dn.ecShardsLock.Unlock()
+
+ if existing, ok := dn.ecShards[s.VolumeId]; ok {
+ oldCount := existing.ShardBits.ShardIdCount()
+ existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
+ delta := existing.ShardBits.ShardIdCount() - oldCount
+ dn.UpAdjustEcShardCountDelta(int64(delta))
+ if existing.ShardBits.ShardIdCount() == 0 {
+ delete(dn.ecShards, s.VolumeId)
+ }
+ }
+
+}
+
+func (dn *DataNode) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
+
+ // check whether the normal volumes have this volume id
+ dn.RLock()
+ _, ok := dn.volumes[id]
+ if ok {
+ hasVolumeId = true
+ }
+ dn.RUnlock()
+
+ if hasVolumeId {
+ return
+ }
+
+ // check whether the ec shards have this volume id
+ dn.ecShardsLock.RLock()
+ _, ok = dn.ecShards[id]
+ if ok {
+ hasVolumeId = true
+ }
+ dn.ecShardsLock.RUnlock()
+
+ return
+
+}
diff --git a/weed/topology/node.go b/weed/topology/node.go
index b7d2f79ec..114417edf 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -5,26 +5,32 @@ import (
"math/rand"
"strings"
"sync"
+ "sync/atomic"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
type NodeId string
type Node interface {
Id() NodeId
String() string
- FreeSpace() int
- ReserveOneVolume(r int) (*DataNode, error)
- UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int)
- UpAdjustVolumeCountDelta(volumeCountDelta int)
- UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int)
- UpAdjustMaxVolumeId(vid storage.VolumeId)
+ FreeSpace() int64
+ ReserveOneVolume(r int64) (*DataNode, error)
+ UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64)
+ UpAdjustVolumeCountDelta(volumeCountDelta int64)
+ UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
+ UpAdjustEcShardCountDelta(ecShardCountDelta int64)
+ UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64)
+ UpAdjustMaxVolumeId(vid needle.VolumeId)
- GetVolumeCount() int
- GetActiveVolumeCount() int
- GetMaxVolumeCount() int
- GetMaxVolumeId() storage.VolumeId
+ GetVolumeCount() int64
+ GetEcShardCount() int64
+ GetActiveVolumeCount() int64
+ GetRemoteVolumeCount() int64
+ GetMaxVolumeCount() int64
+ GetMaxVolumeId() needle.VolumeId
SetParent(Node)
LinkChildNode(node Node)
UnlinkChildNode(nodeId NodeId)
@@ -39,14 +45,16 @@ type Node interface {
GetValue() interface{} //get reference to the topology,dc,rack,datanode
}
type NodeImpl struct {
+ volumeCount int64
+ remoteVolumeCount int64
+ activeVolumeCount int64
+ ecShardCount int64
+ maxVolumeCount int64
id NodeId
- volumeCount int
- activeVolumeCount int
- maxVolumeCount int
parent Node
sync.RWMutex // lock children
children map[NodeId]Node
- maxVolumeId storage.VolumeId
+ maxVolumeId needle.VolumeId
//for rack, data center, topology
nodeType string
@@ -54,56 +62,64 @@ type NodeImpl struct {
}
// the first node must satisfy filterFirstNodeFn(), the remaining nodes must each have one free slot
-func (n *NodeImpl) RandomlyPickNodes(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
- candidates := make([]Node, 0, len(n.children))
+func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
+ var totalWeights int64
var errs []string
n.RLock()
+ candidates := make([]Node, 0, len(n.children))
+ candidatesWeights := make([]int64, 0, len(n.children))
+ //pick nodes with free volume slots as candidates, and use the free slot count as the node weight
for _, node := range n.children {
- if err := filterFirstNodeFn(node); err == nil {
- candidates = append(candidates, node)
- } else {
- errs = append(errs, string(node.Id())+":"+err.Error())
+ if node.FreeSpace() <= 0 {
+ continue
}
+ totalWeights += node.FreeSpace()
+ candidates = append(candidates, node)
+ candidatesWeights = append(candidatesWeights, node.FreeSpace())
}
n.RUnlock()
- if len(candidates) == 0 {
- return nil, nil, errors.New("No matching data node found! \n" + strings.Join(errs, "\n"))
+ if len(candidates) < numberOfNodes {
+ glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from", len(candidates), "node candidates")
+ return nil, nil, errors.New("Not enough data nodes found!")
}
- firstNode = candidates[rand.Intn(len(candidates))]
- glog.V(2).Infoln(n.Id(), "picked main node:", firstNode.Id())
- restNodes = make([]Node, numberOfNodes-1)
- candidates = candidates[:0]
- n.RLock()
- for _, node := range n.children {
- if node.Id() == firstNode.Id() {
- continue
- }
- if node.FreeSpace() <= 0 {
- continue
+ //pick nodes randomly by weight; nodes picked earlier effectively carry higher weight
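+ // this is weighted sampling without replacement: each pass draws a random
+ // point in [0, totalWeights), walks the cumulative weights to find the
+ // matching node, then zeroes that node's weight so it cannot repeat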
+ sortedCandidates := make([]Node, 0, len(candidates))
+ for i := 0; i < len(candidates); i++ {
+ weightsInterval := rand.Int63n(totalWeights)
+ lastWeights := int64(0)
+ for k, weights := range candidatesWeights {
+ if (weightsInterval >= lastWeights) && (weightsInterval < lastWeights+weights) {
+ sortedCandidates = append(sortedCandidates, candidates[k])
+ candidatesWeights[k] = 0
+ totalWeights -= weights
+ break
+ }
+ lastWeights += weights
}
- glog.V(2).Infoln("select rest node candidate:", node.Id())
- candidates = append(candidates, node)
}
- n.RUnlock()
- glog.V(2).Infoln(n.Id(), "picking", numberOfNodes-1, "from rest", len(candidates), "node candidates")
- ret := len(restNodes) == 0
- for k, node := range candidates {
- if k < len(restNodes) {
- restNodes[k] = node
- if k == len(restNodes)-1 {
- ret = true
+
+ restNodes = make([]Node, 0, numberOfNodes-1)
+ ret := false
+ n.RLock()
+ for k, node := range sortedCandidates {
+ if err := filterFirstNodeFn(node); err == nil {
+ firstNode = node
+ if k >= numberOfNodes-1 {
+ restNodes = sortedCandidates[:numberOfNodes-1]
+ } else {
+ restNodes = append(restNodes, sortedCandidates[:k]...)
+ restNodes = append(restNodes, sortedCandidates[k+1:numberOfNodes]...)
}
+ ret = true
+ break
} else {
- r := rand.Intn(k + 1)
- if r < len(restNodes) {
- restNodes[r] = node
- }
+ errs = append(errs, string(node.Id())+":"+err.Error())
}
}
+ n.RUnlock()
if !ret {
- glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes-1, "from rest", len(candidates), "node candidates")
- err = errors.New("No enough data node found!")
+ return nil, nil, errors.New("No matching data node found! \n" + strings.Join(errs, "\n"))
}
return
}
@@ -126,8 +142,12 @@ func (n *NodeImpl) String() string {
func (n *NodeImpl) Id() NodeId {
return n.id
}
-func (n *NodeImpl) FreeSpace() int {
- return n.maxVolumeCount - n.volumeCount
+func (n *NodeImpl) FreeSpace() int64 {
+ freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount
+ if n.ecShardCount > 0 {
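+ // every DataShardsCount ec shards take roughly one volume slot; the extra -1 keeps the estimate conservative since integer division rounds down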
+ freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
+ }
+ return freeVolumeSlotCount
}
func (n *NodeImpl) SetParent(node Node) {
n.parent = node
@@ -146,7 +166,7 @@ func (n *NodeImpl) Parent() Node {
func (n *NodeImpl) GetValue() interface{} {
return n.value
}
-func (n *NodeImpl) ReserveOneVolume(r int) (assignedNode *DataNode, err error) {
+func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error) {
n.RLock()
defer n.RUnlock()
for _, node := range n.children {
@@ -171,25 +191,52 @@ func (n *NodeImpl) ReserveOneVolume(r int) (assignedNode *DataNode, err error) {
return nil, errors.New("No free volume slot found!")
}
-func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int) { //can be negative
- n.maxVolumeCount += maxVolumeCountDelta
+func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative
+ if maxVolumeCountDelta == 0 {
+ return
+ }
+ atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta)
}
}
-func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int) { //can be negative
- n.volumeCount += volumeCountDelta
+func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative
+ if volumeCountDelta == 0 {
+ return
+ }
+ atomic.AddInt64(&n.volumeCount, volumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustVolumeCountDelta(volumeCountDelta)
}
}
-func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int) { //can be negative
- n.activeVolumeCount += activeVolumeCountDelta
+func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative
+ if remoteVolumeCountDelta == 0 {
+ return
+ }
+ atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta)
+ if n.parent != nil {
+ n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta)
+ }
+}
+func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative
+ if ecShardCountDelta == 0 {
+ return
+ }
+ atomic.AddInt64(&n.ecShardCount, ecShardCountDelta)
+ if n.parent != nil {
+ n.parent.UpAdjustEcShardCountDelta(ecShardCountDelta)
+ }
+}
+func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative
+ if activeVolumeCountDelta == 0 {
+ return
+ }
+ atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta)
}
}
-func (n *NodeImpl) UpAdjustMaxVolumeId(vid storage.VolumeId) { //can be negative
+func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative
if n.maxVolumeId < vid {
n.maxVolumeId = vid
if n.parent != nil {
@@ -197,16 +244,22 @@ func (n *NodeImpl) UpAdjustMaxVolumeId(vid storage.VolumeId) { //can be negative
}
}
}
-func (n *NodeImpl) GetMaxVolumeId() storage.VolumeId {
+func (n *NodeImpl) GetMaxVolumeId() needle.VolumeId {
return n.maxVolumeId
}
-func (n *NodeImpl) GetVolumeCount() int {
+func (n *NodeImpl) GetVolumeCount() int64 {
return n.volumeCount
}
-func (n *NodeImpl) GetActiveVolumeCount() int {
+func (n *NodeImpl) GetEcShardCount() int64 {
+ return n.ecShardCount
+}
+func (n *NodeImpl) GetRemoteVolumeCount() int64 {
+ return n.remoteVolumeCount
+}
+func (n *NodeImpl) GetActiveVolumeCount() int64 {
return n.activeVolumeCount
}
-func (n *NodeImpl) GetMaxVolumeCount() int {
+func (n *NodeImpl) GetMaxVolumeCount() int64 {
return n.maxVolumeCount
}
@@ -218,6 +271,8 @@ func (n *NodeImpl) LinkChildNode(node Node) {
n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount())
n.UpAdjustMaxVolumeId(node.GetMaxVolumeId())
n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
+ n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount())
+ n.UpAdjustEcShardCountDelta(node.GetEcShardCount())
n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
node.SetParent(n)
glog.V(0).Infoln(n, "adds child", node.Id())
@@ -232,6 +287,8 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
node.SetParent(nil)
delete(n.children, node.Id())
n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
+ n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount())
+ n.UpAdjustEcShardCountDelta(-node.GetEcShardCount())
n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
glog.V(0).Infoln(n, "removes", node.Id())
diff --git a/weed/topology/rack.go b/weed/topology/rack.go
index a48d64323..1921c0c05 100644
--- a/weed/topology/rack.go
+++ b/weed/topology/rack.go
@@ -1,6 +1,7 @@
package topology
import (
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"strconv"
"time"
)
@@ -27,7 +28,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
}
return nil
}
-func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int) *DataNode {
+func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64) *DataNode {
for _, c := range r.Children() {
dn := c.(*DataNode)
if dn.MatchLocation(ip, port) {
@@ -58,3 +59,19 @@ func (r *Rack) ToMap() interface{} {
m["DataNodes"] = dns
return m
}
+
+func (r *Rack) ToRackInfo() *master_pb.RackInfo {
+ m := &master_pb.RackInfo{
+ Id: string(r.Id()),
+ VolumeCount: uint64(r.GetVolumeCount()),
+ MaxVolumeCount: uint64(r.GetMaxVolumeCount()),
+ FreeVolumeCount: uint64(r.FreeSpace()),
+ ActiveVolumeCount: uint64(r.GetActiveVolumeCount()),
+ RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()),
+ }
+ for _, c := range r.Children() {
+ dn := c.(*DataNode)
+ m.DataNodeInfos = append(m.DataNodeInfos, dn.ToDataNodeInfo())
+ }
+ return m
+}
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index c73fb706a..481e72fe0 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -1,7 +1,6 @@
package topology
import (
- "bytes"
"encoding/json"
"errors"
"fmt"
@@ -14,101 +13,113 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
)
-func ReplicatedWrite(masterNode string, s *storage.Store,
- volumeId storage.VolumeId, needle *storage.Needle,
- r *http.Request) (size uint32, errorStatus string) {
+func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request) (isUnchanged bool, err error) {
//check JWT
jwt := security.GetJwt(r)
- ret, err := s.Write(volumeId, needle)
- needToReplicate := !s.HasVolume(volumeId)
- if err != nil {
- errorStatus = "Failed to write to local disk (" + err.Error() + ")"
- size = ret
- return
+ // check whether this is a replicated write request
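+ // fan-out requests carry type=replicate, so replicas write locally only and replication stays one hop deep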
+ var remoteLocations []operation.Location
+ if r.FormValue("type") != "replicate" {
+ // this is the initial request
+ remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode)
+ if err != nil {
+ glog.V(0).Infoln(err)
+ return
+ }
}
- needToReplicate = needToReplicate || s.GetVolume(volumeId).NeedToReplicate()
- if !needToReplicate {
- needToReplicate = s.GetVolume(volumeId).NeedToReplicate()
+ // read fsync value
+ fsync := r.FormValue("fsync") == "true"
- if needToReplicate { //send to other replica locations
- if r.FormValue("type") != "replicate" {
-
- if err = distributedOperation(masterNode, s, volumeId, func(location operation.Location) error {
- u := url.URL{
- Scheme: "http",
- Host: location.Url,
- Path: r.URL.Path,
- }
- q := url.Values{
- "type": {"replicate"},
- }
- if needle.LastModified > 0 {
- q.Set("ts", strconv.FormatUint(needle.LastModified, 10))
- }
- if needle.IsChunkedManifest() {
- q.Set("cm", "true")
+
+ if s.GetVolume(volumeId) != nil {
+ isUnchanged, err = s.WriteVolumeNeedle(volumeId, n, fsync)
+ if err != nil {
+ err = fmt.Errorf("failed to write to local disk: %v", err)
+ glog.V(0).Infoln(err)
+ return
+ }
+ }
+
+ if len(remoteLocations) > 0 { //send to other replica locations
+ if err = distributedOperation(remoteLocations, s, func(location operation.Location) error {
+ u := url.URL{
+ Scheme: "http",
+ Host: location.Url,
+ Path: r.URL.Path,
+ }
+ q := url.Values{
+ "type": {"replicate"},
+ "ttl": {n.Ttl.String()},
+ }
+ if n.LastModified > 0 {
+ q.Set("ts", strconv.FormatUint(n.LastModified, 10))
+ }
+ if n.IsChunkedManifest() {
+ q.Set("cm", "true")
+ }
+ u.RawQuery = q.Encode()
+
+ pairMap := make(map[string]string)
+ if n.HasPairs() {
+ tmpMap := make(map[string]string)
+ err := json.Unmarshal(n.Pairs, &tmpMap)
+ if err != nil {
+ glog.V(0).Infoln("Unmarshal pairs error:", err)
}
- u.RawQuery = q.Encode()
-
- pairMap := make(map[string]string)
- if needle.HasPairs() {
- tmpMap := make(map[string]string)
- err := json.Unmarshal(needle.Pairs, &tmpMap)
- if err != nil {
- glog.V(0).Infoln("Unmarshal pairs error:", err)
- }
- for k, v := range tmpMap {
- pairMap[storage.PairNamePrefix+k] = v
- }
+ for k, v := range tmpMap {
+ pairMap[needle.PairNamePrefix+k] = v
}
-
- _, err := operation.Upload(u.String(),
- string(needle.Name), bytes.NewReader(needle.Data), needle.IsGzipped(), string(needle.Mime),
- pairMap, jwt)
- return err
- }); err != nil {
- ret = 0
- errorStatus = fmt.Sprintf("Failed to write to replicas for volume %d: %v", volumeId, err)
}
+
+ // volume servers do not know about encryption
+ _, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsCompressed(), string(n.Mime), pairMap, jwt)
+ return err
+ }); err != nil {
+ err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err)
+ glog.V(0).Infoln(err)
}
}
- size = ret
return
}
func ReplicatedDelete(masterNode string, store *storage.Store,
- volumeId storage.VolumeId, n *storage.Needle,
- r *http.Request) (uint32, error) {
+ volumeId needle.VolumeId, n *needle.Needle,
+ r *http.Request) (size uint32, err error) {
//check JWT
jwt := security.GetJwt(r)
- ret, err := store.Delete(volumeId, n)
+ var remoteLocations []operation.Location
+ if r.FormValue("type") != "replicate" {
+ remoteLocations, err = getWritableRemoteReplications(store, volumeId, masterNode)
+ if err != nil {
+ glog.V(0).Infoln(err)
+ return
+ }
+ }
+
+ size, err = store.DeleteVolumeNeedle(volumeId, n)
if err != nil {
glog.V(0).Infoln("delete error:", err)
- return ret, err
+ return
}
- needToReplicate := !store.HasVolume(volumeId)
- if !needToReplicate && ret > 0 {
- needToReplicate = store.GetVolume(volumeId).NeedToReplicate()
- }
- if needToReplicate { //send to other replica locations
- if r.FormValue("type") != "replicate" {
- if err = distributedOperation(masterNode, store, volumeId, func(location operation.Location) error {
- return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", jwt)
- }); err != nil {
- ret = 0
- }
+ if len(remoteLocations) > 0 { //send to other replica locations
+ if err = distributedOperation(remoteLocations, store, func(location operation.Location) error {
+ return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt))
+ }); err != nil {
+ size = 0
}
}
- return ret, err
+ return
}
type DistributedOperationResult map[string]error
@@ -131,32 +142,53 @@ type RemoteResult struct {
Error error
}
-func distributedOperation(masterNode string, store *storage.Store, volumeId storage.VolumeId, op func(location operation.Location) error) error {
- if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil {
- length := 0
- selfUrl := (store.Ip + ":" + strconv.Itoa(store.Port))
- results := make(chan RemoteResult)
+func distributedOperation(locations []operation.Location, store *storage.Store, op func(location operation.Location) error) error {
+ length := len(locations)
+ results := make(chan RemoteResult)
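+ // write to every replica location concurrently, then collect one result per host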
+ for _, location := range locations {
+ go func(location operation.Location, results chan RemoteResult) {
+ results <- RemoteResult{location.Url, op(location)}
+ }(location, results)
+ }
+ ret := DistributedOperationResult(make(map[string]error))
+ for i := 0; i < length; i++ {
+ result := <-results
+ ret[result.Host] = result.Error
+ }
+
+ return ret.Error()
+}
+
+func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) (
+ remoteLocations []operation.Location, err error) {
+
+ v := s.GetVolume(volumeId)
+ if v != nil && v.ReplicaPlacement.GetCopyCount() == 1 {
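+ // a copy count of 1 means no replicas, so skip the master lookup entirely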
+ return
+ }
+
+ // not on the local store, or has remote replicas
+ lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String())
+ if lookupErr == nil {
+ selfUrl := s.Ip + ":" + strconv.Itoa(s.Port)
for _, location := range lookupResult.Locations {
if location.Url != selfUrl {
- length++
- go func(location operation.Location, results chan RemoteResult) {
- results <- RemoteResult{location.Url, op(location)}
- }(location, results)
+ remoteLocations = append(remoteLocations, location)
}
}
- ret := DistributedOperationResult(make(map[string]error))
- for i := 0; i < length; i++ {
- result := <-results
- ret[result.Host] = result.Error
- }
- if volume := store.GetVolume(volumeId); volume != nil {
- if length+1 < volume.ReplicaPlacement.GetCopyCount() {
- return fmt.Errorf("replicating opetations [%d] is less than volume's replication copy count [%d]", length+1, volume.ReplicaPlacement.GetCopyCount())
- }
- }
- return ret.Error()
} else {
- glog.V(0).Infoln()
- return fmt.Errorf("Failed to lookup for %d: %v", volumeId, lookupErr)
+ err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr)
+ return
}
+
+ if v != nil {
+ // the volume is local and also has remote replicas
+ copyCount := v.ReplicaPlacement.GetCopyCount()
+ if len(lookupResult.Locations) < copyCount {
+ err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]",
+ len(lookupResult.Locations), volumeId, copyCount)
+ }
+ }
+
+ return
}
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index 4242bfa05..993f444a7 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -2,24 +2,33 @@ package topology
import (
"errors"
+ "fmt"
"math/rand"
+ "sync"
"github.com/chrislusf/raft"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/sequence"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/util"
)
type Topology struct {
+ vacuumLockCounter int64
NodeImpl
- collectionMap *util.ConcurrentReadMap
+ collectionMap *util.ConcurrentReadMap
+ ecShardMap map[needle.VolumeId]*EcShardLocations
+ ecShardMapLock sync.RWMutex
pulse int64
- volumeSizeLimit uint64
+ volumeSizeLimit uint64
+ replicationAsMin bool
Sequence sequence.Sequencer
@@ -30,15 +39,17 @@ type Topology struct {
RaftServer raft.Server
}
-func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int) *Topology {
+func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology {
t := &Topology{}
t.id = NodeId(id)
t.nodeType = "Topology"
t.NodeImpl.value = t
t.children = make(map[NodeId]Node)
t.collectionMap = util.NewConcurrentReadMap()
+ t.ecShardMap = make(map[needle.VolumeId]*EcShardLocations)
t.pulse = int64(pulse)
t.volumeSizeLimit = volumeSizeLimit
+ t.replicationAsMin = replicationAsMin
t.Sequence = seq
@@ -50,8 +61,13 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls
}
func (t *Topology) IsLeader() bool {
- if leader, e := t.Leader(); e == nil {
- return leader == t.RaftServer.Name()
+ if t.RaftServer != nil {
+ if t.RaftServer.State() == raft.Leader {
+ return true
+ }
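+ // an empty leader usually indicates a single-node cluster, so treat this node as the leader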
+ if t.RaftServer.Leader() == "" {
+ return true
+ }
}
return false
}
@@ -66,13 +82,13 @@ func (t *Topology) Leader() (string, error) {
if l == "" {
// We are a single-node cluster, so we are the leader
- return t.RaftServer.Name(), errors.New("Raft Server not initialized!")
+ return t.RaftServer.Name(), nil
}
return l, nil
}
-func (t *Topology) Lookup(collection string, vid storage.VolumeId) []*DataNode {
+func (t *Topology) Lookup(collection string, vid needle.VolumeId) (dataNodes []*DataNode) {
//maybe an issue if lots of collections?
if collection == "" {
for _, c := range t.collectionMap.Items() {
@@ -85,14 +101,24 @@ func (t *Topology) Lookup(collection string, vid storage.VolumeId) []*DataNode {
return c.(*Collection).Lookup(vid)
}
}
+
+ if locations, found := t.LookupEcShards(vid); found {
+ for _, loc := range locations.Locations {
+ dataNodes = append(dataNodes, loc...)
+ }
+ return dataNodes
+ }
+
return nil
}
-func (t *Topology) NextVolumeId() storage.VolumeId {
+func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
vid := t.GetMaxVolumeId()
next := vid.Next()
- go t.RaftServer.Do(NewMaxVolumeIdCommand(next))
- return next
+ if _, err := t.RaftServer.Do(NewMaxVolumeIdCommand(next)); err != nil {
+ return 0, err
+ }
+ return next, nil
}
func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
@@ -102,19 +128,43 @@ func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option)
- if err != nil || datanodes.Length() == 0 {
- return "", 0, nil, errors.New("No writable volumes available!")
+ if err != nil {
+ return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
+ }
+ if datanodes.Length() == 0 {
+ return "", 0, nil, fmt.Errorf("no writable volumes available for collection:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
}
- fileId, count := t.Sequence.NextFileId(count)
- return storage.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
+ fileId := t.Sequence.NextFileId(count)
+ return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
}
-func (t *Topology) GetVolumeLayout(collectionName string, rp *storage.ReplicaPlacement, ttl *storage.TTL) *VolumeLayout {
+func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
return t.collectionMap.Get(collectionName, func() interface{} {
- return NewCollection(collectionName, t.volumeSizeLimit)
+ return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
}).(*Collection).GetOrCreateVolumeLayout(rp, ttl)
}
+func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) (ret []string) {
+
+ mapOfCollections := make(map[string]bool)
+ for _, c := range t.collectionMap.Items() {
+ mapOfCollections[c.(*Collection).Name] = true
+ }
+
+ if includeEcVolumes {
+ t.ecShardMapLock.RLock()
+ for _, ecVolumeLocation := range t.ecShardMap {
+ mapOfCollections[ecVolumeLocation.Collection] = true
+ }
+ t.ecShardMapLock.RUnlock()
+ }
+
+ for k := range mapOfCollections {
+ ret = append(ret, k)
+ }
+ return ret
+}
+
func (t *Topology) FindCollection(collectionName string) (*Collection, bool) {
c, hasCollection := t.collectionMap.Find(collectionName)
if !hasCollection {
@@ -152,6 +202,7 @@ func (t *Topology) GetOrCreateDataCenter(dcName string) *DataCenter {
}
func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformationMessage, dn *DataNode) (newVolumes, deletedVolumes []storage.VolumeInfo) {
+ // convert into the in-memory struct storage.VolumeInfo
var volumeInfos []storage.VolumeInfo
for _, v := range volumes {
if vi, err := storage.NewVolumeInfo(v); err == nil {
@@ -160,12 +211,48 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
glog.V(0).Infof("Fail to convert joined volume information: %v", err)
}
}
- newVolumes, deletedVolumes = dn.UpdateVolumes(volumeInfos)
- for _, v := range volumeInfos {
+ // find out the delta volumes
+ var changedVolumes []storage.VolumeInfo
+ newVolumes, deletedVolumes, changedVolumes = dn.UpdateVolumes(volumeInfos)
+ for _, v := range newVolumes {
t.RegisterVolumeLayout(v, dn)
}
for _, v := range deletedVolumes {
t.UnRegisterVolumeLayout(v, dn)
}
+ for _, v := range changedVolumes {
+ vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+ vl.ensureCorrectWritables(&v)
+ }
+ return
+}
+
+func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolumes []*master_pb.VolumeShortInformationMessage, dn *DataNode) {
+ var newVis, oldVis []storage.VolumeInfo
+ for _, v := range newVolumes {
+ vi, err := storage.NewVolumeInfoFromShort(v)
+ if err != nil {
+ glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
+ continue
+ }
+ newVis = append(newVis, vi)
+ }
+ for _, v := range deletedVolumes {
+ vi, err := storage.NewVolumeInfoFromShort(v)
+ if err != nil {
+ glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
+ continue
+ }
+ oldVis = append(oldVis, vi)
+ }
+ dn.DeltaUpdateVolumes(newVis, oldVis)
+
+ for _, vi := range newVis {
+ t.RegisterVolumeLayout(vi, dn)
+ }
+ for _, vi := range oldVis {
+ t.UnRegisterVolumeLayout(vi, dn)
+ }
+
return
}
diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go
new file mode 100644
index 000000000..93b39bb5d
--- /dev/null
+++ b/weed/topology/topology_ec.go
@@ -0,0 +1,173 @@
+package topology
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
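+// EcShardLocations tracks, for a single ec volume, which data nodes hold each
+// shard; Locations is a fixed-size array indexed by shard id (0..TotalShardsCount-1)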
+type EcShardLocations struct {
+ Collection string
+ Locations [erasure_coding.TotalShardsCount][]*DataNode
+}
+
+func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
+ // convert into the in-memory struct erasure_coding.EcVolumeInfo
+ var shards []*erasure_coding.EcVolumeInfo
+ for _, shardInfo := range shardInfos {
+ shards = append(shards,
+ erasure_coding.NewEcVolumeInfo(
+ shardInfo.Collection,
+ needle.VolumeId(shardInfo.Id),
+ erasure_coding.ShardBits(shardInfo.EcIndexBits)))
+ }
+ // find out the delta volumes
+ newShards, deletedShards = dn.UpdateEcShards(shards)
+ for _, v := range newShards {
+ t.RegisterEcShards(v, dn)
+ }
+ for _, v := range deletedShards {
+ t.UnRegisterEcShards(v, dn)
+ }
+ return
+}
+
+func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) {
+ // convert into the in-memory struct erasure_coding.EcVolumeInfo
+ var newShards, deletedShards []*erasure_coding.EcVolumeInfo
+ for _, shardInfo := range newEcShards {
+ newShards = append(newShards,
+ erasure_coding.NewEcVolumeInfo(
+ shardInfo.Collection,
+ needle.VolumeId(shardInfo.Id),
+ erasure_coding.ShardBits(shardInfo.EcIndexBits)))
+ }
+ for _, shardInfo := range deletedEcShards {
+ deletedShards = append(deletedShards,
+ erasure_coding.NewEcVolumeInfo(
+ shardInfo.Collection,
+ needle.VolumeId(shardInfo.Id),
+ erasure_coding.ShardBits(shardInfo.EcIndexBits)))
+ }
+
+ dn.DeltaUpdateEcShards(newShards, deletedShards)
+
+ for _, v := range newShards {
+ t.RegisterEcShards(v, dn)
+ }
+ for _, v := range deletedShards {
+ t.UnRegisterEcShards(v, dn)
+ }
+ return
+}
+
+func NewEcShardLocations(collection string) *EcShardLocations {
+ return &EcShardLocations{
+ Collection: collection,
+ }
+}
+
+func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNode) (added bool) {
+ dataNodes := loc.Locations[shardId]
+ for _, n := range dataNodes {
+ if n.Id() == dn.Id() {
+ return false
+ }
+ }
+ loc.Locations[shardId] = append(dataNodes, dn)
+ return true
+}
+
+func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *DataNode) (deleted bool) {
+ dataNodes := loc.Locations[shardId]
+ foundIndex := -1
+ for index, n := range dataNodes {
+ if n.Id() == dn.Id() {
+ foundIndex = index
+ }
+ }
+ if foundIndex < 0 {
+ return false
+ }
+ loc.Locations[shardId] = append(dataNodes[:foundIndex], dataNodes[foundIndex+1:]...)
+ return true
+}
+
+func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
+
+ t.ecShardMapLock.Lock()
+ defer t.ecShardMapLock.Unlock()
+
+ locations, found := t.ecShardMap[ecShardInfos.VolumeId]
+ if !found {
+ locations = NewEcShardLocations(ecShardInfos.Collection)
+ t.ecShardMap[ecShardInfos.VolumeId] = locations
+ }
+ for _, shardId := range ecShardInfos.ShardIds() {
+ locations.AddShard(shardId, dn)
+ }
+}
+
+func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
+ glog.Infof("removing ec shard info:%+v", ecShardInfos)
+ t.ecShardMapLock.Lock()
+ defer t.ecShardMapLock.Unlock()
+
+ locations, found := t.ecShardMap[ecShardInfos.VolumeId]
+ if !found {
+ return
+ }
+ for _, shardId := range ecShardInfos.ShardIds() {
+ locations.DeleteShard(shardId, dn)
+ }
+}
+
+func (t *Topology) LookupEcShards(vid needle.VolumeId) (locations *EcShardLocations, found bool) {
+ t.ecShardMapLock.RLock()
+ defer t.ecShardMapLock.RUnlock()
+
+ locations, found = t.ecShardMap[vid]
+
+ return
+}
+
+func (t *Topology) ListEcServersByCollection(collection string) (dataNodes []string) {
+ t.ecShardMapLock.RLock()
+ defer t.ecShardMapLock.RUnlock()
+
+ dataNodeMap := make(map[string]bool)
+ for _, ecVolumeLocation := range t.ecShardMap {
+ if ecVolumeLocation.Collection == collection {
+ for _, locations := range ecVolumeLocation.Locations {
+ for _, loc := range locations {
+ dataNodeMap[string(loc.Id())] = true
+ }
+ }
+ }
+ }
+
+ for k := range dataNodeMap {
+ dataNodes = append(dataNodes, k)
+ }
+
+ return
+}
+
+func (t *Topology) DeleteEcCollection(collection string) {
+ t.ecShardMapLock.Lock()
+ defer t.ecShardMapLock.Unlock()
+
+ var vids []needle.VolumeId
+ for vid, ecVolumeLocation := range t.ecShardMap {
+ if ecVolumeLocation.Collection == collection {
+ vids = append(vids, vid)
+ }
+ }
+
+ for _, vid := range vids {
+ delete(t.ecShardMap, vid)
+ }
+}
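
The EcShardLocations type above keys a fixed-size array by shard id and keeps AddShard/DeleteShard idempotent per data node. A minimal usage sketch, with an invented collection name and shard id (in the real master the DataNode comes from the topology tree, not a zero value):

    package main

    import (
    	"fmt"

    	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
    	"github.com/chrislusf/seaweedfs/weed/topology"
    )

    func main() {
    	loc := topology.NewEcShardLocations("pictures")
    	dn := &topology.DataNode{} // illustrative only

    	fmt.Println(loc.AddShard(erasure_coding.ShardId(3), dn))    // true: first registration
    	fmt.Println(loc.AddShard(erasure_coding.ShardId(3), dn))    // false: duplicates are ignored
    	fmt.Println(loc.DeleteShard(erasure_coding.ShardId(3), dn)) // true: shard removed
    }
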
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index a301103eb..068bd401e 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -1,6 +1,7 @@
package topology
import (
+ "google.golang.org/grpc"
"math/rand"
"time"
@@ -8,7 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage"
)
-func (t *Topology) StartRefreshWritableVolumes(garbageThreshold float64, preallocate int64) {
+func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) {
go func() {
for {
if t.IsLeader() {
@@ -22,7 +23,7 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold float64, preallo
c := time.Tick(15 * time.Minute)
for _ = range c {
if t.IsLeader() {
- t.Vacuum(garbageThreshold, preallocate)
+ t.Vacuum(grpcDialOption, garbageThreshold, preallocate)
}
}
}(garbageThreshold)
@@ -58,6 +59,7 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) {
vl.SetVolumeUnavailable(dn, v.Id)
}
dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
+ dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount())
dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
if dn.Parent() != nil {
diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go
index 769ba0e2a..73c55d77d 100644
--- a/weed/topology/topology_map.go
+++ b/weed/topology/topology_map.go
@@ -23,7 +23,7 @@ func (t *Topology) ToMap() interface{} {
}
}
}
- m["layouts"] = layouts
+ m["Layouts"] = layouts
return m
}
@@ -68,9 +68,28 @@ func (t *Topology) ToVolumeLocations() (volumeLocations []*master_pb.VolumeLocat
for _, v := range dn.GetVolumes() {
volumeLocation.NewVids = append(volumeLocation.NewVids, uint32(v.Id))
}
+ for _, s := range dn.GetEcShards() {
+ volumeLocation.NewVids = append(volumeLocation.NewVids, uint32(s.VolumeId))
+ }
volumeLocations = append(volumeLocations, volumeLocation)
}
}
}
return
}
+
+func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo {
+ m := &master_pb.TopologyInfo{
+ Id: string(t.Id()),
+ VolumeCount: uint64(t.GetVolumeCount()),
+ MaxVolumeCount: uint64(t.GetMaxVolumeCount()),
+ FreeVolumeCount: uint64(t.FreeSpace()),
+ ActiveVolumeCount: uint64(t.GetActiveVolumeCount()),
+ RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()),
+ }
+ for _, c := range t.Children() {
+ dc := c.(*DataCenter)
+ m.DataCenterInfos = append(m.DataCenterInfos, dc.ToDataCenterInfo())
+ }
+ return m
+}
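
ToTopologyInfo assembles a protobuf snapshot of the whole tree by recursing into each data center. A hedged sketch of consuming that snapshot, assuming master_pb.DataCenterInfo mirrors the counter fields shown above:

    package main

    import (
    	"fmt"

    	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    )

    func printTopology(t *master_pb.TopologyInfo) {
    	fmt.Printf("topology %s: %d/%d volumes\n", t.Id, t.VolumeCount, t.MaxVolumeCount)
    	for _, dc := range t.DataCenterInfos {
    		fmt.Printf("  dc %s: %d/%d volumes\n", dc.Id, dc.VolumeCount, dc.MaxVolumeCount)
    	}
    }

    func main() {
    	// hand-built sample; real snapshots come from Topology.ToTopologyInfo()
    	printTopology(&master_pb.TopologyInfo{
    		Id:             "topo",
    		VolumeCount:    6,
    		MaxVolumeCount: 25,
    		DataCenterInfos: []*master_pb.DataCenterInfo{
    			{Id: "dc1", VolumeCount: 6, MaxVolumeCount: 25},
    		},
    	})
    }
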
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index 07dc9c67b..2fe381ca2 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -4,6 +4,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/sequence"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+
"testing"
)
@@ -20,7 +23,7 @@ func TestRemoveDataCenter(t *testing.T) {
}
func TestHandlingVolumeServerHeartbeat(t *testing.T) {
- topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
@@ -39,7 +42,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
DeletedByteCount: 34524,
ReadOnly: false,
ReplicaPlacement: uint32(0),
- Version: uint32(1),
+ Version: uint32(needle.CurrentVersion),
Ttl: 0,
}
volumeMessages = append(volumeMessages, volumeMessage)
@@ -47,8 +50,8 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
topo.SyncDataNodeRegistration(volumeMessages, dn)
- assert(t, "activeVolumeCount1", topo.activeVolumeCount, volumeCount)
- assert(t, "volumeCount", topo.volumeCount, volumeCount)
+ assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
+ assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
}
{
@@ -64,20 +67,68 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
DeletedByteCount: 345240,
ReadOnly: false,
ReplicaPlacement: uint32(0),
- Version: uint32(1),
+ Version: uint32(needle.CurrentVersion),
Ttl: 0,
}
volumeMessages = append(volumeMessages, volumeMessage)
}
topo.SyncDataNodeRegistration(volumeMessages, dn)
- assert(t, "activeVolumeCount1", topo.activeVolumeCount, volumeCount)
- assert(t, "volumeCount", topo.volumeCount, volumeCount)
+ //rp, _ := storage.NewReplicaPlacementFromString("000")
+ //layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL)
+ //assert(t, "writables", len(layout.writables), volumeCount)
+
+ assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
+ assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
+ }
+
+ {
+ volumeCount := 6
+ newVolumeShortMessage := &master_pb.VolumeShortInformationMessage{
+ Id: uint32(3),
+ Collection: "",
+ ReplicaPlacement: uint32(0),
+ Version: uint32(needle.CurrentVersion),
+ Ttl: 0,
+ }
+ topo.IncrementalSyncDataNodeRegistration(
+ []*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
+ nil,
+ dn)
+ rp, _ := super_block.NewReplicaPlacementFromString("000")
+ layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL)
+ assert(t, "writables after repeated add", len(layout.writables), volumeCount)
+
+ assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
+ assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
+
+ topo.IncrementalSyncDataNodeRegistration(
+ nil,
+ []*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
+ dn)
+ assert(t, "writables after deletion", len(layout.writables), volumeCount-1)
+ assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount-1)
+ assert(t, "volumeCount", int(topo.volumeCount), volumeCount-1)
+
+ topo.IncrementalSyncDataNodeRegistration(
+ []*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
+ nil,
+ dn)
+
+ for vid := range layout.vid2location {
+ println("after add volume id", vid)
+ }
+ for _, vid := range layout.writables {
+ println("after add writable volume id", vid)
+ }
+
+ assert(t, "writables after add back", len(layout.writables), volumeCount)
+
}
topo.UnRegisterDataNode(dn)
- assert(t, "activeVolumeCount2", topo.activeVolumeCount, 0)
+ assert(t, "activeVolumeCount2", int(topo.activeVolumeCount), 0)
}
@@ -89,27 +140,28 @@ func assert(t *testing.T, message string, actual, expected int) {
func TestAddRemoveVolume(t *testing.T) {
- topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
v := storage.VolumeInfo{
- Id: storage.VolumeId(1),
+ Id: needle.VolumeId(1),
Size: 100,
Collection: "xcollection",
FileCount: 123,
DeleteCount: 23,
DeletedByteCount: 45,
ReadOnly: false,
- Version: storage.CurrentVersion,
- ReplicaPlacement: &storage.ReplicaPlacement{},
- Ttl: storage.EMPTY_TTL,
+ Version: needle.CurrentVersion,
+ ReplicaPlacement: &super_block.ReplicaPlacement{},
+ Ttl: needle.EMPTY_TTL,
}
dn.UpdateVolumes([]storage.VolumeInfo{v})
topo.RegisterVolumeLayout(v, dn)
+ topo.RegisterVolumeLayout(v, dn)
if _, hasCollection := topo.FindCollection(v.Collection); !hasCollection {
t.Errorf("collection %v should exist", v.Collection)
diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go
index d6b09314b..789a01330 100644
--- a/weed/topology/topology_vacuum.go
+++ b/weed/topology/topology_vacuum.go
@@ -2,31 +2,38 @@ package topology
import (
"context"
+ "sync/atomic"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/storage"
)
-func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool {
- ch := make(chan bool, locationlist.Length())
+func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId,
+ locationlist *VolumeLocationList, garbageThreshold float64) (*VolumeLocationList, bool) {
+ ch := make(chan int, locationlist.Length())
+ errCount := int32(0)
for index, dn := range locationlist.list {
- go func(index int, url string, vid storage.VolumeId) {
- err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
- defer cancel()
-
- resp, err := volumeServerClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{
- VolumdId: uint32(vid),
+ go func(index int, url string, vid needle.VolumeId) {
+ err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{
+ VolumeId: uint32(vid),
})
if err != nil {
- ch <- false
+ atomic.AddInt32(&errCount, 1)
+ ch <- -1
return err
}
- isNeeded := resp.GarbageRatio > garbageThreshold
- ch <- isNeeded
+ if resp.GarbageRatio >= garbageThreshold {
+ ch <- index
+ } else {
+ ch <- -1
+ }
return nil
})
if err != nil {
@@ -34,27 +41,33 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
}
}(index, dn.Url(), vid)
}
- isCheckSuccess := true
- for _ = range locationlist.list {
+ vacuumLocationList := NewVolumeLocationList()
+ for range locationlist.list {
select {
- case canVacuum := <-ch:
- isCheckSuccess = isCheckSuccess && canVacuum
+ case index := <-ch:
+ if index != -1 {
+ vacuumLocationList.list = append(vacuumLocationList.list, locationlist.list[index])
+ }
case <-time.After(30 * time.Minute):
- isCheckSuccess = false
- break
+ return vacuumLocationList, false
}
}
- return isCheckSuccess
+ return vacuumLocationList, errCount == 0 && len(vacuumLocationList.list) > 0
}
-func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool {
+func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId,
+ locationlist *VolumeLocationList, preallocate int64) bool {
+ vl.accessLock.Lock()
vl.removeFromWritable(vid)
+ vl.accessLock.Unlock()
+
ch := make(chan bool, locationlist.Length())
for index, dn := range locationlist.list {
- go func(index int, url string, vid storage.VolumeId) {
+ go func(index int, url string, vid needle.VolumeId) {
glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
- err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
- VolumdId: uint32(vid),
+ VolumeId: uint32(vid),
+ Preallocate: preallocate,
})
return err
})
@@ -68,45 +81,50 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
}(index, dn.Url(), vid)
}
isVacuumSuccess := true
- for _ = range locationlist.list {
+ for range locationlist.list {
select {
case canCommit := <-ch:
isVacuumSuccess = isVacuumSuccess && canCommit
case <-time.After(30 * time.Minute):
- isVacuumSuccess = false
- break
+ return false
}
}
return isVacuumSuccess
}
-func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
+func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) bool {
isCommitSuccess := true
+ isReadOnly := false
for _, dn := range locationlist.list {
- glog.V(0).Infoln("Start Commiting vacuum", vid, "on", dn.Url())
- err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
- VolumdId: uint32(vid),
+ glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
+ err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
+ VolumeId: uint32(vid),
})
+ if err != nil {
+ return err
+ }
+ if resp.IsReadOnly {
+ isReadOnly = true
+ }
return err
})
if err != nil {
glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
isCommitSuccess = false
} else {
- glog.V(0).Infof("Complete Commiting vacuum %d on %s", vid, dn.Url())
+ glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
}
- if isCommitSuccess {
- vl.SetVolumeAvailable(dn, vid)
+ }
+ if isCommitSuccess {
+ for _, dn := range locationlist.list {
+ vl.SetVolumeAvailable(dn, vid, isReadOnly)
}
}
return isCommitSuccess
}
-func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) {
+func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) {
for _, dn := range locationlist.list {
glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
- err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
- VolumdId: uint32(vid),
+ VolumeId: uint32(vid),
})
return err
})
@@ -118,24 +136,34 @@ func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationli
}
}
-func (t *Topology) Vacuum(garbageThreshold float64, preallocate int64) int {
+func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) int {
+
+ // if a vacuum is already going on, return immediately
+ swapped := atomic.CompareAndSwapInt64(&t.vacuumLockCounter, 0, 1)
+ if !swapped {
+ return 0
+ }
+ defer atomic.StoreInt64(&t.vacuumLockCounter, 0)
+
+ // now only one vacuum process is going on
+
glog.V(1).Infof("Start vacuum on demand with threshold: %f", garbageThreshold)
for _, col := range t.collectionMap.Items() {
c := col.(*Collection)
for _, vl := range c.storageType2VolumeLayout.Items() {
if vl != nil {
volumeLayout := vl.(*VolumeLayout)
- vacuumOneVolumeLayout(volumeLayout, c, garbageThreshold, preallocate)
+ vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
}
}
}
return 0
}
-func vacuumOneVolumeLayout(volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) {
+func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) {
volumeLayout.accessLock.RLock()
- tmpMap := make(map[storage.VolumeId]*VolumeLocationList)
+ tmpMap := make(map[needle.VolumeId]*VolumeLocationList)
for vid, locationList := range volumeLayout.vid2location {
tmpMap[vid] = locationList
}
@@ -152,11 +180,12 @@ func vacuumOneVolumeLayout(volumeLayout *VolumeLayout, c *Collection, garbageThr
}
glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
- if batchVacuumVolumeCheck(volumeLayout, vid, locationList, garbageThreshold) {
- if batchVacuumVolumeCompact(volumeLayout, vid, locationList, preallocate) {
- batchVacuumVolumeCommit(volumeLayout, vid, locationList)
+ if vacuumLocationList, needVacuum := batchVacuumVolumeCheck(
+ grpcDialOption, volumeLayout, vid, locationList, garbageThreshold); needVacuum {
+ if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) {
+ batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList)
} else {
- batchVacuumVolumeCleanup(volumeLayout, vid, locationList)
+ batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList)
}
}
}
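
Vacuum now guards itself with an atomic compare-and-swap on vacuumLockCounter, so overlapping 15-minute ticks and on-demand triggers cannot start concurrent vacuum passes. The pattern in isolation, as a runnable standalone sketch:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    var vacuumLockCounter int64

    func tryVacuum(id int, wg *sync.WaitGroup) {
    	defer wg.Done()
    	// only the goroutine that swaps 0 -> 1 proceeds; the rest bail out
    	if !atomic.CompareAndSwapInt64(&vacuumLockCounter, 0, 1) {
    		return
    	}
    	defer atomic.StoreInt64(&vacuumLockCounter, 0)
    	fmt.Println("goroutine", id, "runs the vacuum pass")
    }

    func main() {
    	var wg sync.WaitGroup
    	for i := 0; i < 4; i++ {
    		wg.Add(1)
    		go tryVacuum(i, &wg) // attempts arriving while one pass runs return immediately
    	}
    	wg.Wait()
    }
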
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 9bf013ca6..58b5702bf 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -5,6 +5,12 @@ import (
"math/rand"
"sync"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
)
@@ -18,13 +24,14 @@ This package is created to resolve these replica placement issues:
*/
type VolumeGrowOption struct {
- Collection string
- ReplicaPlacement *storage.ReplicaPlacement
- Ttl *storage.TTL
- Prealloacte int64
- DataCenter string
- Rack string
- DataNode string
+ Collection string
+ ReplicaPlacement *super_block.ReplicaPlacement
+ Ttl *needle.TTL
+ Prealloacte int64
+ DataCenter string
+ Rack string
+ DataNode string
+ MemoryMapMaxSizeMb uint32
}
type VolumeGrowth struct {
@@ -42,47 +49,59 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
// one replication type may need rp.GetCopyCount() actual volumes
// given copyCount, how many logical volumes to create
func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
+ v := util.GetViper()
+ v.SetDefault("master.volume_growth.copy_1", 7)
+ v.SetDefault("master.volume_growth.copy_2", 6)
+ v.SetDefault("master.volume_growth.copy_3", 3)
+ v.SetDefault("master.volume_growth.copy_other", 1)
switch copyCount {
case 1:
- count = 7
+ count = v.GetInt("master.volume_growth.copy_1")
case 2:
- count = 6
+ count = v.GetInt("master.volume_growth.copy_2")
case 3:
- count = 3
+ count = v.GetInt("master.volume_growth.copy_3")
default:
- count = 1
+ count = v.GetInt("master.volume_growth.copy_other")
}
return
}
-func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, topo *Topology) (count int, err error) {
- count, err = vg.GrowByCountAndType(vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()), option, topo)
+func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount int) (count int, err error) {
+ if targetCount == 0 {
+ targetCount = vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount())
+ }
+ count, err = vg.GrowByCountAndType(grpcDialOption, targetCount, option, topo)
if count > 0 && count%option.ReplicaPlacement.GetCopyCount() == 0 {
return count, nil
}
return count, err
}
-func (vg *VolumeGrowth) GrowByCountAndType(targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) {
+func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) {
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
for i := 0; i < targetCount; i++ {
- if c, e := vg.findAndGrow(topo, option); e == nil {
+ if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
counter += c
} else {
+ glog.V(0).Infof("create %d volume, created %d: %v", targetCount, counter, e)
return counter, e
}
}
return
}
-func (vg *VolumeGrowth) findAndGrow(topo *Topology, option *VolumeGrowOption) (int, error) {
+func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topology, option *VolumeGrowOption) (int, error) {
servers, e := vg.findEmptySlotsForOneVolume(topo, option)
if e != nil {
return 0, e
}
- vid := topo.NextVolumeId()
- err := vg.grow(topo, vid, option, servers...)
+ vid, raftErr := topo.NextVolumeId()
+ if raftErr != nil {
+ return 0, raftErr
+ }
+ err := vg.grow(grpcDialOption, topo, vid, option, servers...)
return len(servers), err
}
@@ -94,14 +113,14 @@ func (vg *VolumeGrowth) findAndGrow(topo *Topology, option *VolumeGrowOption) (i
func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) {
//find main datacenter and other data centers
rp := option.ReplicaPlacement
- mainDataCenter, otherDataCenters, dc_err := topo.RandomlyPickNodes(rp.DiffDataCenterCount+1, func(node Node) error {
+ mainDataCenter, otherDataCenters, dc_err := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, func(node Node) error {
if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) {
return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter)
}
if len(node.Children()) < rp.DiffRackCount+1 {
return fmt.Errorf("Only has %d racks, not enough for %d.", len(node.Children()), rp.DiffRackCount+1)
}
- if node.FreeSpace() < rp.DiffRackCount+rp.SameRackCount+1 {
+ if node.FreeSpace() < int64(rp.DiffRackCount+rp.SameRackCount+1) {
return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), rp.DiffRackCount+rp.SameRackCount+1)
}
possibleRacksCount := 0
@@ -126,11 +145,11 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
}
//find main rack and other racks
- mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).RandomlyPickNodes(rp.DiffRackCount+1, func(node Node) error {
+ mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, func(node Node) error {
if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) {
return fmt.Errorf("Not matching preferred rack:%s", option.Rack)
}
- if node.FreeSpace() < rp.SameRackCount+1 {
+ if node.FreeSpace() < int64(rp.SameRackCount+1) {
return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), rp.SameRackCount+1)
}
if len(node.Children()) < rp.SameRackCount+1 {
@@ -153,7 +172,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
}
//find main rack and other racks
- mainServer, otherServers, serverErr := mainRack.(*Rack).RandomlyPickNodes(rp.SameRackCount+1, func(node Node) error {
+ mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, func(node Node) error {
if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) {
return fmt.Errorf("Not matching preferred data node:%s", option.DataNode)
}
@@ -171,7 +190,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
servers = append(servers, server.(*DataNode))
}
for _, rack := range otherRacks {
- r := rand.Intn(rack.FreeSpace())
+ r := rand.Int63n(rack.FreeSpace())
if server, e := rack.ReserveOneVolume(r); e == nil {
servers = append(servers, server)
} else {
@@ -179,7 +198,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
}
}
for _, datacenter := range otherDataCenters {
- r := rand.Intn(datacenter.FreeSpace())
+ r := rand.Int63n(datacenter.FreeSpace())
if server, e := datacenter.ReserveOneVolume(r); e == nil {
servers = append(servers, server)
} else {
@@ -189,16 +208,16 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
return
}
-func (vg *VolumeGrowth) grow(topo *Topology, vid storage.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error {
+func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid needle.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error {
for _, server := range servers {
- if err := AllocateVolume(server, vid, option); err == nil {
+ if err := AllocateVolume(server, grpcDialOption, vid, option); err == nil {
vi := storage.VolumeInfo{
Id: vid,
Size: 0,
Collection: option.Collection,
ReplicaPlacement: option.ReplicaPlacement,
Ttl: option.Ttl,
- Version: storage.CurrentVersion,
+ Version: needle.CurrentVersion,
}
server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(vi, server)
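
findVolumeCount now reads the per-copy-count growth numbers from viper instead of hard-coding them. Assuming these keys live in the master's TOML config, they can also be set programmatically; a hedged sketch using spf13/viper directly:

    package main

    import (
    	"fmt"

    	"github.com/spf13/viper"
    )

    func main() {
    	v := viper.New()
    	v.SetDefault("master.volume_growth.copy_1", 7) // same default as findVolumeCount
    	v.Set("master.volume_growth.copy_2", 4)        // override the built-in default of 6

    	fmt.Println(v.GetInt("master.volume_growth.copy_1")) // 7
    	fmt.Println(v.GetInt("master.volume_growth.copy_2")) // 4
    }
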
diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go
index f983df1ec..bc9083fd2 100644
--- a/weed/topology/volume_growth_test.go
+++ b/weed/topology/volume_growth_test.go
@@ -7,6 +7,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/sequence"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
var topologyLayout = `
@@ -79,7 +81,7 @@ func setup(topologyLayout string) *Topology {
fmt.Println("data:", data)
//need to connect all nodes first before server adding volumes
- topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
mTopology := data.(map[string]interface{})
for dcKey, dcValue := range mTopology {
dc := NewDataCenter(dcKey)
@@ -96,12 +98,12 @@ func setup(topologyLayout string) *Topology {
for _, v := range serverMap["volumes"].([]interface{}) {
m := v.(map[string]interface{})
vi := storage.VolumeInfo{
- Id: storage.VolumeId(int64(m["id"].(float64))),
+ Id: needle.VolumeId(int64(m["id"].(float64))),
Size: uint64(m["size"].(float64)),
- Version: storage.CurrentVersion}
+ Version: needle.CurrentVersion}
server.AddOrUpdateVolume(vi)
}
- server.UpAdjustMaxVolumeCountDelta(int(serverMap["limit"].(float64)))
+ server.UpAdjustMaxVolumeCountDelta(int64(serverMap["limit"].(float64)))
}
}
}
@@ -112,7 +114,7 @@ func setup(topologyLayout string) *Topology {
func TestFindEmptySlotsForOneVolume(t *testing.T) {
topo := setup(topologyLayout)
vg := NewDefaultVolumeGrowth()
- rp, _ := storage.NewReplicaPlacementFromString("002")
+ rp, _ := super_block.NewReplicaPlacementFromString("002")
volumeGrowOption := &VolumeGrowOption{
Collection: "",
ReplicaPlacement: rp,
@@ -129,3 +131,212 @@ func TestFindEmptySlotsForOneVolume(t *testing.T) {
fmt.Println("assigned node :", server.Id())
}
}
+
+var topologyLayout2 = `
+{
+ "dc1":{
+ "rack1":{
+ "server111":{
+ "volumes":[
+ {"id":1, "size":12312},
+ {"id":2, "size":12312},
+ {"id":3, "size":12312}
+ ],
+ "limit":300
+ },
+ "server112":{
+ "volumes":[
+ {"id":4, "size":12312},
+ {"id":5, "size":12312},
+ {"id":6, "size":12312}
+ ],
+ "limit":300
+ },
+ "server113":{
+ "volumes":[],
+ "limit":300
+ },
+ "server114":{
+ "volumes":[],
+ "limit":300
+ },
+ "server115":{
+ "volumes":[],
+ "limit":300
+ },
+ "server116":{
+ "volumes":[],
+ "limit":300
+ }
+ },
+ "rack2":{
+ "server121":{
+ "volumes":[
+ {"id":4, "size":12312},
+ {"id":5, "size":12312},
+ {"id":6, "size":12312}
+ ],
+ "limit":300
+ },
+ "server122":{
+ "volumes":[],
+ "limit":300
+ },
+ "server123":{
+ "volumes":[
+ {"id":2, "size":12312},
+ {"id":3, "size":12312},
+ {"id":4, "size":12312}
+ ],
+ "limit":300
+ },
+ "server124":{
+ "volumes":[],
+ "limit":300
+ },
+ "server125":{
+ "volumes":[],
+ "limit":300
+ },
+ "server126":{
+ "volumes":[],
+ "limit":300
+ }
+ },
+ "rack3":{
+ "server131":{
+ "volumes":[],
+ "limit":300
+ },
+ "server132":{
+ "volumes":[],
+ "limit":300
+ },
+ "server133":{
+ "volumes":[],
+ "limit":300
+ },
+ "server134":{
+ "volumes":[],
+ "limit":300
+ },
+ "server135":{
+ "volumes":[],
+ "limit":300
+ },
+ "server136":{
+ "volumes":[],
+ "limit":300
+ }
+ }
+ }
+}
+`
+
+func TestReplication011(t *testing.T) {
+ topo := setup(topologyLayout2)
+ vg := NewDefaultVolumeGrowth()
+ rp, _ := super_block.NewReplicaPlacementFromString("011")
+ volumeGrowOption := &VolumeGrowOption{
+ Collection: "MAIL",
+ ReplicaPlacement: rp,
+ DataCenter: "dc1",
+ Rack: "",
+ DataNode: "",
+ }
+ servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
+ if err != nil {
+ fmt.Println("finding empty slots error :", err)
+ t.Fail()
+ }
+ for _, server := range servers {
+ fmt.Println("assigned node :", server.Id())
+ }
+}
+
+var topologyLayout3 = `
+{
+ "dc1":{
+ "rack1":{
+ "server111":{
+ "volumes":[],
+ "limit":2000
+ }
+ }
+ },
+ "dc2":{
+ "rack2":{
+ "server222":{
+ "volumes":[],
+ "limit":2000
+ }
+ }
+ },
+ "dc3":{
+ "rack3":{
+ "server333":{
+ "volumes":[],
+ "limit":1000
+ }
+ }
+ },
+ "dc4":{
+ "rack4":{
+ "server444":{
+ "volumes":[],
+ "limit":1000
+ }
+ }
+ },
+ "dc5":{
+ "rack5":{
+ "server555":{
+ "volumes":[],
+ "limit":500
+ }
+ }
+ },
+ "dc6":{
+ "rack6":{
+ "server666":{
+ "volumes":[],
+ "limit":500
+ }
+ }
+ }
+}
+`
+
+func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) {
+ topo := setup(topologyLayout3)
+ vg := NewDefaultVolumeGrowth()
+ rp, _ := super_block.NewReplicaPlacementFromString("100")
+ volumeGrowOption := &VolumeGrowOption{
+ Collection: "Weight",
+ ReplicaPlacement: rp,
+ DataCenter: "",
+ Rack: "",
+ DataNode: "",
+ }
+
+ distribution := map[NodeId]int{}
+ // assign 1000 volumes
+ for i := 0; i < 1000; i++ {
+ servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
+ if err != nil {
+ fmt.Println("finding empty slots error :", err)
+ t.Fail()
+ }
+ for _, server := range servers {
+ // fmt.Println("assigned node :", server.Id())
+ if _, ok := distribution[server.id]; !ok {
+ distribution[server.id] = 0
+ }
+ distribution[server.id] += 1
+ }
+ }
+
+ for k, v := range distribution {
+ fmt.Printf("%s : %d\n", k, v)
+ }
+}
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 71a071e2f..9e84fd2da 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -9,17 +9,20 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
- rp *storage.ReplicaPlacement
- ttl *storage.TTL
- vid2location map[storage.VolumeId]*VolumeLocationList
- writables []storage.VolumeId // transient array of writable volume id
- readonlyVolumes map[storage.VolumeId]bool // transient set of readonly volumes
- oversizedVolumes map[storage.VolumeId]bool // set of oversized volumes
+ rp *super_block.ReplicaPlacement
+ ttl *needle.TTL
+ vid2location map[needle.VolumeId]*VolumeLocationList
+ writables []needle.VolumeId // transient array of writable volume id
+ readonlyVolumes map[needle.VolumeId]bool // transient set of readonly volumes
+ oversizedVolumes map[needle.VolumeId]bool // set of oversized volumes
volumeSizeLimit uint64
+ replicationAsMin bool
accessLock sync.RWMutex
}
@@ -29,15 +32,16 @@ type VolumeLayoutStats struct {
FileCount uint64
}
-func NewVolumeLayout(rp *storage.ReplicaPlacement, ttl *storage.TTL, volumeSizeLimit uint64) *VolumeLayout {
+func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
return &VolumeLayout{
rp: rp,
ttl: ttl,
- vid2location: make(map[storage.VolumeId]*VolumeLocationList),
- writables: *new([]storage.VolumeId),
- readonlyVolumes: make(map[storage.VolumeId]bool),
- oversizedVolumes: make(map[storage.VolumeId]bool),
+ vid2location: make(map[needle.VolumeId]*VolumeLocationList),
+ writables: *new([]needle.VolumeId),
+ readonlyVolumes: make(map[needle.VolumeId]bool),
+ oversizedVolumes: make(map[needle.VolumeId]bool),
volumeSizeLimit: volumeSizeLimit,
+ replicationAsMin: replicationAsMin,
}
}
@@ -49,6 +53,9 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
+ // deferred calls run in LIFO order: rememberOversizedVolume runs first,
+ // so ensureCorrectWritables sees the updated oversized set
+ defer vl.ensureCorrectWritables(v)
+ defer vl.rememberOversizedVolume(v)
+
if _, ok := vl.vid2location[v.Id]; !ok {
vl.vid2location[v.Id] = NewVolumeLocationList()
}
@@ -57,7 +64,7 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
for _, dn := range vl.vid2location[v.Id].list {
if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
if vInfo.ReadOnly {
- glog.V(3).Infof("vid %d removed from writable", v.Id)
+ glog.V(1).Infof("vid %d removed from writable", v.Id)
vl.removeFromWritable(v.Id)
vl.readonlyVolumes[v.Id] = true
return
@@ -65,23 +72,16 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
delete(vl.readonlyVolumes, v.Id)
}
} else {
- glog.V(3).Infof("vid %d removed from writable", v.Id)
+ glog.V(1).Infof("vid %d removed from writable", v.Id)
vl.removeFromWritable(v.Id)
delete(vl.readonlyVolumes, v.Id)
return
}
}
- if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) {
- if _, ok := vl.oversizedVolumes[v.Id]; !ok {
- vl.addToWritable(v.Id)
- }
- } else {
- vl.rememberOversizedVolumne(v)
- vl.removeFromWritable(v.Id)
- }
+
}
-func (vl *VolumeLayout) rememberOversizedVolumne(v *storage.VolumeInfo) {
+func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo) {
if vl.isOversized(v) {
vl.oversizedVolumes[v.Id] = true
}
@@ -91,17 +91,31 @@ func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
- vl.removeFromWritable(v.Id)
- delete(vl.vid2location, v.Id)
+ // remove from vid2location map
+ location, ok := vl.vid2location[v.Id]
+ if !ok {
+ return
+ }
+
+ if location.Remove(dn) {
+
+ vl.ensureCorrectWritables(v)
+
+ if location.Length() == 0 {
+ delete(vl.vid2location, v.Id)
+ }
+
+ }
}
-func (vl *VolumeLayout) addToWritable(vid storage.VolumeId) {
- for _, id := range vl.writables {
- if vid == id {
- return
+func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) {
+ if vl.enoughCopies(v.Id) && vl.isWritable(v) {
+ if _, ok := vl.oversizedVolumes[v.Id]; !ok {
+ vl.setVolumeWritable(v.Id)
}
+ } else {
+ vl.removeFromWritable(v.Id)
}
- vl.writables = append(vl.writables, vid)
}
func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
@@ -110,7 +124,7 @@ func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
return !vl.isOversized(v) &&
- v.Version == storage.CurrentVersion &&
+ v.Version == needle.CurrentVersion &&
!v.ReadOnly
}
@@ -121,7 +135,7 @@ func (vl *VolumeLayout) isEmpty() bool {
return len(vl.vid2location) == 0
}
-func (vl *VolumeLayout) Lookup(vid storage.VolumeId) []*DataNode {
+func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
vl.accessLock.RLock()
defer vl.accessLock.RUnlock()
@@ -141,7 +155,7 @@ func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
return
}
-func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*storage.VolumeId, uint64, *VolumeLocationList, error) {
+func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
vl.accessLock.RLock()
defer vl.accessLock.RUnlock()
@@ -158,7 +172,7 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*s
}
return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
}
- var vid storage.VolumeId
+ var vid needle.VolumeId
var locationList *VolumeLocationList
counter := 0
for _, v := range vl.writables {
@@ -205,7 +219,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
return counter
}
-func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool {
+func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
toDeleteIndex := -1
for k, id := range vl.writables {
if id == vid {
@@ -220,7 +234,7 @@ func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool {
}
return false
}
-func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
+func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
for _, v := range vl.writables {
if v == vid {
return false
@@ -231,7 +245,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
return true
}
-func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool {
+func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
@@ -245,18 +259,34 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId)
}
return false
}
-func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid storage.VolumeId) bool {
+func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
+ vInfo, err := dn.GetVolumesById(vid)
+ if err != nil {
+ return false
+ }
+
vl.vid2location[vid].Set(dn)
- if vl.vid2location[vid].Length() >= vl.rp.GetCopyCount() {
+
+ if vInfo.ReadOnly || isReadOnly {
+ return false
+ }
+
+ if vl.enoughCopies(vid) {
return vl.setVolumeWritable(vid)
}
return false
}
-func (vl *VolumeLayout) SetVolumeCapacityFull(vid storage.VolumeId) bool {
+func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
+ locations := vl.vid2location[vid].Length()
+ desired := vl.rp.GetCopyCount()
+ return locations == desired || (vl.replicationAsMin && locations > desired)
+}
+
+func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
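
The new enoughCopies check is the heart of the replicationAsMin flag: an exact replica count is always acceptable, and extra replicas are acceptable only when replication is treated as a minimum. The rule in isolation:

    package main

    import "fmt"

    // mirrors VolumeLayout.enoughCopies with the inputs made explicit
    func enoughCopies(locations, desired int, replicationAsMin bool) bool {
    	return locations == desired || (replicationAsMin && locations > desired)
    }

    func main() {
    	fmt.Println(enoughCopies(3, 3, false)) // true: exact match
    	fmt.Println(enoughCopies(4, 3, false)) // false: over-replicated volume is not writable
    	fmt.Println(enoughCopies(4, 3, true))  // true: replication is a floor, not a target
    }
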
diff --git a/weed/topology/volume_location_list.go b/weed/topology/volume_location_list.go
index 8d5881333..8905c54b5 100644
--- a/weed/topology/volume_location_list.go
+++ b/weed/topology/volume_location_list.go
@@ -3,7 +3,7 @@ package topology
import (
"fmt"
- "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
)
type VolumeLocationList struct {
@@ -66,7 +66,7 @@ func (dnll *VolumeLocationList) Refresh(freshThreshHold int64) {
}
}
-func (dnll *VolumeLocationList) Stats(vid storage.VolumeId, freshThreshHold int64) (size uint64, fileCount int) {
+func (dnll *VolumeLocationList) Stats(vid needle.VolumeId, freshThreshHold int64) (size uint64, fileCount int) {
for _, dnl := range dnll.list {
if dnl.LastSeen < freshThreshHold {
vinfo, err := dnl.GetVolumesById(vid)
diff --git a/weed/util/bounded_tree/bounded_tree.go b/weed/util/bounded_tree/bounded_tree.go
new file mode 100644
index 000000000..40b9c4e47
--- /dev/null
+++ b/weed/util/bounded_tree/bounded_tree.go
@@ -0,0 +1,166 @@
+package bounded_tree
+
+import (
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type Node struct {
+ Parent *Node
+ Name string
+ Children map[string]*Node
+}
+
+type BoundedTree struct {
+ root *Node
+ sync.Mutex
+}
+
+func NewBoundedTree() *BoundedTree {
+ return &BoundedTree{
+ root: &Node{
+ Name: "/",
+ },
+ }
+}
+
+type VisitNodeFunc func(path util.FullPath) (childDirectories []string, err error)
+
+// EnsureVisited walks the given path, calling visitFn once for each directory
+// level that has not been visited yet.
+// It is a no-op if the directory has already been visited or does not exist.
+// A leaf node, which has no children, represents a directory not yet visited.
+// A non-leaf node, or a node absent from the tree, represents a directory that
+// has already been visited or does not need to be visited.
+func (t *BoundedTree) EnsureVisited(p util.FullPath, visitFn VisitNodeFunc) {
+ t.Lock()
+ defer t.Unlock()
+
+ if t.root == nil {
+ return
+ }
+ components := p.Split()
+ // fmt.Printf("components %v %d\n", components, len(components))
+ if canDelete := t.ensureVisited(t.root, util.FullPath("/"), components, 0, visitFn); canDelete {
+ t.root = nil
+ }
+}
+
+func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, components []string, i int, visitFn VisitNodeFunc) (canDeleteNode bool) {
+
+ // println("ensureVisited", currentPath, i)
+
+ if n == nil {
+ // fmt.Printf("%s null\n", currentPath)
+ return
+ }
+
+ if n.isVisited() {
+ // fmt.Printf("%s visited %v\n", currentPath, n.Name)
+ } else {
+ // fmt.Printf("ensure %v\n", currentPath)
+
+ children, err := visitFn(currentPath)
+ if err != nil {
+ glog.V(0).Infof("failed to visit %s: %v", currentPath, err)
+ return
+ }
+
+ if len(children) == 0 {
+ // fmt.Printf(" canDelete %v without children\n", currentPath)
+ return true
+ }
+
+ n.Children = make(map[string]*Node)
+ for _, child := range children {
+ // fmt.Printf(" add child %v %v\n", currentPath, child)
+ n.Children[child] = &Node{
+ Name: child,
+ }
+ }
+ }
+
+ if i >= len(components) {
+ return
+ }
+
+ // fmt.Printf(" check child %v %v\n", currentPath, components[i])
+
+ toVisitNode, found := n.Children[components[i]]
+ if !found {
+ // fmt.Printf(" did not find child %v %v\n", currentPath, components[i])
+ return
+ }
+
+ // fmt.Printf(" ensureVisited %v %v\n", currentPath, toVisitNode.Name)
+
+ if canDelete := t.ensureVisited(toVisitNode, currentPath.Child(components[i]), components, i+1, visitFn); canDelete {
+
+ // fmt.Printf(" delete %v %v\n", currentPath, components[i])
+ delete(n.Children, components[i])
+
+ if len(n.Children) == 0 {
+ // fmt.Printf(" canDelete %v\n", currentPath)
+ return true
+ }
+ }
+
+ return false
+
+}
+
+func (n *Node) isVisited() bool {
+ if n == nil {
+ return true
+ }
+ return len(n.Children) > 0
+}
+
+func (n *Node) getChild(childName string) *Node {
+ if n == nil {
+ return nil
+ }
+ if len(n.Children) > 0 {
+ return n.Children[childName]
+ }
+ return nil
+}
+
+func (t *BoundedTree) HasVisited(p util.FullPath) bool {
+
+ if t.root == nil {
+ return true
+ }
+
+ components := p.Split()
+ // fmt.Printf("components %v %d\n", components, len(components))
+ return t.hasVisited(t.root, util.FullPath("/"), components, 0)
+}
+
+func (t *BoundedTree) hasVisited(n *Node, currentPath util.FullPath, components []string, i int) bool {
+
+ if n == nil {
+ return true
+ }
+
+ if !n.isVisited() {
+ return false
+ }
+
+ // fmt.Printf(" hasVisited child %v %+v %d\n", currentPath, components, i)
+
+ if i >= len(components) {
+ return true
+ }
+
+ toVisitNode, found := n.Children[components[i]]
+ if !found {
+ return true
+ }
+
+ return t.hasVisited(toVisitNode, currentPath.Child(components[i]), components, i+1)
+
+}
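
A minimal usage sketch for BoundedTree; the directory layout and the listDir callback are invented stand-ins for the filer's real directory-listing call:

    package main

    import (
    	"fmt"

    	"github.com/chrislusf/seaweedfs/weed/util"
    	"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
    )

    func main() {
    	tree := bounded_tree.NewBoundedTree()

    	listDir := func(path util.FullPath) ([]string, error) {
    		if path == "/" {
    			return []string{"docs", "logs"}, nil
    		}
    		return nil, nil // leaf directories have no subdirectories
    	}

    	tree.EnsureVisited(util.FullPath("/docs"), listDir)

    	fmt.Println(tree.HasVisited(util.FullPath("/docs"))) // true: just visited, pruned from the tree
    	fmt.Println(tree.HasVisited(util.FullPath("/logs"))) // false: still an unvisited leaf
    }
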
diff --git a/weed/util/bounded_tree/bounded_tree_test.go b/weed/util/bounded_tree/bounded_tree_test.go
new file mode 100644
index 000000000..2328f0497
--- /dev/null
+++ b/weed/util/bounded_tree/bounded_tree_test.go
@@ -0,0 +1,131 @@
+package bounded_tree
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+
+var (
+ visitFn = func(path util.FullPath) (childDirectories []string, err error) {
+ fmt.Printf(" visit %v ...\n", path)
+ switch path {
+ case "/":
+ return []string{"a", "g", "h"}, nil
+ case "/a":
+ return []string{"b", "f"}, nil
+ case "/a/b":
+ return []string{"c", "e"}, nil
+ case "/a/b/c":
+ return []string{"d"}, nil
+ case "/a/b/c/d":
+ return []string{"i", "j"}, nil
+ case "/a/b/c/d/i":
+ return []string{}, nil
+ case "/a/b/c/d/j":
+ return []string{}, nil
+ case "/a/b/e":
+ return []string{}, nil
+ case "/a/f":
+ return []string{}, nil
+ }
+ return nil, nil
+ }
+
+ printMap = func(m map[string]*Node) {
+ for k := range m {
+ println(" >", k)
+ }
+ }
+)
+
+func TestBoundedTree(t *testing.T) {
+
+ // a/b/c/d/i
+ // a/b/c/d/j
+ // a/b/c/d
+ // a/b/e
+ // a/f
+ // g
+ // h
+
+ tree := NewBoundedTree()
+
+ tree.EnsureVisited(util.FullPath("/a/b/c"), visitFn)
+
+ assert.Equal(t, true, tree.HasVisited(util.FullPath("/a/b")))
+ assert.Equal(t, true, tree.HasVisited(util.FullPath("/a/b/c")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/b/c/d")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/b/e")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/f")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/g")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/h")))
+ assert.Equal(t, true, tree.HasVisited(util.FullPath("/")))
+ assert.Equal(t, true, tree.HasVisited(util.FullPath("/x")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/b/e/x")))
+
+ printMap(tree.root.Children)
+
+ a := tree.root.getChild("a")
+
+ b := a.getChild("b")
+ if !b.isVisited() {
+ t.Errorf("expect visited /a/b")
+ }
+ c := b.getChild("c")
+ if !c.isVisited() {
+ t.Errorf("expect visited /a/b/c")
+ }
+
+ d := c.getChild("d")
+ if d.isVisited() {
+ t.Errorf("expect unvisited /a/b/c/d")
+ }
+
+ tree.EnsureVisited(util.FullPath("/a/b/c/d"), visitFn)
+ tree.EnsureVisited(util.FullPath("/a/b/c/d/i"), visitFn)
+ tree.EnsureVisited(util.FullPath("/a/b/c/d/j"), visitFn)
+ tree.EnsureVisited(util.FullPath("/a/b/e"), visitFn)
+ tree.EnsureVisited(util.FullPath("/a/f"), visitFn)
+
+ printMap(tree.root.Children)
+
+}
+
+func TestEmptyBoundedTree(t *testing.T) {
+
+ // g
+ // h
+
+ tree := NewBoundedTree()
+
+ visitFn := func(path util.FullPath) (childDirectories []string, err error) {
+ fmt.Printf(" visit %v ...\n", path)
+ switch path {
+ case "/":
+ return []string{"g", "h"}, nil
+ }
+ t.Fatalf("expected visit %s", path)
+ return nil, nil
+ }
+
+ tree.EnsureVisited(util.FullPath("/a/b"), visitFn)
+
+ tree.EnsureVisited(util.FullPath("/a/b"), visitFn)
+
+ printMap(tree.root.Children)
+
+ assert.Equal(t, true, tree.HasVisited(util.FullPath("/a/b")))
+ assert.Equal(t, true, tree.HasVisited(util.FullPath("/a")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/g")))
+ assert.Equal(t, false, tree.HasVisited(util.FullPath("/g/x")))
+
+}
diff --git a/weed/util/bytes.go b/weed/util/bytes.go
index dfa4ae665..0650919c0 100644
--- a/weed/util/bytes.go
+++ b/weed/util/bytes.go
@@ -1,5 +1,27 @@
package util
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+)
+
+// BytesToHumanReadable returns a human-readable form of the byte count, e.g. 1536 -> "1.50 KiB".
+func BytesToHumanReadable(b uint64) string {
+ const unit = 1024
+ if b < unit {
+ return fmt.Sprintf("%d B", b)
+ }
+
+ div, exp := uint64(unit), 0
+ for n := b / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%.2f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
+}
+
// big endian
func BytesToUint64(b []byte) (v uint64) {
@@ -43,3 +65,52 @@ func Uint16toBytes(b []byte, v uint16) {
func Uint8toBytes(b []byte, v uint8) {
b[0] = byte(v)
}
+
+// HashStringToLong returns an int64 built big-endian from the first 8 bytes of the md5 of dir
+func HashStringToLong(dir string) (v int64) {
+ h := md5.New()
+ io.WriteString(h, dir)
+
+ b := h.Sum(nil)
+
+ v += int64(b[0])
+ v <<= 8
+ v += int64(b[1])
+ v <<= 8
+ v += int64(b[2])
+ v <<= 8
+ v += int64(b[3])
+ v <<= 8
+ v += int64(b[4])
+ v <<= 8
+ v += int64(b[5])
+ v <<= 8
+ v += int64(b[6])
+ v <<= 8
+ v += int64(b[7])
+
+ return
+}
+
+func HashToInt32(data []byte) (v int32) {
+ h := md5.New()
+ h.Write(data)
+
+ b := h.Sum(nil)
+
+ v += int32(b[0])
+ v <<= 8
+ v += int32(b[1])
+ v <<= 8
+ v += int32(b[2])
+ v <<= 8
+ v += int32(b[3])
+
+ return
+}
+
+func Md5(data []byte) string {
+ hash := md5.New()
+ hash.Write(data)
+ return fmt.Sprintf("%x", hash.Sum(nil))
+}
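
Quick checks for the new helpers; the inputs are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/chrislusf/seaweedfs/weed/util"
    )

    func main() {
    	fmt.Println(util.BytesToHumanReadable(512))             // 512 B
    	fmt.Println(util.BytesToHumanReadable(1536))            // 1.50 KiB
    	fmt.Println(util.BytesToHumanReadable(3 * 1024 * 1024)) // 3.00 MiB

    	fmt.Println(util.Md5([]byte("hello"))) // 5d41402abc4b2a76b9719d911017c592
    }
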
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
new file mode 100644
index 000000000..17b64fb6c
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -0,0 +1,128 @@
+package chunk_cache
+
+import (
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+const (
+ memCacheSizeLimit = 1024 * 1024 // 1 MiB
+ onDiskCacheSizeLimit0 = memCacheSizeLimit // disk layer 0: chunks < 1 MiB
+ onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit // disk layer 1: chunks < 4 MiB
+)
+
+// a global cache for recently accessed file chunks
+type ChunkCache struct {
+ memCache *ChunkCacheInMemory
+ diskCaches []*OnDiskCacheLayer
+ sync.RWMutex
+}
+
+func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64) *ChunkCache {
+
+ c := &ChunkCache{
+ memCache: NewChunkCacheInMemory(maxEntries),
+ }
+ c.diskCaches = make([]*OnDiskCacheLayer, 3)
+ c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
+ c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
+ c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
+
+ return c
+}
+
+func (c *ChunkCache) GetChunk(fileId string, chunkSize uint64) (data []byte) {
+ if c == nil {
+ return
+ }
+
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.doGetChunk(fileId, chunkSize)
+}
+
+func (c *ChunkCache) doGetChunk(fileId string, chunkSize uint64) (data []byte) {
+
+ if chunkSize < memCacheSizeLimit {
+ data = c.memCache.GetChunk(fileId)
+ if len(data) >= int(chunkSize) {
+ return data
+ }
+ }
+
+ fid, err := needle.ParseFileIdFromString(fileId)
+ if err != nil {
+ glog.Errorf("failed to parse file id %s", fileId)
+ return nil
+ }
+
+ if chunkSize < onDiskCacheSizeLimit0 {
+ data = c.diskCaches[0].getChunk(fid.Key)
+ if len(data) >= int(chunkSize) {
+ return data
+ }
+ }
+ if chunkSize < onDiskCacheSizeLimit1 {
+ data = c.diskCaches[1].getChunk(fid.Key)
+ if len(data) >= int(chunkSize) {
+ return data
+ }
+ }
+ {
+ data = c.diskCaches[2].getChunk(fid.Key)
+ if len(data) >= int(chunkSize) {
+ return data
+ }
+ }
+
+ return nil
+
+}
+
+func (c *ChunkCache) SetChunk(fileId string, data []byte) {
+ if c == nil {
+ return
+ }
+ c.Lock()
+ defer c.Unlock()
+
+ glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data))
+
+ c.doSetChunk(fileId, data)
+}
+
+func (c *ChunkCache) doSetChunk(fileId string, data []byte) {
+
+ if len(data) < memCacheSizeLimit {
+ c.memCache.SetChunk(fileId, data)
+ }
+
+ fid, err := needle.ParseFileIdFromString(fileId)
+ if err != nil {
+ glog.Errorf("failed to parse file id %s", fileId)
+ return
+ }
+
+ if len(data) < onDiskCacheSizeLimit0 {
+ c.diskCaches[0].setChunk(fid.Key, data)
+ } else if len(data) < onDiskCacheSizeLimit1 {
+ c.diskCaches[1].setChunk(fid.Key, data)
+ } else {
+ c.diskCaches[2].setChunk(fid.Key, data)
+ }
+
+}
+
+func (c *ChunkCache) Shutdown() {
+ if c == nil {
+ return
+ }
+ c.Lock()
+ defer c.Unlock()
+ for _, diskCache := range c.diskCaches {
+ diskCache.shutdown()
+ }
+}
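
SetChunk routes a chunk by size: small chunks go to memory and the first disk layer, mid-size chunks to the second, everything else to the third. The routing in isolation, with thresholds mirroring the constants above:

    package main

    import "fmt"

    const (
    	memLimit   = 1024 * 1024  // 1 MiB
    	diskLimit0 = memLimit     // layer 0: chunks < 1 MiB
    	diskLimit1 = 4 * memLimit // layer 1: chunks < 4 MiB
    )

    func tierFor(size int) string {
    	switch {
    	case size < diskLimit0:
    		return "memory + disk layer 0"
    	case size < diskLimit1:
    		return "disk layer 1"
    	default:
    		return "disk layer 2"
    	}
    }

    func main() {
    	for _, size := range []int{512 * 1024, 2 * 1024 * 1024, 16 * 1024 * 1024} {
    		fmt.Printf("%8d bytes -> %s\n", size, tierFor(size))
    	}
    }
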
diff --git a/weed/util/chunk_cache/chunk_cache_in_memory.go b/weed/util/chunk_cache/chunk_cache_in_memory.go
new file mode 100644
index 000000000..931e45e9a
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache_in_memory.go
@@ -0,0 +1,36 @@
+package chunk_cache
+
+import (
+ "time"
+
+ "github.com/karlseguin/ccache"
+)
+
+// an in-memory cache for recently accessed file chunks
+type ChunkCacheInMemory struct {
+ cache *ccache.Cache
+}
+
+func NewChunkCacheInMemory(maxEntries int64) *ChunkCacheInMemory {
+ pruneCount := maxEntries >> 3
+ if pruneCount <= 0 {
+ pruneCount = 500
+ }
+ return &ChunkCacheInMemory{
+ cache: ccache.New(ccache.Configure().MaxSize(maxEntries).ItemsToPrune(uint32(pruneCount))),
+ }
+}
+
+func (c *ChunkCacheInMemory) GetChunk(fileId string) []byte {
+ item := c.cache.Get(fileId)
+ if item == nil {
+ return nil
+ }
+ data := item.Value().([]byte)
+ item.Extend(time.Hour)
+ return data
+}
+
+func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) {
+ c.cache.Set(fileId, data, time.Hour)
+}
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go
new file mode 100644
index 000000000..d74f87b0c
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache_on_disk.go
@@ -0,0 +1,145 @@
+package chunk_cache
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/opt"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// This implements an on-disk cache.
+// The entries form a FIFO queue with a size limit.
+
+type ChunkCacheVolume struct {
+ DataBackend backend.BackendStorageFile
+ nm storage.NeedleMapper
+ fileName string
+ smallBuffer []byte
+ sizeLimit int64
+ lastModTime time.Time
+ fileSize int64
+}
+
+func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {
+
+ v := &ChunkCacheVolume{
+ smallBuffer: make([]byte, types.NeedlePaddingSize),
+ fileName: fileName,
+ sizeLimit: preallocate,
+ }
+
+ var err error
+
+ if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
+ if !canRead {
+ return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
+ }
+ if !canWrite {
+ return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
+ }
+ if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+ return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
+ } else {
+ v.DataBackend = backend.NewDiskFile(dataFile)
+ v.lastModTime = modTime
+ v.fileSize = fileSize
+ }
+ } else {
+ if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
+ return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
+ }
+ v.lastModTime = time.Now()
+ }
+
+ var indexFile *os.File
+ if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+ return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
+ }
+
+ glog.V(0).Infoln("loading leveldb", v.fileName+".ldb")
+ opts := &opt.Options{
+ BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10, // default value is 1
+ }
+ if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil {
+ return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
+ }
+
+ return v, nil
+
+}
+
+func (v *ChunkCacheVolume) Shutdown() {
+ if v.DataBackend != nil {
+ v.DataBackend.Close()
+ v.DataBackend = nil
+ }
+ if v.nm != nil {
+ v.nm.Close()
+ v.nm = nil
+ }
+}
+
+func (v *ChunkCacheVolume) destroy() {
+ v.Shutdown()
+ os.Remove(v.fileName + ".dat")
+ os.Remove(v.fileName + ".idx")
+ os.RemoveAll(v.fileName + ".ldb")
+}
+
+func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
+ v.destroy()
+ return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
+}
+
+func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
+
+ nv, ok := v.nm.Get(key)
+ if !ok {
+ return nil, storage.ErrorNotFound
+ }
+ data := make([]byte, nv.Size)
+ if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToAcutalOffset()); readErr != nil {
+ return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
+ v.fileName, nv.Offset.ToAcutalOffset(), nv.Offset.ToAcutalOffset()+int64(nv.Size), readErr)
+ } else {
+ if readSize != int(nv.Size) {
+ return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
+ }
+ }
+
+ return data, nil
+}
+
+func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {
+
+ offset := v.fileSize
+
+ written, err := v.DataBackend.WriteAt(data, offset)
+ if err != nil {
+ return err
+ } else if written != len(data) {
+ return fmt.Errorf("partial written %d, expected %d", written, len(data))
+ }
+
+ v.fileSize += int64(written)
+ extraSize := written % types.NeedlePaddingSize
+ if extraSize != 0 {
+ v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written))
+ v.fileSize += int64(types.NeedlePaddingSize - extraSize)
+ }
+
+ if err := v.nm.Put(key, types.ToOffset(offset), uint32(len(data))); err != nil {
+ return err
+ }
+
+ return nil
+}
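
WriteNeedle pads every entry so the next write starts on a NeedlePaddingSize boundary. The alignment arithmetic, worked standalone (assuming the usual padding size of 8 bytes for types.NeedlePaddingSize):

    package main

    import "fmt"

    const needlePaddingSize = 8 // assumed value of types.NeedlePaddingSize

    // paddedSize returns how much the .dat file grows for an n-byte write
    func paddedSize(n int) int {
    	extra := n % needlePaddingSize
    	if extra == 0 {
    		return n
    	}
    	return n + needlePaddingSize - extra
    }

    func main() {
    	for _, n := range []int{8, 10, 15, 16} {
    		fmt.Printf("write %2d bytes -> file grows by %2d\n", n, paddedSize(n))
    	}
    }
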
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk_test.go b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
new file mode 100644
index 000000000..f061f2ba2
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
@@ -0,0 +1,59 @@
+package chunk_cache
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "testing"
+)
+
+func TestOnDisk(t *testing.T) {
+
+ tmpDir, _ := ioutil.TempDir("", "c")
+ defer os.RemoveAll(tmpDir)
+
+ totalDiskSizeMb := int64(32)
+
+ cache := NewChunkCache(0, tmpDir, totalDiskSizeMb)
+
+ writeCount := 5
+ type test_data struct {
+ data []byte
+ fileId string
+ size uint64
+ }
+ testData := make([]*test_data, writeCount)
+ for i := 0; i < writeCount; i++ {
+ buff := make([]byte, 1024*1024)
+ rand.Read(buff)
+ testData[i] = &test_data{
+ data: buff,
+ fileId: fmt.Sprintf("1,%daabbccdd", i+1),
+ size: uint64(len(buff)),
+ }
+ cache.SetChunk(testData[i].fileId, testData[i].data)
+ }
+
+ for i := 0; i < writeCount; i++ {
+ data := cache.GetChunk(testData[i].fileId, testData[i].size)
+ if !bytes.Equal(data, testData[i].data) {
+ t.Errorf("failed to write to and read from cache: %d", i)
+ }
+ }
+
+ cache.Shutdown()
+
+ cache = NewChunkCache(0, tmpDir, totalDiskSizeMb)
+
+ for i := 0; i < writeCount; i++ {
+ data := cache.GetChunk(testData[i].fileId, testData[i].size)
+ if !bytes.Equal(data, testData[i].data) {
+ t.Errorf("failed to write to and read from cache: %d", i)
+ }
+ }
+
+ cache.Shutdown()
+
+}
diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go
new file mode 100644
index 000000000..c3192b548
--- /dev/null
+++ b/weed/util/chunk_cache/on_disk_cache_layer.go
@@ -0,0 +1,91 @@
+package chunk_cache
+
+import (
+ "fmt"
+ "path"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+type OnDiskCacheLayer struct {
+ diskCaches []*ChunkCacheVolume
+}
+
+func NewOnDiskCacheLayer(dir, namePrefix string, diskSizeMB int64, segmentCount int) *OnDiskCacheLayer {
+
+ volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
+ if volumeCount < segmentCount {
+ volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
+ }
+
+ c := &OnDiskCacheLayer{}
+ for i := 0; i < volumeCount; i++ {
+ fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
+ diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
+ if err != nil {
+ glog.Errorf("failed to add cache %s : %v", fileName, err)
+ } else {
+ c.diskCaches = append(c.diskCaches, diskCache)
+ }
+ }
+
+ // keep newest cache to the front
+ sort.Slice(c.diskCaches, func(i, j int) bool {
+ return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
+ })
+
+ return c
+}
+
+func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
+
+ if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
+ t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
+ if resetErr != nil {
+ glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
+ return
+ }
+ for i := len(c.diskCaches) - 1; i > 0; i-- {
+ c.diskCaches[i] = c.diskCaches[i-1]
+ }
+ c.diskCaches[0] = t
+ }
+
+ if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil {
+ glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err)
+ }
+
+}
+
+func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
+
+ var err error
+
+ for _, diskCache := range c.diskCaches {
+ data, err = diskCache.GetNeedle(needleId)
+ if err == storage.ErrorNotFound {
+ continue
+ }
+ if err != nil {
+ glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+ continue
+ }
+ if len(data) != 0 {
+ return
+ }
+ }
+
+ return nil
+
+}
+
+func (c *OnDiskCacheLayer) shutdown() {
+
+ for _, diskCache := range c.diskCaches {
+ diskCache.Shutdown()
+ }
+
+}
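NewOnDiskCacheLayer prefers fixed 30000MB cache volumes; only when the disk budget is too small for segmentCount of them does it fall back to segmentCount equally sized volumes. A standalone sketch of just that sizing decision:

    package main

    import "fmt"

    // cacheVolumes mirrors the sizing logic in NewOnDiskCacheLayer: prefer
    // 30000MB volumes, but never create fewer than segmentCount of them.
    func cacheVolumes(diskSizeMB int64, segmentCount int) (count int, sizeMB int64) {
        count, sizeMB = int(diskSizeMB/30000), int64(30000)
        if count < segmentCount {
            count, sizeMB = segmentCount, diskSizeMB/int64(segmentCount)
        }
        return
    }

    func main() {
        fmt.Println(cacheVolumes(1024, 4))  // 4 volumes of 256MB each
        fmt.Println(cacheVolumes(90000, 2)) // 3 volumes of 30000MB each
    }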
diff --git a/weed/util/cipher.go b/weed/util/cipher.go
new file mode 100644
index 000000000..f044c2ca3
--- /dev/null
+++ b/weed/util/cipher.go
@@ -0,0 +1,60 @@
+package util
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "errors"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+type CipherKey []byte
+
+func GenCipherKey() CipherKey {
+ key := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ glog.Fatalf("random key gen: %v", err)
+ }
+ return CipherKey(key)
+}
+
+func Encrypt(plaintext []byte, key CipherKey) ([]byte, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ nonce := make([]byte, gcm.NonceSize())
+ if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
+ return nil, err
+ }
+
+ return gcm.Seal(nonce, nonce, plaintext, nil), nil
+}
+
+func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ nonceSize := gcm.NonceSize()
+ if len(ciphertext) < nonceSize {
+ return nil, errors.New("ciphertext too short")
+ }
+
+ nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:]
+ return gcm.Open(nil, nonce, ciphertext, nil)
+}
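Encrypt seals with AES-256-GCM and prepends the randomly generated nonce to the ciphertext; Decrypt splits the nonce back off before opening. A minimal round-trip test sketch against the two functions above:

    package util

    import (
        "bytes"
        "testing"
    )

    func TestEncryptDecryptRoundTrip(t *testing.T) {
        key := GenCipherKey()
        plaintext := []byte("hello seaweedfs")

        ciphertext, err := Encrypt(plaintext, key)
        if err != nil {
            t.Fatal(err)
        }
        decrypted, err := Decrypt(ciphertext, key)
        if err != nil {
            t.Fatal(err)
        }
        if !bytes.Equal(plaintext, decrypted) {
            t.Error("round trip mismatch")
        }
    }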
diff --git a/weed/util/cipher_test.go b/weed/util/cipher_test.go
new file mode 100644
index 000000000..026c96ea3
--- /dev/null
+++ b/weed/util/cipher_test.go
@@ -0,0 +1,17 @@
+package util
+
+import (
+ "encoding/base64"
+ "testing"
+)
+
+func TestSameAsJavaImplementation(t *testing.T) {
+ str := "QVVhmqg112NMT7F+G/7QPynqSln3xPIhKdFGmTVKZD6IS0noyr2Z5kXFF6fPjZ/7Hq8kRhlmLeeqZUccxyaZHezOdgkjS6d4NTdHf5IjXzk7"
+ cipherText, _ := base64.StdEncoding.DecodeString(str)
+ secretKey := []byte("256-bit key for AES 256 GCM encr")
+ plaintext, err := Decrypt(cipherText, CipherKey(secretKey))
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Log(string(plaintext))
+}
diff --git a/weed/util/compression.go b/weed/util/compression.go
new file mode 100644
index 000000000..b526f47c9
--- /dev/null
+++ b/weed/util/compression.go
@@ -0,0 +1,126 @@
+package util
+
+import (
+ "bytes"
+ "compress/flate"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/klauspost/compress/zstd"
+)
+
+func GzipData(input []byte) ([]byte, error) {
+ buf := new(bytes.Buffer)
+ w, _ := gzip.NewWriterLevel(buf, flate.BestSpeed)
+ if _, err := w.Write(input); err != nil {
+ glog.V(2).Infoln("error compressing data:", err)
+ return nil, err
+ }
+ if err := w.Close(); err != nil {
+ glog.V(2).Infoln("error closing compressed data:", err)
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+var zstdEncoder, _ = zstd.NewWriter(nil)
+
+func ZstdData(input []byte) ([]byte, error) {
+ return zstdEncoder.EncodeAll(input, nil), nil
+}
+
+func DecompressData(input []byte) ([]byte, error) {
+ if IsGzippedContent(input) {
+ return ungzipData(input)
+ }
+ if IsZstdContent(input) {
+ return unzstdData(input)
+ }
+ return nil, fmt.Errorf("unsupported compression")
+}
+
+func ungzipData(input []byte) ([]byte, error) {
+ buf := bytes.NewBuffer(input)
+ r, err := gzip.NewReader(buf)
+ if err != nil {
+ glog.V(2).Infoln("error creating gzip reader:", err)
+ return nil, err
+ }
+ defer r.Close()
+ output, err := ioutil.ReadAll(r)
+ if err != nil {
+ glog.V(2).Infoln("error uncompressing data:", err)
+ }
+ return output, err
+}
+
+var zstdDecoder, _ = zstd.NewReader(nil)
+
+func unzstdData(input []byte) ([]byte, error) {
+ return zstdDecoder.DecodeAll(input, nil)
+}
+
+func IsGzippedContent(data []byte) bool {
+ if len(data) < 2 {
+ return false
+ }
+ return data[0] == 31 && data[1] == 139
+}
+
+func IsZstdContent(data []byte) bool {
+ if len(data) < 4 {
+ return false
+ }
+ return data[3] == 0xFD && data[2] == 0x2F && data[1] == 0xB5 && data[0] == 0x28
+}
+
+// IsCompressableFileType reports whether a file of the given extension or
+// mime type should be compressed. The default is not to compress, since
+// compression can be done on the client side.
+func IsCompressableFileType(ext, mtype string) (shouldBeCompressed, iAmSure bool) {
+
+ // text
+ if strings.HasPrefix(mtype, "text/") {
+ return true, true
+ }
+
+ // images
+ switch ext {
+ case ".svg", ".bmp", ".wav":
+ return true, true
+ }
+ if strings.HasPrefix(mtype, "image/") {
+ return false, true
+ }
+
+ // by file name extension
+ switch ext {
+ case ".zip", ".rar", ".gz", ".bz2", ".xz", ".zst":
+ return false, true
+ case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json":
+ return true, true
+ case ".php", ".java", ".go", ".rb", ".c", ".cpp", ".h", ".hpp":
+ return true, true
+ case ".png", ".jpg", ".jpeg":
+ return false, true
+ }
+
+ // by mime type
+ if strings.HasPrefix(mtype, "application/") {
+ if strings.HasSuffix(mtype, "zstd") {
+ return false, true
+ }
+ if strings.HasSuffix(mtype, "xml") {
+ return true, true
+ }
+ if strings.HasSuffix(mtype, "script") {
+ return true, true
+ }
+ }
+
+ if strings.HasPrefix(mtype, "audio/") {
+ switch strings.TrimPrefix(mtype, "audio/") {
+ case "wave", "wav", "x-wav", "x-pn-wav":
+ return true, true
+ }
+ }
+
+ return false, false
+}
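DecompressData dispatches on magic bytes: 0x1f 0x8b for gzip, and the zstd frame magic 0x28 0xB5 0x2F 0xFD read little-endian. A round-trip test sketch for the gzip path, using only the functions above:

    package util

    import (
        "bytes"
        "testing"
    )

    func TestGzipRoundTrip(t *testing.T) {
        input := bytes.Repeat([]byte("seaweedfs "), 100)

        compressed, err := GzipData(input)
        if err != nil {
            t.Fatal(err)
        }
        if !IsGzippedContent(compressed) {
            t.Fatal("expected gzip magic bytes 0x1f 0x8b")
        }
        output, err := DecompressData(compressed)
        if err != nil {
            t.Fatal(err)
        }
        if !bytes.Equal(input, output) {
            t.Error("round trip mismatch")
        }
    }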
diff --git a/weed/util/compression_test.go b/weed/util/compression_test.go
new file mode 100644
index 000000000..b515e8988
--- /dev/null
+++ b/weed/util/compression_test.go
@@ -0,0 +1,21 @@
+package util
+
+import (
+ "testing"
+
+ "golang.org/x/tools/godoc/util"
+)
+
+func TestIsGzippable(t *testing.T) {
+ buf := make([]byte, 1024)
+
+ isText := util.IsText(buf)
+
+ if isText {
+ t.Error("buf with zeros are not text")
+ }
+
+ compressed, _ := GzipData(buf)
+
+ t.Logf("compressed size %d\n", len(compressed))
+}
diff --git a/weed/util/config.go b/weed/util/config.go
index 77cab3019..7b6e92f08 100644
--- a/weed/util/config.go
+++ b/weed/util/config.go
@@ -1,10 +1,51 @@
package util
+import (
+ "strings"
+
+ "github.com/spf13/viper"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
type Configuration interface {
GetString(key string) string
GetBool(key string) bool
GetInt(key string) int
- GetInt64(key string) int64
- GetFloat64(key string) float64
GetStringSlice(key string) []string
+ SetDefault(key string, value interface{})
+}
+
+func LoadConfiguration(configFileName string, required bool) (loaded bool) {
+
+ // find a filer store
+ viper.SetConfigName(configFileName) // name of config file (without extension)
+ viper.AddConfigPath(".") // optionally look for config in the working directory
+ viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths
+ viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in
+
+ glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed())
+
+ if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file
+ glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
+ if required {
+ glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+
+ "\n\nPlease use this command to generate the default %s.toml file\n"+
+ " weed scaffold -config=%s -output=.\n\n\n",
+ configFileName, configFileName, configFileName)
+ } else {
+ return false
+ }
+ }
+
+ return true
+}
+
+func GetViper() *viper.Viper {
+ v := &viper.Viper{}
+ *v = *viper.GetViper()
+ v.AutomaticEnv()
+ v.SetEnvPrefix("weed")
+ v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+ return v
}
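LoadConfiguration merges an optional TOML file from the usual search paths, and GetViper returns a copy wired for WEED_-prefixed environment overrides, with dots in keys mapped to underscores. A hypothetical caller sketch; the filer config name and the leveldb2.enabled key are illustrative only:

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/util"
    )

    func main() {
        // optionally load filer.toml from ., $HOME/.seaweedfs, or /etc/seaweedfs
        if util.LoadConfiguration("filer", false) {
            v := util.GetViper()
            // setting WEED_LEVELDB2_ENABLED=true would override leveldb2.enabled
            fmt.Println("leveldb2 enabled:", v.GetBool("leveldb2.enabled"))
        }
    }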
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 9ddf07261..3433c550b 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -1,5 +1,14 @@
package util
-const (
- VERSION = "1.23"
+import (
+ "fmt"
)
+
+var (
+ VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 84)
+ COMMIT = ""
+)
+
+func Version() string {
+ return VERSION + " " + COMMIT
+}
diff --git a/weed/util/constants_4bytes.go b/weed/util/constants_4bytes.go
new file mode 100644
index 000000000..a29d9d3b0
--- /dev/null
+++ b/weed/util/constants_4bytes.go
@@ -0,0 +1,8 @@
+// +build !5BytesOffset
+
+package util
+
+const (
+ sizeLimit = "30GB"
+ VolumeSizeLimitGB = 30
+)
diff --git a/weed/util/constants_5bytes.go b/weed/util/constants_5bytes.go
new file mode 100644
index 000000000..91ce4066f
--- /dev/null
+++ b/weed/util/constants_5bytes.go
@@ -0,0 +1,8 @@
+// +build 5BytesOffset
+
+package util
+
+const (
+ sizeLimit = "8000GB"
+ VolumeSizeLimitGB = 8000
+)
diff --git a/weed/util/file_util.go b/weed/util/file_util.go
index 8ff2978ba..ff725830b 100644
--- a/weed/util/file_util.go
+++ b/weed/util/file_util.go
@@ -3,6 +3,7 @@ package util
import (
"errors"
"os"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
)
@@ -30,3 +31,35 @@ func GetFileSize(file *os.File) (size int64, err error) {
}
return
}
+
+func FileExists(filename string) bool {
+
+ _, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+ return false
+ }
+ return true
+
+}
+
+func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) {
+ exists = true
+ fi, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+ exists = false
+ return
+ }
+ if err != nil {
+ glog.Errorf("check %s: %v", filename, err)
+ return
+ }
+ if fi.Mode()&0400 != 0 {
+ canRead = true
+ }
+ if fi.Mode()&0200 != 0 {
+ canWrite = true
+ }
+ modTime = fi.ModTime()
+ fileSize = fi.Size()
+ return
+}
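CheckFile folds a single os.Stat call into existence, owner read/write permission bits (it only inspects the 0400 and 0200 bits), mod time, and size. A small test sketch of the expected behavior:

    package util

    import (
        "io/ioutil"
        "os"
        "testing"
    )

    func TestCheckFileExample(t *testing.T) {
        f, err := ioutil.TempFile("", "check") // created with mode 0600
        if err != nil {
            t.Fatal(err)
        }
        defer os.Remove(f.Name())
        f.WriteString("hello")
        f.Close()

        exists, canRead, canWrite, _, size := CheckFile(f.Name())
        if !exists || !canRead || !canWrite || size != 5 {
            t.Errorf("exists=%v canRead=%v canWrite=%v size=%d", exists, canRead, canWrite, size)
        }
    }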
diff --git a/weed/util/file_util_non_posix.go b/weed/util/file_util_non_posix.go
new file mode 100644
index 000000000..ffcfef6d5
--- /dev/null
+++ b/weed/util/file_util_non_posix.go
@@ -0,0 +1,12 @@
+// +build linux darwin freebsd netbsd openbsd plan9 solaris zos
+
+package util
+
+import (
+ "os"
+ "syscall"
+)
+
+func GetFileUidGid(fi os.FileInfo) (uid, gid uint32) {
+ return fi.Sys().(*syscall.Stat_t).Uid, fi.Sys().(*syscall.Stat_t).Gid
+}
diff --git a/weed/util/file_util_posix.go b/weed/util/file_util_posix.go
new file mode 100644
index 000000000..22ca60b3b
--- /dev/null
+++ b/weed/util/file_util_posix.go
@@ -0,0 +1,11 @@
+// +build windows
+
+package util
+
+import (
+ "os"
+)
+
+func GetFileUidGid(fi os.FileInfo) (uid, gid uint32) {
+ return 0, 0
+}
diff --git a/weed/util/fullpath.go b/weed/util/fullpath.go
new file mode 100644
index 000000000..4ce8a2f90
--- /dev/null
+++ b/weed/util/fullpath.go
@@ -0,0 +1,56 @@
+package util
+
+import (
+ "path/filepath"
+ "strings"
+)
+
+type FullPath string
+
+func NewFullPath(dir, name string) FullPath {
+ return FullPath(dir).Child(name)
+}
+
+func (fp FullPath) DirAndName() (string, string) {
+ dir, name := filepath.Split(string(fp))
+ if dir == "/" {
+ return dir, name
+ }
+ if len(dir) < 1 {
+ return "/", ""
+ }
+ return dir[:len(dir)-1], name
+}
+
+func (fp FullPath) Name() string {
+ _, name := filepath.Split(string(fp))
+ return name
+}
+
+func (fp FullPath) Child(name string) FullPath {
+ dir := string(fp)
+ if strings.HasSuffix(dir, "/") {
+ return FullPath(dir + name)
+ }
+ return FullPath(dir + "/" + name)
+}
+
+func (fp FullPath) AsInode() uint64 {
+ return uint64(HashStringToLong(string(fp)))
+}
+
+// split, but skipping the root
+func (fp FullPath) Split() []string {
+ if fp == "" || fp == "/" {
+ return []string{}
+ }
+ return strings.Split(string(fp)[1:], "/")
+}
+
+func Join(names ...string) string {
+ return filepath.ToSlash(filepath.Join(names...))
+}
+
+func JoinPath(names ...string) FullPath {
+ return FullPath(Join(names...))
+}
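FullPath is a thin wrapper over a slash-separated string: Child joins without doubling slashes, DirAndName trims the trailing slash off the parent directory, and Split skips the root. A behavior sketch:

    package util

    import "testing"

    func TestFullPathBasics(t *testing.T) {
        fp := NewFullPath("/home", "user")
        if fp != FullPath("/home/user") {
            t.Errorf("unexpected path: %s", fp)
        }
        dir, name := fp.DirAndName()
        if dir != "/home" || name != "user" {
            t.Errorf("got dir=%s name=%s", dir, name)
        }
        if parts := fp.Split(); len(parts) != 2 || parts[0] != "home" {
            t.Errorf("unexpected split: %v", parts)
        }
    }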
diff --git a/weed/util/pprof.go b/weed/util/grace/pprof.go
similarity index 97%
rename from weed/util/pprof.go
rename to weed/util/grace/pprof.go
index a2621ceee..14686bfc8 100644
--- a/weed/util/pprof.go
+++ b/weed/util/grace/pprof.go
@@ -1,4 +1,4 @@
-package util
+package grace
import (
"os"
diff --git a/weed/util/signal_handling.go b/weed/util/grace/signal_handling.go
similarity index 98%
rename from weed/util/signal_handling.go
rename to weed/util/grace/signal_handling.go
index 99447e8be..7cca46764 100644
--- a/weed/util/signal_handling.go
+++ b/weed/util/grace/signal_handling.go
@@ -1,6 +1,6 @@
// +build !plan9
-package util
+package grace
import (
"os"
diff --git a/weed/util/signal_handling_notsupported.go b/weed/util/grace/signal_handling_notsupported.go
similarity index 78%
rename from weed/util/signal_handling_notsupported.go
rename to weed/util/grace/signal_handling_notsupported.go
index c389cfb7e..5335915a1 100644
--- a/weed/util/signal_handling_notsupported.go
+++ b/weed/util/grace/signal_handling_notsupported.go
@@ -1,6 +1,6 @@
// +build plan9
-package util
+package grace
func OnInterrupt(fn func()) {
}
diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go
deleted file mode 100644
index d029d21ae..000000000
--- a/weed/util/grpc_client_server.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package util
-
-import (
- "fmt"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
-)
-
-var (
- // cache grpc connections
- grpcClients = make(map[string]*grpc.ClientConn)
- grpcClientsLock sync.Mutex
-)
-
-func NewGrpcServer() *grpc.Server {
- return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: 10 * time.Second, // wait time before ping if no activity
- Timeout: 20 * time.Second, // ping timeout
- }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
- MinTime: 60 * time.Second, // min time a client should wait before sending a ping
- }))
-}
-
-func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- // opts = append(opts, grpc.WithBlock())
- // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
- opts = append(opts, grpc.WithInsecure())
- opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
- Time: 30 * time.Second, // client ping server if no activity for this long
- Timeout: 20 * time.Second,
- }))
-
- return grpc.Dial(address, opts...)
-}
-
-func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
-
- grpcClientsLock.Lock()
-
- existingConnection, found := grpcClients[address]
- if found {
- grpcClientsLock.Unlock()
- return fn(existingConnection)
- }
-
- grpcConnection, err := GrpcDial(address, opts...)
- if err != nil {
- grpcClientsLock.Unlock()
- return fmt.Errorf("fail to dial %s: %v", address, err)
- }
-
- grpcClients[address] = grpcConnection
- grpcClientsLock.Unlock()
-
- err = fn(grpcConnection)
- if err != nil {
- grpcClientsLock.Lock()
- delete(grpcClients, address)
- grpcClientsLock.Unlock()
- }
-
- return err
-}
-
-func ParseServerToGrpcAddress(server string, optionalGrpcPort int) (serverGrpcAddress string, err error) {
- hostnameAndPort := strings.Split(server, ":")
- if len(hostnameAndPort) != 2 {
- return "", fmt.Errorf("The server should have hostname:port format: %v", hostnameAndPort)
- }
-
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- return "", fmt.Errorf("The server port parse error: %v", parseErr)
- }
-
- filerGrpcPort := int(filerPort) + 10000
- if optionalGrpcPort != 0 {
- filerGrpcPort = optionalGrpcPort
- }
-
- return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
-}
diff --git a/weed/util/http_util.go b/weed/util/http_util.go
index 21e0a678d..c67eb3276 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http_util.go
@@ -11,9 +11,8 @@ import (
"net/http"
"net/url"
"strings"
- "time"
- "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/glog"
)
var (
@@ -27,23 +26,22 @@ func init() {
}
client = &http.Client{
Transport: Transport,
- Timeout: 5 * time.Second,
}
}
func PostBytes(url string, body []byte) ([]byte, error) {
- r, err := client.Post(url, "application/octet-stream", bytes.NewReader(body))
+ r, err := client.Post(url, "", bytes.NewReader(body))
if err != nil {
return nil, fmt.Errorf("Post to %s: %v", url, err)
}
defer r.Body.Close()
- if r.StatusCode >= 400 {
- return nil, fmt.Errorf("%s: %s", url, r.Status)
- }
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, fmt.Errorf("Read response body: %v", err)
}
+ if r.StatusCode >= 400 {
+ return nil, fmt.Errorf("%s: %s", url, r.Status)
+ }
return b, nil
}
@@ -90,14 +88,14 @@ func Head(url string) (http.Header, error) {
if err != nil {
return nil, err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode >= 400 {
return nil, fmt.Errorf("%s: %s", url, r.Status)
}
return r.Header, nil
}
-func Delete(url string, jwt security.EncodedJwt) error {
+func Delete(url string, jwt string) error {
req, err := http.NewRequest("DELETE", url, nil)
if jwt != "" {
req.Header.Set("Authorization", "BEARER "+string(jwt))
@@ -119,7 +117,7 @@ func Delete(url string, jwt security.EncodedJwt) error {
return nil
}
m := make(map[string]interface{})
- if e := json.Unmarshal(body, m); e == nil {
+ if e := json.Unmarshal(body, &m); e == nil {
if s, ok := m["error"].(string); ok {
return errors.New(s)
}
@@ -132,7 +130,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB
if err != nil {
return err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode != 200 {
return fmt.Errorf("%s: %s", url, r.Status)
}
@@ -155,7 +153,7 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e
if err != nil {
return err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode != 200 {
return fmt.Errorf("%s: %s", url, r.Status)
}
@@ -191,11 +189,22 @@ func NormalizeUrl(url string) string {
return "http://" + url
}
-func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (n int64, e error) {
+func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) {
+
+ if cipherKey != nil {
+ var n int
+ err := readEncryptedUrl(fileUrl, cipherKey, isContentCompressed, isFullChunk, offset, size, func(data []byte) {
+ n = copy(buf, data)
+ })
+ return int64(n), err
+ }
- req, _ := http.NewRequest("GET", fileUrl, nil)
- if isReadRange {
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)))
+ req, err := http.NewRequest("GET", fileUrl, nil)
+ if err != nil {
+ return 0, err
+ }
+ if !isFullChunk {
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
} else {
req.Header.Set("Accept-Encoding", "gzip")
}
@@ -211,7 +220,8 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo
}
var reader io.ReadCloser
- switch r.Header.Get("Content-Encoding") {
+ contentEncoding := r.Header.Get("Content-Encoding")
+ switch contentEncoding {
case "gzip":
reader, err = gzip.NewReader(r.Body)
defer reader.Close()
@@ -219,55 +229,125 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo
reader = r.Body
}
- var i, m int
+ var (
+ i, m int
+ n int64
+ )
+ // refers to https://github.com/golang/go/blob/master/src/bytes/buffer.go#L199
+ // commit id c170b14c2c1cfb2fd853a37add92a82fd6eb4318
for {
m, err = reader.Read(buf[i:])
- if m == 0 {
- return
- }
i += m
n += int64(m)
if err == io.EOF {
return n, nil
}
- if e != nil {
- return n, e
+ if err != nil {
+ return n, err
+ }
+ if n == int64(len(buf)) {
+ break
}
}
-
+ // drain the remaining response body so the connection can be reused
+ data, _ := ioutil.ReadAll(reader)
+ if len(data) != 0 {
+ glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data))
+ }
+ return n, err
}
-func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (n int64, e error) {
+func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
+
+ if cipherKey != nil {
+ return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn)
+ }
+
+ req, err := http.NewRequest("GET", fileUrl, nil)
+ if err != nil {
+ return err
+ }
- req, _ := http.NewRequest("GET", fileUrl, nil)
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)))
+ if !isFullChunk {
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
+ }
r, err := client.Do(req)
if err != nil {
- return 0, err
+ return err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode >= 400 {
- return 0, fmt.Errorf("%s: %s", fileUrl, r.Status)
+ return fmt.Errorf("%s: %s", fileUrl, r.Status)
}
- var m int
+ var (
+ m int
+ )
buf := make([]byte, 64*1024)
for {
m, err = r.Body.Read(buf)
- if m == 0 {
- return
- }
fn(buf[:m])
- n += int64(m)
if err == io.EOF {
- return n, nil
+ return nil
+ }
+ if err != nil {
+ return err
}
- if e != nil {
- return n, e
+ }
+
+}
+
+func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
+ encryptedData, err := Get(fileUrl)
+ if err != nil {
+ return fmt.Errorf("fetch %s: %v", fileUrl, err)
+ }
+ decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey))
+ if err != nil {
+ return fmt.Errorf("decrypt %s: %v", fileUrl, err)
+ }
+ if isContentCompressed {
+ decryptedData, err = DecompressData(decryptedData)
+ if err != nil {
+ return fmt.Errorf("unzip decrypt %s: %v", fileUrl, err)
}
}
+ if len(decryptedData) < int(offset)+size {
+ return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size)
+ }
+ if isFullChunk {
+ fn(decryptedData)
+ } else {
+ fn(decryptedData[int(offset) : int(offset)+size])
+ }
+ return nil
+}
+
+func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) {
+
+ req, err := http.NewRequest("GET", fileUrl, nil)
+ if err != nil {
+ return nil, err
+ }
+ if rangeHeader != "" {
+ req.Header.Add("Range", rangeHeader)
+ }
+
+ r, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if r.StatusCode >= 400 {
+ return nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
+ }
+
+ return r.Body, nil
+}
+func CloseResponse(resp *http.Response) {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
}
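ReadUrlAsReaderCloser hands the raw response body to the caller, who must close it; draining leftover bytes first, as CloseResponse does, lets the keep-alive pool reuse the underlying connection. A hypothetical caller sketch (readFirstKilobyte is illustrative, not part of the package):

    package util

    import (
        "io"
        "io/ioutil"
    )

    func readFirstKilobyte(fileUrl string) ([]byte, error) {
        body, err := ReadUrlAsReaderCloser(fileUrl, "bytes=0-1023")
        if err != nil {
            return nil, err
        }
        defer func() {
            io.Copy(ioutil.Discard, body) // drain so the connection can be reused
            body.Close()
        }()
        return ioutil.ReadAll(io.LimitReader(body, 1024))
    }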
diff --git a/weed/util/httpdown/http_down.go b/weed/util/httpdown/http_down.go
new file mode 100644
index 000000000..5cbd9611c
--- /dev/null
+++ b/weed/util/httpdown/http_down.go
@@ -0,0 +1,395 @@
+// Package httpdown provides http.ConnState enabled graceful termination of
+// http.Server.
+// Based on github.com/facebookarchive/httpdown, which is MIT licensed;
+// we add support for serving over TLS.
+package httpdown
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/facebookgo/clock"
+ "github.com/facebookgo/stats"
+)
+
+const (
+ defaultStopTimeout = time.Minute
+ defaultKillTimeout = time.Minute
+)
+
+// A Server encapsulates the process of accepting new connections and
+// serving them, and gracefully shutting down the listener without dropping
+// active connections.
+type Server interface {
+ // Wait waits for the serving loop to finish. This will happen when Stop is
+ // called, at which point it returns no error, or if there is an error in the
+ // serving loop. You must call Wait after calling Serve or ListenAndServe.
+ Wait() error
+
+ // Stop stops the listener. It will block until all connections have been
+ // closed.
+ Stop() error
+}
+
+// HTTP defines the configuration for serving a http.Server. Multiple calls to
+// Serve or ListenAndServe can be made on the same HTTP instance. The default
+// timeouts of 1 minute each result in a maximum of 2 minutes before a Stop()
+// returns.
+type HTTP struct {
+ // StopTimeout is the duration before we begin force closing connections.
+ // Defaults to 1 minute.
+ StopTimeout time.Duration
+
+ // KillTimeout is the duration before which we completely give up and abort
+ // even though we still have connected clients. This is useful when a large
+ // number of client connections exist and closing them can take a long time.
+ // Note, this is in addition to the StopTimeout. Defaults to 1 minute.
+ KillTimeout time.Duration
+
+ // Stats is optional. If provided, it will be used to record various metrics.
+ Stats stats.Client
+
+ // Clock allows for testing timing related functionality. Do not specify this
+ // in production code.
+ Clock clock.Clock
+
+ // when set CertFile and KeyFile, the httpDown will start a http with TLS.
+ // Files containing a certificate and matching private key for the
+ // server must be provided if neither the Server's
+ // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
+ // If the certificate is signed by a certificate authority, the
+ // certFile should be the concatenation of the server's certificate,
+ // any intermediates, and the CA's certificate.
+ CertFile, KeyFile string
+}
+
+// Serve provides the low-level API which is useful if you're creating your own
+// net.Listener.
+func (h HTTP) Serve(s *http.Server, l net.Listener) Server {
+ stopTimeout := h.StopTimeout
+ if stopTimeout == 0 {
+ stopTimeout = defaultStopTimeout
+ }
+ killTimeout := h.KillTimeout
+ if killTimeout == 0 {
+ killTimeout = defaultKillTimeout
+ }
+ klock := h.Clock
+ if klock == nil {
+ klock = clock.New()
+ }
+
+ ss := &server{
+ stopTimeout: stopTimeout,
+ killTimeout: killTimeout,
+ stats: h.Stats,
+ clock: klock,
+ oldConnState: s.ConnState,
+ listener: l,
+ server: s,
+ serveDone: make(chan struct{}),
+ serveErr: make(chan error, 1),
+ new: make(chan net.Conn),
+ active: make(chan net.Conn),
+ idle: make(chan net.Conn),
+ closed: make(chan net.Conn),
+ stop: make(chan chan struct{}),
+ kill: make(chan chan struct{}),
+ certFile: h.CertFile,
+ keyFile: h.KeyFile,
+ }
+ s.ConnState = ss.connState
+ go ss.manage()
+ go ss.serve()
+ return ss
+}
+
+// ListenAndServe returns a Server for the given http.Server. It is equivalent
+// to ListenAndServe from the standard library, but returns immediately.
+// Requests will be accepted in a background goroutine. If the http.Server has
+// a non-nil TLSConfig, a TLS enabled listener will be setup.
+func (h HTTP) ListenAndServe(s *http.Server) (Server, error) {
+ addr := s.Addr
+ if addr == "" {
+ if s.TLSConfig == nil {
+ addr = ":http"
+ } else {
+ addr = ":https"
+ }
+ }
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ stats.BumpSum(h.Stats, "listen.error", 1)
+ return nil, err
+ }
+ if s.TLSConfig != nil {
+ l = tls.NewListener(l, s.TLSConfig)
+ }
+ return h.Serve(s, l), nil
+}
+
+// server manages the serving process and allows for gracefully stopping it.
+type server struct {
+ stopTimeout time.Duration
+ killTimeout time.Duration
+ stats stats.Client
+ clock clock.Clock
+
+ oldConnState func(net.Conn, http.ConnState)
+ server *http.Server
+ serveDone chan struct{}
+ serveErr chan error
+ listener net.Listener
+
+ new chan net.Conn
+ active chan net.Conn
+ idle chan net.Conn
+ closed chan net.Conn
+ stop chan chan struct{}
+ kill chan chan struct{}
+
+ stopOnce sync.Once
+ stopErr error
+
+ certFile, keyFile string
+}
+
+func (s *server) connState(c net.Conn, cs http.ConnState) {
+ if s.oldConnState != nil {
+ s.oldConnState(c, cs)
+ }
+
+ switch cs {
+ case http.StateNew:
+ s.new <- c
+ case http.StateActive:
+ s.active <- c
+ case http.StateIdle:
+ s.idle <- c
+ case http.StateHijacked, http.StateClosed:
+ s.closed <- c
+ }
+}
+
+func (s *server) manage() {
+ defer func() {
+ close(s.new)
+ close(s.active)
+ close(s.idle)
+ close(s.closed)
+ close(s.stop)
+ close(s.kill)
+ }()
+
+ var stopDone chan struct{}
+
+ conns := map[net.Conn]http.ConnState{}
+ var countNew, countActive, countIdle float64
+
+ // decConn decrements the count associated with the current state of the
+ // given connection.
+ decConn := func(c net.Conn) {
+ switch conns[c] {
+ default:
+ panic(fmt.Errorf("unknown existing connection: %s", c))
+ case http.StateNew:
+ countNew--
+ case http.StateActive:
+ countActive--
+ case http.StateIdle:
+ countIdle--
+ }
+ }
+
+ // set up a ticker to report various values every minute. If we don't have
+ // a Stats implementation provided, we stop the ticker so it never ticks.
+ statsTicker := s.clock.Ticker(time.Minute)
+ if s.stats == nil {
+ statsTicker.Stop()
+ }
+
+ for {
+ select {
+ case <-statsTicker.C:
+ // we'll only get here when s.stats is not nil
+ s.stats.BumpAvg("http-state.new", countNew)
+ s.stats.BumpAvg("http-state.active", countActive)
+ s.stats.BumpAvg("http-state.idle", countIdle)
+ s.stats.BumpAvg("http-state.total", countNew+countActive+countIdle)
+ case c := <-s.new:
+ conns[c] = http.StateNew
+ countNew++
+ case c := <-s.active:
+ decConn(c)
+ countActive++
+
+ conns[c] = http.StateActive
+ case c := <-s.idle:
+ decConn(c)
+ countIdle++
+
+ conns[c] = http.StateIdle
+
+ // if we're already stopping, close it
+ if stopDone != nil {
+ c.Close()
+ }
+ case c := <-s.closed:
+ stats.BumpSum(s.stats, "conn.closed", 1)
+ decConn(c)
+ delete(conns, c)
+
+ // if we're waiting to stop and are all empty, we just closed the last
+ // connection and we're done.
+ if stopDone != nil && len(conns) == 0 {
+ close(stopDone)
+ return
+ }
+ case stopDone = <-s.stop:
+ // if we're already all empty, we're already done
+ if len(conns) == 0 {
+ close(stopDone)
+ return
+ }
+
+ // close current idle connections right away
+ for c, cs := range conns {
+ if cs == http.StateIdle {
+ c.Close()
+ }
+ }
+
+ // continue the loop and wait for all the ConnState updates which will
+ // eventually close(stopDone) and return from this goroutine.
+
+ case killDone := <-s.kill:
+ // force close all connections
+ stats.BumpSum(s.stats, "kill.conn.count", float64(len(conns)))
+ for c := range conns {
+ c.Close()
+ }
+
+ // don't block the kill.
+ close(killDone)
+
+ // continue the loop and wait for all the ConnState updates; we will
+ // return from this goroutine when we're all done. Otherwise we'd try to
+ // send those ConnState updates on closed channels.
+
+ }
+ }
+}
+
+func (s *server) serve() {
+ stats.BumpSum(s.stats, "serve", 1)
+ if s.certFile == "" && s.keyFile == "" {
+ s.serveErr <- s.server.Serve(s.listener)
+ } else {
+ s.serveErr <- s.server.ServeTLS(s.listener, s.certFile, s.keyFile)
+ }
+ close(s.serveDone)
+ close(s.serveErr)
+}
+
+func (s *server) Wait() error {
+ if err := <-s.serveErr; !isUseOfClosedError(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *server) Stop() error {
+ s.stopOnce.Do(func() {
+ defer stats.BumpTime(s.stats, "stop.time").End()
+ stats.BumpSum(s.stats, "stop", 1)
+
+ // first disable keep-alive for new connections
+ s.server.SetKeepAlivesEnabled(false)
+
+ // then close the listener so new connections can't come through
+ closeErr := s.listener.Close()
+ <-s.serveDone
+
+ // then trigger the background goroutine to stop and wait for it
+ stopDone := make(chan struct{})
+ s.stop <- stopDone
+
+ // wait for stop
+ select {
+ case <-stopDone:
+ case <-s.clock.After(s.stopTimeout):
+ defer stats.BumpTime(s.stats, "kill.time").End()
+ stats.BumpSum(s.stats, "kill", 1)
+
+ // stop timed out, wait for kill
+ killDone := make(chan struct{})
+ s.kill <- killDone
+ select {
+ case <-killDone:
+ case <-s.clock.After(s.killTimeout):
+ // kill timed out, give up
+ stats.BumpSum(s.stats, "kill.timeout", 1)
+ }
+ }
+
+ if closeErr != nil && !isUseOfClosedError(closeErr) {
+ stats.BumpSum(s.stats, "listener.close.error", 1)
+ s.stopErr = closeErr
+ }
+ })
+ return s.stopErr
+}
+
+func isUseOfClosedError(err error) bool {
+ if err == nil {
+ return false
+ }
+ if opErr, ok := err.(*net.OpError); ok {
+ err = opErr.Err
+ }
+ return err.Error() == "use of closed network connection"
+}
+
+// ListenAndServe is a convenience function to serve and wait for a SIGTERM
+// or SIGINT before shutting down.
+func ListenAndServe(s *http.Server, hd *HTTP) error {
+ if hd == nil {
+ hd = &HTTP{}
+ }
+ hs, err := hd.ListenAndServe(s)
+ if err != nil {
+ return err
+ }
+
+ waiterr := make(chan error, 1)
+ go func() {
+ defer close(waiterr)
+ waiterr <- hs.Wait()
+ }()
+
+ signals := make(chan os.Signal, 10)
+ signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
+
+ select {
+ case err := <-waiterr:
+ if err != nil {
+ return err
+ }
+ case <-signals:
+ signal.Stop(signals)
+ if err := hs.Stop(); err != nil {
+ return err
+ }
+ if err := <-waiterr; err != nil {
+ return err
+ }
+ }
+ return nil
+}
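A minimal usage sketch for the package: serve until SIGTERM or SIGINT, then give in-flight connections up to 30 seconds to drain before force-closing them. The address and timeouts are illustrative:

    package main

    import (
        "net/http"
        "time"

        "github.com/chrislusf/seaweedfs/weed/util/httpdown"
    )

    func main() {
        server := &http.Server{Addr: ":8080", Handler: http.DefaultServeMux}
        hd := &httpdown.HTTP{
            StopTimeout: 30 * time.Second, // grace period for draining
            KillTimeout: 30 * time.Second, // additional period before giving up
        }
        if err := httpdown.ListenAndServe(server, hd); err != nil {
            panic(err)
        }
    }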
diff --git a/weed/util/inits.go b/weed/util/inits.go
new file mode 100644
index 000000000..378878012
--- /dev/null
+++ b/weed/util/inits.go
@@ -0,0 +1,52 @@
+package util
+
+import (
+ "fmt"
+ "sort"
+)
+
+// HumanReadableIntsMax joins a series of ints into a compact human-readable
+// string like "1-3 5 ...", truncating after max values.
+func HumanReadableIntsMax(max int, ids ...int) string {
+ if len(ids) <= max {
+ return HumanReadableInts(ids...)
+ }
+
+ return HumanReadableInts(ids[:max]...) + " ..."
+}
+
+// HumanReadableInts joins a series of ints into a compact human-readable
+// string like "1-3 5 7-10".
+func HumanReadableInts(ids ...int) string {
+ sort.Ints(ids)
+
+ s := ""
+ start := 0
+ last := 0
+
+ for i, v := range ids {
+ if i == 0 {
+ start = v
+ last = v
+ s = fmt.Sprintf("%d", v)
+ continue
+ }
+
+ if last+1 == v {
+ last = v
+ continue
+ }
+
+ if last > start {
+ s += fmt.Sprintf("-%d", last)
+ }
+
+ s += fmt.Sprintf(" %d", v)
+ start = v
+ last = v
+ }
+
+ if last != start {
+ s += fmt.Sprintf("-%d", last)
+ }
+
+ return s
+}
diff --git a/weed/util/inits_test.go b/weed/util/inits_test.go
new file mode 100644
index 000000000..f2c9b701f
--- /dev/null
+++ b/weed/util/inits_test.go
@@ -0,0 +1,19 @@
+package util
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestHumanReadableIntsMax(t *testing.T) {
+ assert.Equal(t, "1-2 ...", HumanReadableIntsMax(2, 1, 2, 3))
+ assert.Equal(t, "1 3 ...", HumanReadableIntsMax(2, 1, 3, 5))
+}
+
+func TestHumanReadableInts(t *testing.T) {
+ assert.Equal(t, "1-3", HumanReadableInts(1, 2, 3))
+ assert.Equal(t, "1 3", HumanReadableInts(1, 3))
+ assert.Equal(t, "1 3 5", HumanReadableInts(5, 1, 3))
+ assert.Equal(t, "1-3 5", HumanReadableInts(1, 2, 3, 5))
+ assert.Equal(t, "1-3 5 7-9", HumanReadableInts(7, 9, 8, 1, 2, 3, 5))
+}
diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go
new file mode 100644
index 000000000..b02c45b52
--- /dev/null
+++ b/weed/util/log_buffer/log_buffer.go
@@ -0,0 +1,278 @@
+package log_buffer
+
+import (
+ "bytes"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const BufferSize = 4 * 1024 * 1024
+const PreviousBufferCount = 3
+
+type dataToFlush struct {
+ startTime time.Time
+ stopTime time.Time
+ data *bytes.Buffer
+}
+
+type LogBuffer struct {
+ prevBuffers *SealedBuffers
+ buf []byte
+ idx []int
+ pos int
+ startTime time.Time
+ stopTime time.Time
+ sizeBuf []byte
+ flushInterval time.Duration
+ flushFn func(startTime, stopTime time.Time, buf []byte)
+ notifyFn func()
+ isStopping bool
+ flushChan chan *dataToFlush
+ lastTsNs int64
+ sync.RWMutex
+}
+
+func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte), notifyFn func()) *LogBuffer {
+ lb := &LogBuffer{
+ prevBuffers: newSealedBuffers(PreviousBufferCount),
+ buf: make([]byte, BufferSize),
+ sizeBuf: make([]byte, 4),
+ flushInterval: flushInterval,
+ flushFn: flushFn,
+ notifyFn: notifyFn,
+ flushChan: make(chan *dataToFlush, 256),
+ }
+ go lb.loopFlush()
+ go lb.loopInterval()
+ return lb
+}
+
+func (m *LogBuffer) AddToBuffer(partitionKey, data []byte) {
+
+ m.Lock()
+ defer func() {
+ m.Unlock()
+ if m.notifyFn != nil {
+ m.notifyFn()
+ }
+ }()
+
+ // need to put the timestamp inside the lock
+ ts := time.Now()
+ tsNs := ts.UnixNano()
+ if m.lastTsNs >= tsNs {
+ // this is unlikely to happen, but just in case
+ tsNs = m.lastTsNs + 1
+ ts = time.Unix(0, tsNs)
+ }
+ m.lastTsNs = tsNs
+ logEntry := &filer_pb.LogEntry{
+ TsNs: tsNs,
+ PartitionKeyHash: util.HashToInt32(partitionKey),
+ Data: data,
+ }
+
+ logEntryData, _ := proto.Marshal(logEntry)
+
+ size := len(logEntryData)
+
+ if m.pos == 0 {
+ m.startTime = ts
+ }
+
+ if m.startTime.Add(m.flushInterval).Before(ts) || len(m.buf)-m.pos < size+4 {
+ m.flushChan <- m.copyToFlush()
+ m.startTime = ts
+ if len(m.buf) < size+4 {
+ m.buf = make([]byte, 2*size+4)
+ }
+ }
+ m.stopTime = ts
+
+ m.idx = append(m.idx, m.pos)
+ util.Uint32toBytes(m.sizeBuf, uint32(size))
+ copy(m.buf[m.pos:m.pos+4], m.sizeBuf)
+ copy(m.buf[m.pos+4:m.pos+4+size], logEntryData)
+ m.pos += size + 4
+
+ // fmt.Printf("entry size %d total %d count %d, buffer:%p\n", size, m.pos, len(m.idx), m)
+
+}
+
+func (m *LogBuffer) Shutdown() {
+ m.Lock()
+ defer m.Unlock()
+
+ if m.isStopping {
+ return
+ }
+ m.isStopping = true
+ toFlush := m.copyToFlush()
+ m.flushChan <- toFlush
+ close(m.flushChan)
+}
+
+func (m *LogBuffer) loopFlush() {
+ for d := range m.flushChan {
+ if d != nil {
+ // fmt.Printf("flush [%v, %v] size %d\n", d.startTime, d.stopTime, len(d.data.Bytes()))
+ m.flushFn(d.startTime, d.stopTime, d.data.Bytes())
+ d.releaseMemory()
+ }
+ }
+}
+
+func (m *LogBuffer) loopInterval() {
+ for !m.isStopping {
+ time.Sleep(m.flushInterval)
+ m.Lock()
+ if m.isStopping {
+ m.Unlock()
+ return
+ }
+ // println("loop interval")
+ toFlush := m.copyToFlush()
+ m.flushChan <- toFlush
+ m.Unlock()
+ }
+}
+
+func (m *LogBuffer) copyToFlush() *dataToFlush {
+
+ if m.flushFn != nil && m.pos > 0 {
+ // fmt.Printf("flush buffer %d pos %d empty space %d\n", len(m.buf), m.pos, len(m.buf)-m.pos)
+ d := &dataToFlush{
+ startTime: m.startTime,
+ stopTime: m.stopTime,
+ data: copiedBytes(m.buf[:m.pos]),
+ }
+ // fmt.Printf("flusing [0,%d) with %d entries\n", m.pos, len(m.idx))
+ m.buf = m.prevBuffers.SealBuffer(m.startTime, m.stopTime, m.buf, m.pos)
+ m.pos = 0
+ m.idx = m.idx[:0]
+ return d
+ }
+ return nil
+}
+
+func (d *dataToFlush) releaseMemory() {
+ d.data.Reset()
+ bufferPool.Put(d.data)
+}
+
+func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Buffer) {
+ m.RLock()
+ defer m.RUnlock()
+
+ /*
+ fmt.Printf("read buffer %p: %v last stop time: [%v,%v], pos %d, entries:%d, prevBufs:%d\n", m, lastReadTime, m.startTime, m.stopTime, m.pos, len(m.idx), len(m.prevBuffers.buffers))
+ for i, prevBuf := range m.prevBuffers.buffers {
+ fmt.Printf(" prev %d : %s\n", i, prevBuf.String())
+ }
+ */
+
+ if lastReadTime.Equal(m.stopTime) {
+ return nil
+ }
+ if lastReadTime.After(m.stopTime) {
+ // glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadTime, m.stopTime)
+ return nil
+ }
+ if lastReadTime.Before(m.startTime) {
+ // println("checking ", lastReadTime.UnixNano())
+ for _, buf := range m.prevBuffers.buffers {
+ if buf.startTime.After(lastReadTime) {
+ // whether this is the earliest sealed buffer or a later one,
+ // return its whole content
+ return copiedBytes(buf.buf[:buf.size])
+ }
+ }
+ if !buf.startTime.After(lastReadTime) && buf.stopTime.After(lastReadTime) {
+ pos := buf.locateByTs(lastReadTime)
+ // fmt.Printf("locate buffer[%d] pos %d\n", i, pos)
+ return copiedBytes(buf.buf[pos:buf.size])
+ }
+ }
+ // println("return the current buf", lastReadTime.UnixNano())
+ return copiedBytes(m.buf[:m.pos])
+ }
+
+ lastTs := lastReadTime.UnixNano()
+ l, h := 0, len(m.idx)-1
+
+ /*
+ for i, pos := range m.idx {
+ logEntry, ts := readTs(m.buf, pos)
+ event := &filer_pb.SubscribeMetadataResponse{}
+ proto.Unmarshal(logEntry.Data, event)
+ entry := event.EventNotification.OldEntry
+ if entry == nil {
+ entry = event.EventNotification.NewEntry
+ }
+ fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name)
+ }
+ fmt.Printf("l=%d, h=%d\n", l, h)
+ */
+
+ for l <= h {
+ mid := (l + h) / 2
+ pos := m.idx[mid]
+ _, t := readTs(m.buf, pos)
+ if t <= lastTs {
+ l = mid + 1
+ } else if lastTs < t {
+ var prevT int64
+ if mid > 0 {
+ _, prevT = readTs(m.buf, m.idx[mid-1])
+ }
+ if prevT <= lastTs {
+ // fmt.Printf("found l=%d, m-1=%d(ts=%d), m=%d(ts=%d), h=%d [%d, %d) \n", l, mid-1, prevT, mid, t, h, pos, m.pos)
+ return copiedBytes(m.buf[pos:m.pos])
+ }
+ h = mid
+ }
+ // fmt.Printf("l=%d, h=%d\n", l, h)
+ }
+
+ // FIXME: this could be that the buffer has been flushed already
+ return nil
+
+}
+func (m *LogBuffer) ReleaseMeory(b *bytes.Buffer) {
+ bufferPool.Put(b)
+}
+
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func copiedBytes(buf []byte) (copied *bytes.Buffer) {
+ copied = bufferPool.Get().(*bytes.Buffer)
+ copied.Reset()
+ copied.Write(buf)
+ return
+}
+
+func readTs(buf []byte, pos int) (size int, ts int64) {
+
+ size = int(util.BytesToUint32(buf[pos : pos+4]))
+ entryData := buf[pos+4 : pos+4+size]
+ logEntry := &filer_pb.LogEntry{}
+
+ err := proto.Unmarshal(entryData, logEntry)
+ if err != nil {
+ glog.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+ }
+ return size, logEntry.TsNs
+
+}
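Entries in the buffer are length-prefixed: a 4-byte size followed by the marshaled filer_pb.LogEntry, which is why readTs and the binary search hop through the positions recorded in m.idx. A sketch of walking that framing directly (eachEntry is illustrative, not part of the package):

    package log_buffer

    import "github.com/chrislusf/seaweedfs/weed/util"

    // eachEntry walks a flushed buffer: 4-byte size prefix, then the
    // marshaled entry bytes, repeated until the buffer is exhausted.
    func eachEntry(buf []byte, fn func(entryData []byte)) {
        for pos := 0; pos+4 <= len(buf); {
            size := int(util.BytesToUint32(buf[pos : pos+4]))
            if pos+4+size > len(buf) {
                break // truncated tail, stop
            }
            fn(buf[pos+4 : pos+4+size])
            pos += 4 + size
        }
    }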
diff --git a/weed/util/log_buffer/log_buffer_test.go b/weed/util/log_buffer/log_buffer_test.go
new file mode 100644
index 000000000..f9ccc95c2
--- /dev/null
+++ b/weed/util/log_buffer/log_buffer_test.go
@@ -0,0 +1,42 @@
+package log_buffer
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func TestNewLogBufferFirstBuffer(t *testing.T) {
+ lb := NewLogBuffer(time.Minute, func(startTime, stopTime time.Time, buf []byte) {
+
+ }, func() {
+
+ })
+
+ startTime := time.Now()
+
+ messageSize := 1024
+ messageCount := 5000
+ var buf = make([]byte, messageSize)
+ for i := 0; i < messageCount; i++ {
+ rand.Read(buf)
+ lb.AddToBuffer(nil, buf)
+ }
+
+ receivedMessageCount := 0
+ lb.LoopProcessLogData(startTime, func() bool {
+ // stop if no more messages
+ return false
+ }, func(logEntry *filer_pb.LogEntry) error {
+ receivedMessageCount++
+ return nil
+ })
+
+ if receivedMessageCount != messageCount {
+ t.Errorf("sent %d received %d", messageCount, receivedMessageCount)
+ }
+
+}
diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go
new file mode 100644
index 000000000..2b73a8064
--- /dev/null
+++ b/weed/util/log_buffer/log_read.go
@@ -0,0 +1,77 @@
+package log_buffer
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (logBuffer *LogBuffer) LoopProcessLogData(
+ startTime time.Time,
+ waitForDataFn func() bool,
+ eachLogDataFn func(logEntry *filer_pb.LogEntry) error) (err error) {
+ // loop through all messages
+ var bytesBuf *bytes.Buffer
+ lastReadTime := startTime
+ defer func() {
+ if bytesBuf != nil {
+ logBuffer.ReleaseMeory(bytesBuf)
+ }
+ }()
+
+ for {
+
+ if bytesBuf != nil {
+ logBuffer.ReleaseMeory(bytesBuf)
+ }
+ bytesBuf = logBuffer.ReadFromBuffer(lastReadTime)
+ // fmt.Printf("ReadFromBuffer by %v\n", lastReadTime)
+ if bytesBuf == nil {
+ if waitForDataFn() {
+ continue
+ } else {
+ return
+ }
+ }
+
+ buf := bytesBuf.Bytes()
+ // fmt.Printf("ReadFromBuffer by %v size %d\n", lastReadTime, len(buf))
+
+ batchSize := 0
+ var startReadTime time.Time
+
+ for pos := 0; pos+4 < len(buf); {
+
+ size := util.BytesToUint32(buf[pos : pos+4])
+ entryData := buf[pos+4 : pos+4+int(size)]
+
+ // fmt.Printf("read buffer read %d [%d,%d) from [0,%d)\n", batchSize, pos, pos+int(size)+4, len(buf))
+
+ logEntry := &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ pos += 4 + int(size)
+ continue
+ }
+ lastReadTime = time.Unix(0, logEntry.TsNs)
+ if startReadTime.IsZero() {
+ startReadTime = lastReadTime
+ }
+
+ if err = eachLogDataFn(logEntry); err != nil {
+ return
+ }
+
+ pos += 4 + int(size)
+ batchSize++
+ }
+
+ // fmt.Printf("sent message ts[%d,%d] size %d\n", startReadTime.UnixNano(), lastReadTime.UnixNano(), batchSize)
+ }
+
+}
diff --git a/weed/util/log_buffer/sealed_buffer.go b/weed/util/log_buffer/sealed_buffer.go
new file mode 100644
index 000000000..d133cf8d3
--- /dev/null
+++ b/weed/util/log_buffer/sealed_buffer.go
@@ -0,0 +1,62 @@
+package log_buffer
+
+import (
+ "fmt"
+ "time"
+)
+
+type MemBuffer struct {
+ buf []byte
+ size int
+ startTime time.Time
+ stopTime time.Time
+}
+
+type SealedBuffers struct {
+ buffers []*MemBuffer
+}
+
+func newSealedBuffers(size int) *SealedBuffers {
+ sbs := &SealedBuffers{}
+
+ sbs.buffers = make([]*MemBuffer, size)
+ for i := 0; i < size; i++ {
+ sbs.buffers[i] = &MemBuffer{
+ buf: make([]byte, BufferSize),
+ }
+ }
+
+ return sbs
+}
+
+func (sbs *SealedBuffers) SealBuffer(startTime, stopTime time.Time, buf []byte, pos int) (newBuf []byte) {
+ oldMemBuffer := sbs.buffers[0]
+ size := len(sbs.buffers)
+ for i := 0; i < size-1; i++ {
+ sbs.buffers[i].buf = sbs.buffers[i+1].buf
+ sbs.buffers[i].size = sbs.buffers[i+1].size
+ sbs.buffers[i].startTime = sbs.buffers[i+1].startTime
+ sbs.buffers[i].stopTime = sbs.buffers[i+1].stopTime
+ }
+ sbs.buffers[size-1].buf = buf
+ sbs.buffers[size-1].size = pos
+ sbs.buffers[size-1].startTime = startTime
+ sbs.buffers[size-1].stopTime = stopTime
+ return oldMemBuffer.buf
+}
+
+func (mb *MemBuffer) locateByTs(lastReadTime time.Time) (pos int) {
+ lastReadTs := lastReadTime.UnixNano()
+ for pos < len(mb.buf) {
+ size, t := readTs(mb.buf, pos)
+ if t > lastReadTs {
+ return
+ }
+ pos += size + 4
+ }
+ return len(mb.buf)
+}
+
+func (mb *MemBuffer) String() string {
+ return fmt.Sprintf("[%v,%v] bytes:%d", mb.startTime, mb.stopTime, mb.size)
+}
diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go
index b8068e67f..f057a8f5b 100644
--- a/weed/util/net_timeout.go
+++ b/weed/util/net_timeout.go
@@ -35,6 +35,7 @@ type Conn struct {
net.Conn
ReadTimeout time.Duration
WriteTimeout time.Duration
+ isClosed bool
}
func (c *Conn) Read(b []byte) (count int, e error) {
@@ -68,7 +69,10 @@ func (c *Conn) Write(b []byte) (count int, e error) {
func (c *Conn) Close() error {
err := c.Conn.Close()
if err == nil {
- stats.ConnectionClose()
+ if !c.isClosed {
+ stats.ConnectionClose()
+ c.isClosed = true
+ }
}
return err
}
diff --git a/weed/util/network.go b/weed/util/network.go
new file mode 100644
index 000000000..7108cfea6
--- /dev/null
+++ b/weed/util/network.go
@@ -0,0 +1,25 @@
+package util
+
+import (
+ "net"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func DetectedHostAddress() string {
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ glog.V(0).Infof("failed to detect ip address: %v", err)
+ return ""
+ }
+
+ for _, a := range addrs {
+ if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+ if ipnet.IP.To4() != nil {
+ return ipnet.IP.String()
+ }
+ }
+ }
+
+ return "localhost"
+}
diff --git a/weed/util/parse.go b/weed/util/parse.go
index 0a8317c19..0955db682 100644
--- a/weed/util/parse.go
+++ b/weed/util/parse.go
@@ -1,7 +1,10 @@
package util
import (
+ "fmt"
+ "net/url"
"strconv"
+ "strings"
)
func ParseInt(text string, defaultValue int) int {
@@ -24,3 +27,37 @@ func ParseUint64(text string, defaultValue uint64) uint64 {
}
return count
}
+
+func ParseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) {
+ if !strings.HasPrefix(entryPath, "http://") && !strings.HasPrefix(entryPath, "https://") {
+ entryPath = "http://" + entryPath
+ }
+
+ var u *url.URL
+ u, err = url.Parse(entryPath)
+ if err != nil {
+ return
+ }
+ filerServer = u.Hostname()
+ portString := u.Port()
+ if portString != "" {
+ filerPort, err = strconv.ParseInt(portString, 10, 32)
+ }
+ path = u.Path
+ return
+}
+
+func ParseHostPort(hostPort string) (filerServer string, filerPort int64, err error) {
+ parts := strings.Split(hostPort, ":")
+ if len(parts) != 2 {
+ err = fmt.Errorf("failed to parse %s", hostPort)
+ return
+ }
+
+ filerPort, err = strconv.ParseInt(parts[1], 10, 64)
+ if err == nil {
+ filerServer = parts[0]
+ }
+
+ return
+}
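ParseFilerUrl tolerates a missing scheme by defaulting to http:// and then splits out host, port, and path. A behavior sketch:

    package util

    import "testing"

    func TestParseFilerUrlExample(t *testing.T) {
        server, port, path, err := ParseFilerUrl("localhost:8888/buckets/b1/file.txt")
        if err != nil {
            t.Fatal(err)
        }
        if server != "localhost" || port != 8888 || path != "/buckets/b1/file.txt" {
            t.Errorf("got server=%s port=%d path=%s", server, port, path)
        }
    }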
diff --git a/weed/util/queue.go b/weed/util/queue.go
new file mode 100644
index 000000000..1e6211e0d
--- /dev/null
+++ b/weed/util/queue.go
@@ -0,0 +1,61 @@
+package util
+
+import "sync"
+
+type node struct {
+ data interface{}
+ next *node
+}
+
+type Queue struct {
+ head *node
+ tail *node
+ count int
+ sync.RWMutex
+}
+
+func NewQueue() *Queue {
+ q := &Queue{}
+ return q
+}
+
+func (q *Queue) Len() int {
+ q.RLock()
+ defer q.RUnlock()
+ return q.count
+}
+
+func (q *Queue) Enqueue(item interface{}) {
+ q.Lock()
+ defer q.Unlock()
+
+ n := &node{data: item}
+
+ if q.tail == nil {
+ q.tail = n
+ q.head = n
+ } else {
+ q.tail.next = n
+ q.tail = n
+ }
+ q.count++
+}
+
+func (q *Queue) Dequeue() interface{} {
+ q.Lock()
+ defer q.Unlock()
+
+ if q.head == nil {
+ return nil
+ }
+
+ n := q.head
+ q.head = n.next
+
+ if q.head == nil {
+ q.tail = nil
+ }
+ q.count--
+
+ return n.data
+}
diff --git a/weed/util/queue_unbounded.go b/weed/util/queue_unbounded.go
new file mode 100644
index 000000000..496b9f844
--- /dev/null
+++ b/weed/util/queue_unbounded.go
@@ -0,0 +1,45 @@
+package util
+
+import "sync"
+
+type UnboundedQueue struct {
+ outbound []string
+ outboundLock sync.RWMutex
+ inbound []string
+ inboundLock sync.RWMutex
+}
+
+func NewUnboundedQueue() *UnboundedQueue {
+ q := &UnboundedQueue{}
+ return q
+}
+
+func (q *UnboundedQueue) EnQueue(items ...string) {
+ q.inboundLock.Lock()
+ defer q.inboundLock.Unlock()
+
+ q.inbound = append(q.inbound, items...)
+
+}
+
+func (q *UnboundedQueue) Consume(fn func([]string)) {
+ q.outboundLock.Lock()
+ defer q.outboundLock.Unlock()
+
+ if len(q.outbound) == 0 {
+ q.inboundLock.Lock()
+ inboundLen := len(q.inbound)
+ if inboundLen > 0 {
+ t := q.outbound
+ q.outbound = q.inbound
+ q.inbound = t
+ }
+ q.inboundLock.Unlock()
+ }
+
+ if len(q.outbound) > 0 {
+ fn(q.outbound)
+ q.outbound = q.outbound[:0]
+ }
+
+}
diff --git a/weed/util/queue_unbounded_test.go b/weed/util/queue_unbounded_test.go
new file mode 100644
index 000000000..2d02032cb
--- /dev/null
+++ b/weed/util/queue_unbounded_test.go
@@ -0,0 +1,25 @@
+package util
+
+import "testing"
+
+func TestEnqueueAndConsume(t *testing.T) {
+
+ q := NewUnboundedQueue()
+
+ q.EnQueue("1", "2", "3")
+
+ f := func(items []string) {
+ for _, t := range items {
+ println(t)
+ }
+ println("-----------------------")
+ }
+ q.Consume(f)
+
+ q.Consume(f)
+
+ q.EnQueue("4", "5")
+ q.EnQueue("6", "7")
+ q.Consume(f)
+
+}
diff --git a/weed/util/throttler.go b/weed/util/throttler.go
new file mode 100644
index 000000000..873161e37
--- /dev/null
+++ b/weed/util/throttler.go
@@ -0,0 +1,34 @@
+package util
+
+import "time"
+
+type WriteThrottler struct {
+ compactionBytePerSecond int64
+ lastSizeCounter int64
+ lastSizeCheckTime time.Time
+}
+
+func NewWriteThrottler(bytesPerSecond int64) *WriteThrottler {
+ return &WriteThrottler{
+ compactionBytePerSecond: bytesPerSecond,
+ lastSizeCheckTime: time.Now(),
+ }
+}
+
+func (wt *WriteThrottler) MaybeSlowdown(delta int64) {
+ if wt.compactionBytePerSecond > 0 {
+ wt.lastSizeCounter += delta
+ now := time.Now()
+ elapsedDuration := now.Sub(wt.lastSizeCheckTime)
+ if elapsedDuration > 100*time.Millisecond {
+ overLimitBytes := wt.lastSizeCounter - wt.compactionBytePerSecond/10
+ if overLimitBytes > 0 {
+ overRatio := float64(overLimitBytes) / float64(wt.compactionBytePerSecond)
+ sleepTime := time.Duration(overRatio*1000) * time.Millisecond
+ // glog.V(0).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
+ time.Sleep(sleepTime)
+ }
+ wt.lastSizeCounter, wt.lastSizeCheckTime = 0, time.Now()
+ }
+ }
+}
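MaybeSlowdown accounts in 100ms windows: the per-window budget is compactionBytePerSecond/10, and any overage becomes a proportional sleep. For example, at 10 MB/s the window budget is 1 MB, so a 2 MB burst overshoots by 1 MB and sleeps roughly 1MB/10MB * 1s = 100ms. A worked test sketch, timing-sensitive and illustrative only:

    package util

    import (
        "testing"
        "time"
    )

    func TestWriteThrottlerSlowdown(t *testing.T) {
        wt := NewWriteThrottler(10 * 1024 * 1024) // 10 MB/s

        time.Sleep(150 * time.Millisecond) // exceed the 100ms check interval
        start := time.Now()
        wt.MaybeSlowdown(2 * 1024 * 1024) // 1 MB over the window budget

        if elapsed := time.Since(start); elapsed < 90*time.Millisecond {
            t.Errorf("expected roughly 100ms of throttling, got %v", elapsed)
        }
    }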
diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go
new file mode 100644
index 000000000..1ecfe6ce2
--- /dev/null
+++ b/weed/wdclient/exclusive_locks/exclusive_locker.go
@@ -0,0 +1,111 @@
+package exclusive_locks
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+const (
+ RenewInteval = 4 * time.Second
+ SafeRenewInteval = 3 * time.Second
+ InitLockInteval = 1 * time.Second
+ AdminLockName = "admin"
+)
+
+type ExclusiveLocker struct {
+ masterClient *wdclient.MasterClient
+ token int64
+ lockTsNs int64
+ isLocking bool
+}
+
+func NewExclusiveLocker(masterClient *wdclient.MasterClient) *ExclusiveLocker {
+ return &ExclusiveLocker{
+ masterClient: masterClient,
+ }
+}
+func (l *ExclusiveLocker) IsLocking() bool {
+ return l.isLocking
+}
+
+func (l *ExclusiveLocker) GetToken() (token int64, lockTsNs int64) {
+ for time.Unix(0, atomic.LoadInt64(&l.lockTsNs)).Add(SafeRenewInteval).Before(time.Now()) {
+ // wait until now is within the safe lock period, no immediate renewal to change the token
+ time.Sleep(100 * time.Millisecond)
+ }
+ return atomic.LoadInt64(&l.token), atomic.LoadInt64(&l.lockTsNs)
+}
+
+func (l *ExclusiveLocker) RequestLock() {
+ if l.isLocking {
+ return
+ }
+
+ // retry to get the lease
+ for {
+ if err := l.masterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
+ PreviousToken: atomic.LoadInt64(&l.token),
+ PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
+ LockName: AdminLockName,
+ })
+ if err == nil {
+ atomic.StoreInt64(&l.token, resp.Token)
+ atomic.StoreInt64(&l.lockTsNs, resp.LockTsNs)
+ }
+ return err
+ }); err != nil {
+ // println("leasing problem", err.Error())
+ time.Sleep(InitLockInterval)
+ } else {
+ break
+ }
+ }
+
+ l.isLocking = true
+
+ // start a goroutine to renew the lease
+ go func() {
+ for l.isLocking {
+ if err := l.masterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
+ PreviousToken: atomic.LoadInt64(&l.token),
+ PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
+ LockName: AdminLockName,
+ })
+ if err == nil {
+ atomic.StoreInt64(&l.token, resp.Token)
+ atomic.StoreInt64(&l.lockTsNs, resp.LockTsNs)
+ // println("ts", l.lockTsNs, "token", l.token)
+ }
+ return err
+ }); err != nil {
+ glog.Errorf("failed to renew lock: %v", err)
+ return
+ } else {
+ time.Sleep(RenewInterval)
+ }
+
+ }
+ }()
+
+}
+
+func (l *ExclusiveLocker) ReleaseLock() {
+ l.isLocking = false
+ l.masterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ client.ReleaseAdminToken(context.Background(), &master_pb.ReleaseAdminTokenRequest{
+ PreviousToken: atomic.LoadInt64(&l.token),
+ PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
+ LockName: AdminLockName,
+ })
+ return nil
+ })
+ atomic.StoreInt64(&l.token, 0)
+ atomic.StoreInt64(&l.lockTsNs, 0)
+}
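A sketch of the intended call pattern, assuming only the methods defined above; the master address, the insecure dial option, and the critical-section body are placeholders:

package main

import (
    "google.golang.org/grpc"

    "github.com/chrislusf/seaweedfs/weed/wdclient"
    "github.com/chrislusf/seaweedfs/weed/wdclient/exclusive_locks"
)

func main() {
    masterClient := wdclient.NewMasterClient(grpc.WithInsecure(), "admin", "localhost", 0, []string{"localhost:9333"})
    go masterClient.KeepConnectedToMaster()
    masterClient.WaitUntilConnected()

    locker := exclusive_locks.NewExclusiveLocker(masterClient)

    // Block until the admin lease is acquired; a goroutine then renews it
    // every RenewInterval until ReleaseLock is called.
    locker.RequestLock()
    defer locker.ReleaseLock()

    // Work that needs cluster-wide exclusivity can read the current token
    // while it is still within the safe renewal window.
    token, lockTsNs := locker.GetToken()
    _ = token
    _ = lockTsNs
}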
diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go
index f58c28504..4c066d535 100644
--- a/weed/wdclient/masterclient.go
+++ b/weed/wdclient/masterclient.go
@@ -2,30 +2,35 @@ package wdclient
import (
"context"
- "fmt"
+ "math/rand"
"time"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "math/rand"
)
type MasterClient struct {
- ctx context.Context
- name string
- currentMaster string
- masters []string
+ clientType string
+ clientHost string
+ grpcPort uint32
+ currentMaster string
+ masters []string
+ grpcDialOption grpc.DialOption
vidMap
}
-func NewMasterClient(ctx context.Context, clientName string, masters []string) *MasterClient {
+func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost string, clientGrpcPort uint32, masters []string) *MasterClient {
return &MasterClient{
- ctx: ctx,
- name: clientName,
- masters: masters,
- vidMap: newVidMap(),
+ clientType: clientType,
+ clientHost: clientHost,
+ grpcPort: clientGrpcPort,
+ masters: masters,
+ grpcDialOption: grpcDialOption,
+ vidMap: newVidMap(),
}
}
@@ -40,7 +45,7 @@ func (mc *MasterClient) WaitUntilConnected() {
}
func (mc *MasterClient) KeepConnectedToMaster() {
- glog.V(0).Infof("%s bootstraps with masters %v", mc.name, mc.masters)
+ glog.V(1).Infof("%s masterClient bootstraps with masters %v", mc.clientType, mc.masters)
for {
mc.tryAllMasters()
time.Sleep(time.Second)
@@ -48,69 +53,78 @@ func (mc *MasterClient) KeepConnectedToMaster() {
}
func (mc *MasterClient) tryAllMasters() {
+ nextHintedLeader := ""
for _, master := range mc.masters {
- glog.V(0).Infof("Connecting to master %v", master)
- gprcErr := withMasterClient(master, func(client master_pb.SeaweedClient) error {
- stream, err := client.KeepConnected(context.Background())
+ nextHintedLeader = mc.tryConnectToMaster(master)
+ for nextHintedLeader != "" {
+ nextHintedLeader = mc.tryConnectToMaster(nextHintedLeader)
+ }
+
+ mc.currentMaster = ""
+ mc.vidMap = newVidMap()
+ }
+}
+
+func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) {
+ glog.V(1).Infof("%s masterClient Connecting to master %v", mc.clientType, master)
+ grpcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
+
+ stream, err := client.KeepConnected(context.Background())
+ if err != nil {
+ glog.V(0).Infof("%s masterClient failed to keep connected to %s: %v", mc.clientType, master, err)
+ return err
+ }
+
+ if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, GrpcPort: mc.grpcPort}); err != nil {
+ glog.V(0).Infof("%s masterClient failed to send to %s: %v", mc.clientType, master, err)
+ return err
+ }
+
+ glog.V(1).Infof("%s masterClient Connected to %v", mc.clientType, master)
+ mc.currentMaster = master
+
+ for {
+ volumeLocation, err := stream.Recv()
if err != nil {
- glog.V(0).Infof("failed to keep connected to %s: %v", master, err)
+ glog.V(0).Infof("%s masterClient failed to receive from %s: %v", mc.clientType, master, err)
return err
}
- if err = stream.Send(&master_pb.ClientListenRequest{Name: mc.name}); err != nil {
- glog.V(0).Infof("failed to send to %s: %v", master, err)
- return err
+ // maybe the leader has changed
+ if volumeLocation.Leader != "" {
+ glog.V(0).Infof("redirected to leader %v", volumeLocation.Leader)
+ nextHintedLeader = volumeLocation.Leader
+ return nil
}
- for {
- if volumeLocation, err := stream.Recv(); err != nil {
- glog.V(0).Infof("failed to receive from %s: %v", master, err)
- return err
- } else {
- loc := Location{
- Url: volumeLocation.Url,
- PublicUrl: volumeLocation.PublicUrl,
- }
- for _, newVid := range volumeLocation.NewVids {
- mc.addLocation(newVid, loc)
- }
- for _, deletedVid := range volumeLocation.DeletedVids {
- mc.deleteLocation(deletedVid, loc)
- }
-
- if mc.currentMaster == "" {
- glog.V(0).Infof("Connected to %v", master)
- mc.currentMaster = master
- }
-
- }
+ // process new volume location
+ loc := Location{
+ Url: volumeLocation.Url,
+ PublicUrl: volumeLocation.PublicUrl,
+ }
+ for _, newVid := range volumeLocation.NewVids {
+ glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
+ mc.addLocation(newVid, loc)
+ }
+ for _, deletedVid := range volumeLocation.DeletedVids {
+ glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
+ mc.deleteLocation(deletedVid, loc)
}
-
- })
-
- if gprcErr != nil {
- glog.V(0).Infof("%s failed to connect with master %v: %v", mc.name, master, gprcErr)
}
- mc.currentMaster = ""
+ })
+ if grpcErr != nil {
+ glog.V(0).Infof("%s masterClient failed to connect with master %v: %v", mc.clientType, master, grpcErr)
}
+ return
}
-func withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error {
-
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0)
- if parseErr != nil {
- return fmt.Errorf("failed to parse master grpc %v", master)
- }
-
- grpcConnection, err := util.GrpcDial(masterGrpcAddress)
- if err != nil {
- return fmt.Errorf("fail to dial %s: %v", master, err)
+func (mc *MasterClient) WithClient(fn func(client master_pb.SeaweedClient) error) error {
+ // block until KeepConnectedToMaster has established a current master
+ for mc.currentMaster == "" {
+ time.Sleep(3 * time.Second)
}
- defer grpcConnection.Close()
-
- client := master_pb.NewSeaweedClient(grpcConnection)
-
- return fn(client)
+ return pb.WithMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
+ return fn(client)
+ })
}
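Because MasterClient embeds vidMap and receives volume locations over the KeepConnected stream, a connected client can resolve file IDs locally without a per-lookup round trip. A minimal sketch under the new constructor signature; the addresses, the client identity, and the file ID are illustrative:

package main

import (
    "google.golang.org/grpc"

    "github.com/chrislusf/seaweedfs/weed/wdclient"
)

func main() {
    // clientType/clientHost identify this process to the master.
    mc := wdclient.NewMasterClient(grpc.WithInsecure(), "filer", "localhost", 0, []string{"localhost:9333"})
    go mc.KeepConnectedToMaster() // tries all masters, follows leader hints
    mc.WaitUntilConnected()

    // Served from the in-memory vidMap kept fresh by the stream.
    fullUrl, err := mc.LookupFileId("3,01637037d6")
    if err != nil {
        println("lookup failed:", err.Error())
        return
    }
    println("read from", fullUrl)
}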
diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go
index aef29f56f..97df49cb6 100644
--- a/weed/wdclient/vid_map.go
+++ b/weed/wdclient/vid_map.go
@@ -3,14 +3,18 @@ package wdclient
import (
"errors"
"fmt"
- "math/rand"
"strconv"
"strings"
"sync"
+ "sync/atomic"
"github.com/chrislusf/seaweedfs/weed/glog"
)
+const (
+ maxCursorIndex = 4096
+)
+
type Location struct {
Url string `json:"url,omitempty"`
PublicUrl string `json:"publicUrl,omitempty"`
@@ -19,14 +23,27 @@ type Location struct {
type vidMap struct {
sync.RWMutex
vid2Locations map[uint32][]Location
+
+ cursor int32
}
func newVidMap() vidMap {
return vidMap{
vid2Locations: make(map[uint32][]Location),
+ cursor: -1,
}
}
+// getLocationIndex returns a round-robin index in [0, length); the shared
+// cursor wraps back to -1 once it reaches maxCursorIndex to avoid overflow.
+func (vc *vidMap) getLocationIndex(length int) (int, error) {
+ if length <= 0 {
+ return 0, fmt.Errorf("invalid length: %d", length)
+ }
+ if atomic.LoadInt32(&vc.cursor) == maxCursorIndex {
+ atomic.CompareAndSwapInt32(&vc.cursor, maxCursorIndex, -1)
+ }
+ return int(atomic.AddInt32(&vc.cursor, 1)) % length, nil
+}
+
func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrl string, err error) {
id, err := strconv.Atoi(vid)
if err != nil {
@@ -34,12 +51,7 @@ func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrl string, err error
return "", err
}
- locations := vc.GetLocations(uint32(id))
- if len(locations) == 0 {
- return "", fmt.Errorf("volume %d not found", id)
- }
-
- return locations[rand.Intn(len(locations))].Url, nil
+ return vc.GetRandomLocation(uint32(id))
}
func (vc *vidMap) LookupFileId(fileId string) (fullUrl string, err error) {
@@ -66,20 +78,42 @@ func (vc *vidMap) LookupVolumeServer(fileId string) (volumeServer string, err er
return serverUrl, nil
}
-func (vc *vidMap) GetVidLocations(vid string) (locations []Location) {
+func (vc *vidMap) GetVidLocations(vid string) (locations []Location, err error) {
id, err := strconv.Atoi(vid)
if err != nil {
glog.V(1).Infof("Unknown volume id %s", vid)
- return nil
+ return nil, fmt.Errorf("Unknown volume id %s", vid)
+ }
+ foundLocations, found := vc.GetLocations(uint32(id))
+ if found {
+ return foundLocations, nil
}
- return vc.GetLocations(uint32(id))
+ return nil, fmt.Errorf("volume id %s not found", vid)
}
-func (vc *vidMap) GetLocations(vid uint32) (locations []Location) {
+func (vc *vidMap) GetLocations(vid uint32) (locations []Location, found bool) {
vc.RLock()
defer vc.RUnlock()
- return vc.vid2Locations[vid]
+ locations, found = vc.vid2Locations[vid]
+ return
+}
+
+// GetRandomLocation picks a server URL for the volume; despite the name,
+// selection is round-robin via the shared atomic cursor.
+func (vc *vidMap) GetRandomLocation(vid uint32) (serverUrl string, err error) {
+ vc.RLock()
+ defer vc.RUnlock()
+
+ locations := vc.vid2Locations[vid]
+ if len(locations) == 0 {
+ return "", fmt.Errorf("volume %d not found", vid)
+ }
+
+ index, err := vc.getLocationIndex(len(locations))
+ if err != nil {
+ return "", fmt.Errorf("volume %d: %v", vid, err)
+ }
+
+ return locations[index].Url, nil
}
func (vc *vidMap) addLocation(vid uint32, location Location) {
@@ -114,6 +148,7 @@ func (vc *vidMap) deleteLocation(vid uint32, location Location) {
for i, loc := range locations {
if loc.Url == location.Url {
vc.vid2Locations[vid] = append(locations[0:i], locations[i+1:]...)
+ break
}
}
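The cursor logic above is a small lock-free round-robin pattern: a single shared atomic counter, wrapped back to -1 before it can overflow, spreads lookups across replicas without taking a write lock. A self-contained sketch of the same technique outside vidMap; all names here are illustrative:

package main

import (
    "fmt"
    "sync/atomic"
)

const maxCursor = 4096 // wrap well before int32 overflow

type roundRobin struct {
    cursor int32
}

// next returns indices 0, 1, ..., n-1, 0, ... and is safe for concurrent use.
func (r *roundRobin) next(n int) int {
    if atomic.LoadInt32(&r.cursor) == maxCursor {
        // Reset; if another goroutine already reset, the CAS is a no-op.
        atomic.CompareAndSwapInt32(&r.cursor, maxCursor, -1)
    }
    return int(atomic.AddInt32(&r.cursor, 1)) % n
}

func main() {
    servers := []string{"s1:8080", "s2:8080", "s3:8080"}
    rr := roundRobin{cursor: -1}
    for i := 0; i < 5; i++ {
        fmt.Println(servers[rr.next(len(servers))]) // s1, s2, s3, s1, s2
    }
}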
diff --git a/weed/wdclient/vid_map_test.go b/weed/wdclient/vid_map_test.go
new file mode 100644
index 000000000..87be2fc25
--- /dev/null
+++ b/weed/wdclient/vid_map_test.go
@@ -0,0 +1,76 @@
+package wdclient
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestLocationIndex(t *testing.T) {
+ vm := vidMap{}
+ // non-positive lengths must fail
+ mustFailed := func(length int) {
+ _, err := vm.getLocationIndex(length)
+ if err == nil {
+ t.Errorf("length %d must fail", length)
+ return
+ }
+ if err.Error() != fmt.Sprintf("invalid length: %d", length) {
+ t.Errorf("length %d failed with unexpected error: %v", length, err)
+ }
+ }
+
+ mustFailed(-1)
+ mustFailed(0)
+
+ mustOk := func(length, cursor, expect int) {
+ if length <= 0 {
+ t.Fatal("please don't do this")
+ }
+ vm.cursor = int32(cursor)
+ got, err := vm.getLocationIndex(length)
+ if err != nil {
+ t.Errorf("length: %d, why? %v\n", length, err)
+ return
+ }
+ if got != expect {
+ t.Errorf("cursor: %d, length: %d, expect: %d, got: %d\n", cursor, length, expect, got)
+ return
+ }
+ }
+
+ for i := -1; i < 100; i++ {
+ mustOk(7, i, (i+1)%7)
+ }
+
+ // when the cursor reaches maxCursorIndex, it wraps back to index 0
+ mustOk(7, maxCursorIndex, 0)
+
+ // test with constructor
+ vm = newVidMap()
+ length := 7
+ for i := 0; i < 100; i++ {
+ got, err := vm.getLocationIndex(length)
+ if err != nil {
+ t.Errorf("length: %d, why? %v\n", length, err)
+ return
+ }
+ if got != i%length {
+ t.Errorf("length: %d, i: %d, got: %d\n", length, i, got)
+ }
+ }
+}
+
+func BenchmarkLocationIndex(b *testing.B) {
+ b.SetParallelism(8)
+ vm := vidMap{
+ cursor: maxCursorIndex - 4000,
+ }
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _, err := vm.getLocationIndex(3)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ })
+}
diff --git a/weed/wdclient/wdclient.go b/weed/wdclient/wdclient.go
deleted file mode 100644
index 722f4d061..000000000
--- a/weed/wdclient/wdclient.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package wdclient
-
-import (
- "context"
-)
-
-type SeaweedClient struct {
- *MasterClient
-}
-
-func NewSeaweedClient(ctx context.Context, clientName string, masters []string) *SeaweedClient {
- return &SeaweedClient{
- MasterClient: NewMasterClient(ctx, clientName, masters),
- }
-}
diff --git a/weed/weed.go b/weed/weed.go
index 340da6625..ecb0ba2a4 100644
--- a/weed/weed.go
+++ b/weed/weed.go
@@ -21,7 +21,6 @@ import (
)
var IsDebug *bool
-var server *string
var commands = command.Commands