@@ -147,6 +147,7 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
+ <scope>provided</scope>
<groupId>com.github.chrislusf</groupId>
@@ -157,6 +158,7 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
+ <scope>provided</scope>
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java
new file mode 100644
index 000000000..3d0b68a52
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java
@@ -0,0 +1,25 @@
+package seaweed.hdfs;
+
+import org.apache.hadoop.fs.*;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class BufferedByteBufferReadableInputStream extends BufferedFSInputStream implements ByteBufferReadable {
+
+ public BufferedByteBufferReadableInputStream(FSInputStream in, int size) {
+ super(in, size);
+ if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)) {
+ throw new IllegalArgumentException("In is not an instance of Seekable or PositionedReadable");
+ }
+ }
+
+ @Override
+ public int read(ByteBuffer buf) throws IOException {
+ if (this.in instanceof ByteBufferReadable) {
+ return ((ByteBufferReadable)this.in).read(buf);
+ } else {
+ throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream");
+ }
+ }
+}
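Hadoop's BufferedFSInputStream adds read buffering but does not advertise the wrapped stream's ByteBufferReadable capability, so FSDataInputStream.read(ByteBuffer) would throw UnsupportedOperationException. The wrapper above restores that capability by delegating ByteBuffer reads to the inner stream. A minimal usage sketch, assuming a class in the same seaweed.hdfs package and mirroring the 4x buffer sizing used in SeaweedFileSystem.open() further down (the sketch class name is illustrative):

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSInputStream;

    import java.io.IOException;
    import java.nio.ByteBuffer;

    class ByteBufferOpenSketch {
        // Wrap a raw FSInputStream so both buffered reads and ByteBuffer reads work.
        static FSDataInputStream wrap(FSInputStream raw, int seaweedBufferSize) {
            return new FSDataInputStream(
                    new BufferedByteBufferReadableInputStream(raw, 4 * seaweedBufferSize));
        }

        static int readChunk(FSDataInputStream in) throws IOException {
            ByteBuffer buf = ByteBuffer.allocate(64 * 1024);
            return in.read(buf); // dispatches to the wrapper's read(ByteBuffer)
        }
    }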
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java
deleted file mode 100644
index 926d0b83b..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package seaweed.hdfs;
-
-import java.util.concurrent.CountDownLatch;
-
-class ReadBuffer {
-
- private SeaweedInputStream stream;
- private long offset; // offset within the file for the buffer
- private int length; // actual length, set after the buffer is filled
- private int requestedLength; // requested length of the read
- private byte[] buffer; // the buffer itself
- private int bufferindex = -1; // index in the buffers array in Buffer manager
- private ReadBufferStatus status; // status of the buffer
- private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client
- // waiting on this buffer gets unblocked
-
- // fields to help with eviction logic
- private long timeStamp = 0; // tick at which buffer became available to read
- private boolean isFirstByteConsumed = false;
- private boolean isLastByteConsumed = false;
- private boolean isAnyByteConsumed = false;
-
- public SeaweedInputStream getStream() {
- return stream;
- }
-
- public void setStream(SeaweedInputStream stream) {
- this.stream = stream;
- }
-
- public long getOffset() {
- return offset;
- }
-
- public void setOffset(long offset) {
- this.offset = offset;
- }
-
- public int getLength() {
- return length;
- }
-
- public void setLength(int length) {
- this.length = length;
- }
-
- public int getRequestedLength() {
- return requestedLength;
- }
-
- public void setRequestedLength(int requestedLength) {
- this.requestedLength = requestedLength;
- }
-
- public byte[] getBuffer() {
- return buffer;
- }
-
- public void setBuffer(byte[] buffer) {
- this.buffer = buffer;
- }
-
- public int getBufferindex() {
- return bufferindex;
- }
-
- public void setBufferindex(int bufferindex) {
- this.bufferindex = bufferindex;
- }
-
- public ReadBufferStatus getStatus() {
- return status;
- }
-
- public void setStatus(ReadBufferStatus status) {
- this.status = status;
- }
-
- public CountDownLatch getLatch() {
- return latch;
- }
-
- public void setLatch(CountDownLatch latch) {
- this.latch = latch;
- }
-
- public long getTimeStamp() {
- return timeStamp;
- }
-
- public void setTimeStamp(long timeStamp) {
- this.timeStamp = timeStamp;
- }
-
- public boolean isFirstByteConsumed() {
- return isFirstByteConsumed;
- }
-
- public void setFirstByteConsumed(boolean isFirstByteConsumed) {
- this.isFirstByteConsumed = isFirstByteConsumed;
- }
-
- public boolean isLastByteConsumed() {
- return isLastByteConsumed;
- }
-
- public void setLastByteConsumed(boolean isLastByteConsumed) {
- this.isLastByteConsumed = isLastByteConsumed;
- }
-
- public boolean isAnyByteConsumed() {
- return isAnyByteConsumed;
- }
-
- public void setAnyByteConsumed(boolean isAnyByteConsumed) {
- this.isAnyByteConsumed = isAnyByteConsumed;
- }
-
-}
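The latch above was the synchronization point between a stream caller and a prefetch worker: the caller blocks in await() (see waitForProcess() in ReadBufferManager below) until the worker finishes the read and counts the latch down in doneReading(). A toy illustration of that handoff, with illustrative names:

    import java.util.concurrent.CountDownLatch;

    class LatchHandoffSketch {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch done = new CountDownLatch(1);
            byte[] buffer = new byte[4];
            Thread worker = new Thread(() -> {
                buffer[0] = 42;   // stand-in for the remote read filling the buffer
                done.countDown(); // publish "buffer ready" to the waiting reader
            });
            worker.start();
            done.await();         // reader blocks here until the worker signals
            System.out.println("first byte: " + buffer[0]); // safely published: 42
        }
    }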
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java
deleted file mode 100644
index 5b1e21529..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java
+++ /dev/null
@@ -1,394 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package seaweed.hdfs;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.Queue;
-import java.util.Stack;
-import java.util.concurrent.CountDownLatch;
-
-/**
- * The Read Buffer Manager for Rest AbfsClient.
- */
-final class ReadBufferManager {
- private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class);
-
- private static final int NUM_BUFFERS = 16;
- private static final int BLOCK_SIZE = 4 * 1024 * 1024;
- private static final int NUM_THREADS = 8;
- private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold
-
- private Thread[] threads = new Thread[NUM_THREADS];
- private byte[][] buffers; // array of byte[] buffers, to hold the data that is read
- private Stack<Integer> freeList = new Stack<>(); // indices in buffers[] array that are available
-
- private Queue<ReadBuffer> readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet
- private LinkedList<ReadBuffer> inProgressList = new LinkedList<>(); // requests being processed by worker threads
- private LinkedList<ReadBuffer> completedReadList = new LinkedList<>(); // buffers available for reading
- private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block
-
- static {
- BUFFER_MANAGER = new ReadBufferManager();
- BUFFER_MANAGER.init();
- }
-
- static ReadBufferManager getBufferManager() {
- return BUFFER_MANAGER;
- }
-
- private void init() {
- buffers = new byte[NUM_BUFFERS][];
- for (int i = 0; i < NUM_BUFFERS; i++) {
- buffers[i] = new byte[BLOCK_SIZE]; // same buffers are reused. The byte array never goes back to GC
- freeList.add(i);
- }
- for (int i = 0; i < NUM_THREADS; i++) {
- Thread t = new Thread(new ReadBufferWorker(i));
- t.setDaemon(true);
- threads[i] = t;
- t.setName("SeaweedFS-prefetch-" + i);
- t.start();
- }
- ReadBufferWorker.UNLEASH_WORKERS.countDown();
- }
-
- // hide instance constructor
- private ReadBufferManager() {
- }
-
-
- /*
- *
- * SeaweedInputStream-facing methods
- *
- */
-
-
- /**
- * {@link SeaweedInputStream} calls this method to queue read-aheads.
- *
- * @param stream The {@link SeaweedInputStream} for which to do the read-ahead
- * @param requestedOffset The offset in the file which should be read
- * @param requestedLength The length to read
- */
- void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) {
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Start Queueing readAhead for {} offset {} length {}",
- stream.getPath(), requestedOffset, requestedLength);
- }
- ReadBuffer buffer;
- synchronized (this) {
- if (isAlreadyQueued(stream, requestedOffset)) {
- return; // already queued, do not queue again
- }
- if (freeList.isEmpty() && !tryEvict()) {
- return; // no buffers available, cannot queue anything
- }
-
- buffer = new ReadBuffer();
- buffer.setStream(stream);
- buffer.setOffset(requestedOffset);
- buffer.setLength(0);
- buffer.setRequestedLength(requestedLength);
- buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE);
- buffer.setLatch(new CountDownLatch(1));
-
- Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already
-
- buffer.setBuffer(buffers[bufferIndex]);
- buffer.setBufferindex(bufferIndex);
- readAheadQueue.add(buffer);
- notifyAll();
- }
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}",
- stream.getPath(), requestedOffset, buffer.getBufferindex());
- }
- }
-
-
- /**
- * {@link SeaweedInputStream} calls this method read any bytes already available in a buffer (thereby saving a
- * remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading
- * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
- * but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because
- * depending on worker thread availability, the read-ahead may take a while - the calling thread can do its own
- * read to get the data faster (compared to the read waiting in queue for an indeterminate amount of time).
- *
- * @param stream the file to read bytes for
- * @param position the offset in the file to do a read for
- * @param length the length to read
- * @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0.
- * @return the number of bytes read
- */
- int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) {
- // not synchronized, so have to be careful with locking
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("getBlock for file {} position {} thread {}",
- stream.getPath(), position, Thread.currentThread().getName());
- }
-
- waitForProcess(stream, position);
-
- int bytesRead = 0;
- synchronized (this) {
- bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer);
- }
- if (bytesRead > 0) {
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Done read from Cache for {} position {} length {}",
- stream.getPath(), position, bytesRead);
- }
- return bytesRead;
- }
-
- // otherwise, just say we got nothing - calling thread can do its own read
- return 0;
- }
-
- /*
- *
- * Internal methods
- *
- */
-
- private void waitForProcess(final SeaweedInputStream stream, final long position) {
- ReadBuffer readBuf;
- synchronized (this) {
- clearFromReadAheadQueue(stream, position);
- readBuf = getFromList(inProgressList, stream, position);
- }
- if (readBuf != null) { // if in in-progress queue, then block for it
- try {
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}",
- stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
- }
- readBuf.getLatch().await(); // blocking wait on the caller stream's thread
- // Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
- // is done processing it (in doneReading). There, the latch is set after removing the buffer from
- // inProgressList. So this latch is safe to be outside the synchronized block.
- // Putting it in synchronized would result in a deadlock, since this thread would be holding the lock
- // while waiting, so no one will be able to change any state. If this becomes more complex in the future,
- // then the latch can be removed and replaced with wait/notify whenever inProgressList is touched.
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- }
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("latch done for file {} buffer idx {} length {}",
- stream.getPath(), readBuf.getBufferindex(), readBuf.getLength());
- }
- }
- }
-
- /**
- * If any buffer in the completed list can be reclaimed then reclaim it and return the buffer to the free list.
- * The objective is to find just one buffer - there is no advantage to evicting more than one.
- *
- * @return whether the eviction succeeded - i.e., were we able to free up one buffer
- */
- private synchronized boolean tryEvict() {
- ReadBuffer nodeToEvict = null;
- if (completedReadList.size() <= 0) {
- return false; // there are no evict-able buffers
- }
-
- // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
- for (ReadBuffer buf : completedReadList) {
- if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
- nodeToEvict = buf;
- break;
- }
- }
- if (nodeToEvict != null) {
- return evict(nodeToEvict);
- }
-
- // next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
- for (ReadBuffer buf : completedReadList) {
- if (buf.isAnyByteConsumed()) {
- nodeToEvict = buf;
- break;
- }
- }
-
- if (nodeToEvict != null) {
- return evict(nodeToEvict);
- }
-
- // next, try any old nodes that have not been consumed
- long earliestBirthday = Long.MAX_VALUE;
- for (ReadBuffer buf : completedReadList) {
- if (buf.getTimeStamp() < earliestBirthday) {
- nodeToEvict = buf;
- earliestBirthday = buf.getTimeStamp();
- }
- }
- if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) {
- return evict(nodeToEvict);
- }
-
- // nothing can be evicted
- return false;
- }
-
- private boolean evict(final ReadBuffer buf) {
- freeList.push(buf.getBufferindex());
- completedReadList.remove(buf);
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}",
- buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength());
- }
- return true;
- }
-
- private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) {
- // returns true if any part of the buffer is already queued
- return (isInList(readAheadQueue, stream, requestedOffset)
- || isInList(inProgressList, stream, requestedOffset)
- || isInList(completedReadList, stream, requestedOffset));
- }
-
- private boolean isInList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
- return (getFromList(list, stream, requestedOffset) != null);
- }
-
- private ReadBuffer getFromList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
- for (ReadBuffer buffer : list) {
- if (buffer.getStream() == stream) {
- if (buffer.getStatus() == ReadBufferStatus.AVAILABLE
- && requestedOffset >= buffer.getOffset()
- && requestedOffset < buffer.getOffset() + buffer.getLength()) {
- return buffer;
- } else if (requestedOffset >= buffer.getOffset()
- && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) {
- return buffer;
- }
- }
- }
- return null;
- }
-
- private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) {
- ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset);
- if (buffer != null) {
- readAheadQueue.remove(buffer);
- notifyAll(); // lock is held in calling method
- freeList.push(buffer.getBufferindex());
- }
- }
-
- private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length,
- final byte[] buffer) {
- ReadBuffer buf = getFromList(completedReadList, stream, position);
- if (buf == null || position >= buf.getOffset() + buf.getLength()) {
- return 0;
- }
- int cursor = (int) (position - buf.getOffset());
- int availableLengthInBuffer = buf.getLength() - cursor;
- int lengthToCopy = Math.min(length, availableLengthInBuffer);
- System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy);
- if (cursor == 0) {
- buf.setFirstByteConsumed(true);
- }
- if (cursor + lengthToCopy == buf.getLength()) {
- buf.setLastByteConsumed(true);
- }
- buf.setAnyByteConsumed(true);
- return lengthToCopy;
- }
-
- /*
- *
- * ReadBufferWorker-thread-facing methods
- *
- */
-
- /**
- * ReadBufferWorker thread calls this to get the next buffer that it should work on.
- *
- * @return {@link ReadBuffer}
- * @throws InterruptedException if thread is interrupted
- */
- ReadBuffer getNextBlockToRead() throws InterruptedException {
- ReadBuffer buffer = null;
- synchronized (this) {
- //buffer = readAheadQueue.take(); // blocking method
- while (readAheadQueue.size() == 0) {
- wait();
- }
- buffer = readAheadQueue.remove();
- notifyAll();
- if (buffer == null) {
- return null; // should never happen
- }
- buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
- inProgressList.add(buffer);
- }
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
- buffer.getStream().getPath(), buffer.getOffset());
- }
- return buffer;
- }
-
- /**
- * ReadBufferWorker thread calls this method to post completion.
- *
- * @param buffer the buffer whose read was completed
- * @param result the {@link ReadBufferStatus} after the read operation in the worker thread
- * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read
- */
- void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}",
- buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead);
- }
- synchronized (this) {
- inProgressList.remove(buffer);
- if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) {
- buffer.setStatus(ReadBufferStatus.AVAILABLE);
- buffer.setTimeStamp(currentTimeMillis());
- buffer.setLength(bytesActuallyRead);
- completedReadList.add(buffer);
- } else {
- freeList.push(buffer.getBufferindex());
- // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC
- }
- }
- //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results
- buffer.getLatch().countDown(); // wake up waiting threads (if any)
- }
-
- /**
- * Similar to System.currentTimeMillis, except implemented with System.nanoTime().
- * System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
- * making it unsuitable for measuring time intervals. System.nanoTime() is strictly monotonically increasing per CPU core.
- * Note: it is not monotonic across sockets, and even within a CPU it is only the
- * more recent parts that share a clock across all cores.
- *
- * @return current time in milliseconds
- */
- private long currentTimeMillis() {
- return System.nanoTime() / 1000 / 1000;
- }
-}
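The currentTimeMillis() helper above timestamps completed buffers with a nanoTime-derived millisecond clock so eviction ages never go negative when NTP steps the wall clock. The idiom in isolation, as a standalone sketch:

    class MonotonicMillisSketch {
        // System.nanoTime() does not jump backwards within a JVM, unlike
        // System.currentTimeMillis(), so differences are safe interval measurements.
        static long nowMillis() {
            return System.nanoTime() / 1000 / 1000;
        }

        public static void main(String[] args) throws InterruptedException {
            long start = nowMillis();
            Thread.sleep(50);
            System.out.println("age ms: " + (nowMillis() - start)); // ~50, never negative
        }
    }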
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java
deleted file mode 100644
index 6ffbc4644..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package seaweed.hdfs;
-
-import java.util.concurrent.CountDownLatch;
-
-class ReadBufferWorker implements Runnable {
-
- protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1);
- private int id;
-
- ReadBufferWorker(final int id) {
- this.id = id;
- }
-
- /**
- * return the ID of ReadBufferWorker.
- */
- public int getId() {
- return this.id;
- }
-
- /**
- * Waits until a buffer becomes available in ReadAheadQueue.
- * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
- * Rinse and repeat. Forever.
- */
- public void run() {
- try {
- UNLEASH_WORKERS.await();
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- }
- ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
- ReadBuffer buffer;
- while (true) {
- try {
- buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- return;
- }
- if (buffer != null) {
- try {
- // do the actual read, from the file.
- int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength());
- bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager
- } catch (Exception ex) {
- bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
- }
- }
- }
- }
-}
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java
similarity index 64%
rename from other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java
rename to other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java
index d63674977..e021401aa 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java
@@ -18,12 +18,18 @@
package seaweed.hdfs;
-/**
- * The ReadBufferStatus for Rest AbfsClient
- */
-public enum ReadBufferStatus {
- NOT_AVAILABLE, // buffers sitting in the read-ahead queue have this status
- READING_IN_PROGRESS, // reading is in progress on this buffer. Buffer should be in inProgressList
- AVAILABLE, // data is available in buffer. It should be in completedList
- READ_FAILED // read completed, but failed.
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+public class SeaweedAbstractFileSystem extends DelegateToFileSystem {
+
+ SeaweedAbstractFileSystem(final URI uri, final Configuration conf)
+ throws IOException, URISyntaxException {
+ super(uri, new SeaweedFileSystem(), conf, "seaweedfs", false);
+ }
+
}
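DelegateToFileSystem is Hadoop's shim for exposing a FileSystem implementation through the newer FileContext API. For the class above to be used, the seaweedfs scheme has to be mapped to both classes; a configuration sketch following Hadoop's conventional fs.<scheme>.impl and fs.AbstractFileSystem.<scheme>.impl keys (key names assumed from that convention, verify against your deployment):

    import org.apache.hadoop.conf.Configuration;

    class SeaweedSchemeRegistrationSketch {
        static Configuration withSeaweed() {
            Configuration conf = new Configuration();
            // FileSystem API, used by most applications
            conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
            // FileContext API resolves the scheme through AbstractFileSystem
            conf.set("fs.AbstractFileSystem.seaweedfs.impl",
                    "seaweed.hdfs.SeaweedAbstractFileSystem");
            return conf;
        }
    }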
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index c12da8261..25395db7a 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -5,31 +5,31 @@ import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import seaweedfs.client.FilerProto;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-
public class SeaweedFileSystem extends FileSystem {
- public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+ public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
+ public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
+ public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
+ public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access";
+ public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
- private static int BUFFER_SIZE = 16 * 1024 * 1024;
private URI uri;
private Path workingDirectory = new Path("/");
@@ -60,15 +60,19 @@ public class SeaweedFileSystem extends FileSystem {
port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
conf.setInt(FS_SEAWEED_FILER_PORT, port);
- conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE);
-
setConf(conf);
this.uri = uri;
- seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+ seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
}
+ @Override
+ public void close() throws IOException {
+ super.close();
+ this.seaweedFileSystemStore.close();
+ }
+
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
@@ -77,8 +81,9 @@ public class SeaweedFileSystem extends FileSystem {
path = qualify(path);
try {
- InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
- return new FSDataInputStream(inputStream);
+ int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
+ FSInputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics);
+ return new FSDataInputStream(new BufferedByteBufferReadableInputStream(inputStream, 4 * seaweedBufferSize));
} catch (Exception ex) {
LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
return null;
@@ -94,8 +99,9 @@ public class SeaweedFileSystem extends FileSystem {
path = qualify(path);
try {
- String replicaPlacement = String.format("%03d", replication - 1);
- OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
+ String replicaPlacement = this.getConf().get(FS_SEAWEED_REPLICATION, String.format("%03d", replication - 1));
+ int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
+ OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement);
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
@@ -105,8 +111,9 @@ public class SeaweedFileSystem extends FileSystem {
/**
* {@inheritDoc}
+ *
* @throws FileNotFoundException if the parent directory is not present -or
- * is not a directory.
+ * is not a directory.
*/
@Override
public FSDataOutputStream createNonRecursive(Path path,
@@ -123,9 +130,10 @@ public class SeaweedFileSystem extends FileSystem {
throw new FileAlreadyExistsException("Not a directory: " + parent);
}
}
+ int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
return create(path, permission,
flags.contains(CreateFlag.OVERWRITE), bufferSize,
- replication, blockSize, progress);
+ replication, seaweedBufferSize, progress);
}
@Override
@@ -135,7 +143,8 @@ public class SeaweedFileSystem extends FileSystem {
path = qualify(path);
try {
- OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
+ int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
+ OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, seaweedBufferSize, "");
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
@@ -144,7 +153,7 @@ public class SeaweedFileSystem extends FileSystem {
}
@Override
- public boolean rename(Path src, Path dst) {
+ public boolean rename(Path src, Path dst) throws IOException {
LOG.debug("rename path: {} => {}", src, dst);
@@ -155,12 +164,13 @@ public class SeaweedFileSystem extends FileSystem {
if (src.equals(dst)) {
return true;
}
- FileStatus dstFileStatus = getFileStatus(dst);
+ FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(dst);
- String sourceFileName = src.getName();
Path adjustedDst = dst;
- if (dstFileStatus != null) {
+ if (entry != null) {
+ FileStatus dstFileStatus = getFileStatus(dst);
+ String sourceFileName = src.getName();
if (!dstFileStatus.isDirectory()) {
return false;
}
@@ -175,18 +185,20 @@ public class SeaweedFileSystem extends FileSystem {
}
@Override
- public boolean delete(Path path, boolean recursive) {
+ public boolean delete(Path path, boolean recursive) throws IOException {
LOG.debug("delete path: {} recursive:{}", path, recursive);
path = qualify(path);
- FileStatus fileStatus = getFileStatus(path);
+ FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path);
- if (fileStatus == null) {
+ if (entry == null) {
return true;
}
+ FileStatus fileStatus = getFileStatus(path);
+
return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive);
}
@@ -222,9 +234,9 @@ public class SeaweedFileSystem extends FileSystem {
path = qualify(path);
- FileStatus fileStatus = getFileStatus(path);
+ FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path);
- if (fileStatus == null) {
+ if (entry == null) {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
return seaweedFileSystemStore.createDirectory(path, currentUser,
@@ -233,6 +245,8 @@ public class SeaweedFileSystem extends FileSystem {
}
+ FileStatus fileStatus = getFileStatus(path);
+
if (fileStatus.isDirectory()) {
return true;
} else {
@@ -241,7 +255,7 @@ public class SeaweedFileSystem extends FileSystem {
}
@Override
- public FileStatus getFileStatus(Path path) {
+ public FileStatus getFileStatus(Path path) throws IOException {
LOG.debug("getFileStatus path: {}", path);
@@ -335,9 +349,7 @@ public class SeaweedFileSystem extends FileSystem {
@Override
public void createSymlink(final Path target, final Path link,
- final boolean createParent) throws AccessControlException,
- FileAlreadyExistsException, FileNotFoundException,
- ParentNotDirectoryException, UnsupportedFileSystemException,
+ final boolean createParent) throws
IOException {
// Supporting filesystems should override this method
throw new UnsupportedOperationException(
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 774c090e8..f4e8c9349 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -1,5 +1,7 @@
package seaweed.hdfs;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -7,30 +9,43 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import seaweedfs.client.FilerClient;
-import seaweedfs.client.FilerGrpcClient;
-import seaweedfs.client.FilerProto;
-import seaweedfs.client.SeaweedRead;
+import seaweedfs.client.*;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import static seaweed.hdfs.SeaweedFileSystem.*;
+
public class SeaweedFileSystemStore {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);
- private FilerGrpcClient filerGrpcClient;
private FilerClient filerClient;
+ private Configuration conf;
- public SeaweedFileSystemStore(String host, int port) {
+ public SeaweedFileSystemStore(String host, int port, Configuration conf) {
int grpcPort = 10000 + port;
- filerGrpcClient = new FilerGrpcClient(host, grpcPort);
- filerClient = new FilerClient(filerGrpcClient);
+ filerClient = new FilerClient(host, grpcPort);
+ this.conf = conf;
+ String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
+ if (volumeServerAccessMode.equals("publicUrl")) {
+ filerClient.setAccessVolumeServerByPublicUrl();
+ } else if (volumeServerAccessMode.equals("filerProxy")) {
+ filerClient.setAccessVolumeServerByFilerProxy();
+ }
+
+ }
+
+ public void close() {
+ try {
+ this.filerClient.shutdown();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
}
public static String getParentDirectory(Path path) {
@@ -61,9 +76,19 @@ public class SeaweedFileSystemStore {
);
}
- public FileStatus[] listEntries(final Path path) {
+ public FileStatus[] listEntries(final Path path) throws IOException {
LOG.debug("listEntries path: {}", path);
+ FileStatus pathStatus = getFileStatus(path);
+
+ if (pathStatus == null) {
+ return new FileStatus[0];
+ }
+
+ if (!pathStatus.isDirectory()) {
+ return new FileStatus[]{pathStatus};
+ }
+
List<FileStatus> fileStatuses = new ArrayList<FileStatus>();
List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
@@ -74,14 +99,16 @@ public class SeaweedFileSystemStore {
fileStatuses.add(fileStatus);
}
+ LOG.debug("listEntries path: {} size {}", path, fileStatuses.size());
return fileStatuses.toArray(new FileStatus[0]);
+
}
- public FileStatus getFileStatus(final Path path) {
+ public FileStatus getFileStatus(final Path path) throws IOException {
FilerProto.Entry entry = lookupEntry(path);
if (entry == null) {
- return null;
+ throw new FileNotFoundException("File does not exist: " + path);
}
LOG.debug("doGetFileStatus path:{} entry:{}", path, entry);
@@ -111,10 +138,10 @@ public class SeaweedFileSystemStore {
private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {
FilerProto.FuseAttributes attributes = entry.getAttributes();
- long length = SeaweedRead.totalSize(entry.getChunksList());
+ long length = SeaweedRead.fileSize(entry);
boolean isDir = entry.getIsDirectory();
int block_replication = 1;
- int blocksize = 512;
+ int blocksize = this.conf.getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
long modification_time = attributes.getMtime() * 1000; // milliseconds
long access_time = 0;
FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode());
@@ -124,7 +151,7 @@ public class SeaweedFileSystemStore {
modification_time, access_time, permission, owner, group, null, path);
}
- private FilerProto.Entry lookupEntry(Path path) {
+ public FilerProto.Entry lookupEntry(Path path) {
return filerClient.lookupEntry(getParentDirectory(path), path.getName());
@@ -170,9 +197,10 @@ public class SeaweedFileSystemStore {
if (existingEntry != null) {
entry = FilerProto.Entry.newBuilder();
entry.mergeFrom(existingEntry);
+ entry.clearContent();
entry.getAttributesBuilder().setMtime(now);
LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
- writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ writePosition = SeaweedRead.fileSize(existingEntry);
replication = existingEntry.getAttributes().getReplication();
}
}
@@ -189,30 +217,27 @@ public class SeaweedFileSystemStore {
.clearGroupName()
.addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
);
+ SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
}
- return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);
+ return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);
}
- public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics,
- int bufferSize) throws IOException {
+ public FSInputStream openFileForRead(final Path path, FileSystem.Statistics statistics) throws IOException {
- LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize);
+ LOG.debug("openFileForRead path:{}", path);
- int readAheadQueueDepth = 2;
FilerProto.Entry entry = lookupEntry(path);
if (entry == null) {
throw new FileNotFoundException("read non-exist file " + path);
}
- return new SeaweedInputStream(filerGrpcClient,
+ return new SeaweedHadoopInputStream(filerClient,
statistics,
path.toUri().getPath(),
- entry,
- bufferSize,
- readAheadQueueDepth);
+ entry);
}
public void setOwner(Path path, String owner, String group) {
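The reworked store derives its gRPC endpoint from the filer port (port + 10000) and honors fs.seaweed.volume.server.access, which selects how volume servers are reached: "direct" (the default), "publicUrl", or "filerProxy". A small configuration sketch using only the keys introduced in this patch (the values are illustrative):

    import org.apache.hadoop.conf.Configuration;

    class SeaweedStoreConfigSketch {
        static Configuration tuned() {
            Configuration conf = new Configuration();
            conf.set("fs.seaweed.filer.host", "localhost");
            conf.setInt("fs.seaweed.filer.port", 8888);         // gRPC then assumed at 18888
            conf.setInt("fs.seaweed.buffer.size", 4 * 1024 * 1024);
            conf.set("fs.seaweed.replication", "000");
            // route reads through the filer instead of hitting volume servers directly
            conf.set("fs.seaweed.volume.server.access", "filerProxy");
            return conf;
        }
    }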
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
new file mode 100644
index 000000000..f26eae597
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
@@ -0,0 +1,150 @@
+package seaweed.hdfs;
+
+// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream
+
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedInputStream;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class SeaweedHadoopInputStream extends FSInputStream implements ByteBufferReadable {
+
+ private final SeaweedInputStream seaweedInputStream;
+ private final Statistics statistics;
+
+ public SeaweedHadoopInputStream(
+ final FilerClient filerClient,
+ final Statistics statistics,
+ final String path,
+ final FilerProto.Entry entry) throws IOException {
+ this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry);
+ this.statistics = statistics;
+ }
+
+ @Override
+ public int read() throws IOException {
+ return seaweedInputStream.read();
+ }
+
+ @Override
+ public int read(final byte[] b, final int off, final int len) throws IOException {
+ return seaweedInputStream.read(b, off, len);
+ }
+
+ // implement ByteBufferReadable
+ @Override
+ public synchronized int read(ByteBuffer buf) throws IOException {
+ int bytesRead = seaweedInputStream.read(buf);
+
+ if (bytesRead > 0) {
+ if (statistics != null) {
+ statistics.incrementBytesRead(bytesRead);
+ }
+ }
+
+ return bytesRead;
+ }
+
+ /**
+ * Seek to given position in stream.
+ *
+ * @param n position to seek to
+ * @throws IOException if there is an error
+ * @throws EOFException if attempting to seek past end of file
+ */
+ @Override
+ public synchronized void seek(long n) throws IOException {
+ seaweedInputStream.seek(n);
+ }
+
+ @Override
+ public synchronized long skip(long n) throws IOException {
+ return seaweedInputStream.skip(n);
+ }
+
+ /**
+ * Return the size of the remaining available bytes
+ * if the size is less than or equal to {@link Integer#MAX_VALUE},
+ * otherwise, return {@link Integer#MAX_VALUE}.
+ *
+ * This is to match the behavior of DFSInputStream.available(),
+ * which some clients may rely on (HBase write-ahead log reading in
+ * particular).
+ */
+ @Override
+ public synchronized int available() throws IOException {
+ return seaweedInputStream.available();
+ }
+
+ /**
+ * Returns the length of the file that this stream refers to. Note that the length returned is the length
+ * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
+ * they won't be reflected in the returned length.
+ *
+ * @return length of the file.
+ * @throws IOException if the stream is closed
+ */
+ public long length() throws IOException {
+ return seaweedInputStream.length();
+ }
+
+ /**
+ * Return the current offset from the start of the file
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public synchronized long getPos() throws IOException {
+ return seaweedInputStream.getPos();
+ }
+
+ /**
+ * Seeks a different copy of the data. Returns true if
+ * found a new source, false otherwise.
+ *
+ * @throws IOException throws {@link IOException} if there is an error
+ */
+ @Override
+ public boolean seekToNewSource(long l) throws IOException {
+ return false;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ seaweedInputStream.close();
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ *
+ * @param readlimit ignored
+ */
+ @Override
+ public synchronized void mark(int readlimit) {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Not supported by this stream. Throws {@link UnsupportedOperationException}
+ */
+ @Override
+ public synchronized void reset() throws IOException {
+ throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+ }
+
+ /**
+ * Gets whether mark and reset are supported by this stream. Always returns false.
+ *
+ * @return always {@code false}
+ */
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+}
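A caller-side sketch of the ByteBufferReadable path exposed above; each positive read also increments the FileSystem Statistics counters, as the synchronized read(ByteBuffer) override shows. Names here are illustrative:

    import org.apache.hadoop.fs.FSDataInputStream;

    import java.io.IOException;
    import java.nio.ByteBuffer;

    class ByteBufferDrainSketch {
        static long drain(FSDataInputStream in) throws IOException {
            ByteBuffer buf = ByteBuffer.allocate(128 * 1024);
            long total = 0;
            int n;
            while ((n = in.read(buf)) > 0) { // a non-positive return is treated as EOF
                total += n;
                buf.clear(); // reuse the same buffer for the next chunk
            }
            return total;
        }
    }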
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
new file mode 100644
index 000000000..1740312fe
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
@@ -0,0 +1,64 @@
+package seaweed.hdfs;
+
+// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
+
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.fs.Syncable;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedOutputStream;
+
+import java.io.IOException;
+import java.util.Locale;
+
+public class SeaweedHadoopOutputStream extends SeaweedOutputStream implements Syncable, StreamCapabilities {
+
+ public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
+ final long position, final int bufferSize, final String replication) {
+ super(filerClient, path, entry, position, bufferSize, replication);
+ }
+
+ /**
+ * Similar to posix fsync, flush out the data in client's user buffer
+ * all the way to the disk device (but the disk may have it in its cache).
+ *
+ * @throws IOException if error occurs
+ */
+ @Override
+ public void hsync() throws IOException {
+ if (supportFlush) {
+ flushInternal();
+ }
+ }
+
+ /**
+ * Flush out the data in client's user buffer. After the return of
+ * this call, new readers will see the data.
+ *
+ * @throws IOException if any error occurs
+ */
+ @Override
+ public void hflush() throws IOException {
+ if (supportFlush) {
+ flushInternal();
+ }
+ }
+
+ /**
+ * Query the stream for a specific capability.
+ *
+ * @param capability string to query the stream support for.
+ * @return true for hsync and hflush.
+ */
+ @Override
+ public boolean hasCapability(String capability) {
+ switch (capability.toLowerCase(Locale.ENGLISH)) {
+ case StreamCapabilities.HSYNC:
+ case StreamCapabilities.HFLUSH:
+ return supportFlush;
+ default:
+ return false;
+ }
+ }
+
+}
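Because the stream reports hflush/hsync through StreamCapabilities, callers can probe for the capability before forcing buffered data out. A usage sketch, assuming an already configured FileSystem:

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StreamCapabilities;

    import java.io.IOException;

    class HflushSketch {
        static void writeVisibly(FileSystem fs, Path path, byte[] data) throws IOException {
            try (FSDataOutputStream out = fs.create(path)) {
                out.write(data);
                if (out.hasCapability(StreamCapabilities.HFLUSH)) {
                    out.hflush(); // new readers see the data once this returns
                }
            }
        }
    }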
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java
deleted file mode 100644
index 90c14c772..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java
+++ /dev/null
@@ -1,371 +0,0 @@
-package seaweed.hdfs;
-
-// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import seaweedfs.client.FilerGrpcClient;
-import seaweedfs.client.FilerProto;
-import seaweedfs.client.SeaweedRead;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.util.List;
-
-public class SeaweedInputStream extends FSInputStream {
-
- private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);
-
- private final FilerGrpcClient filerGrpcClient;
- private final Statistics statistics;
- private final String path;
- private final FilerProto.Entry entry;
- private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
- private final long contentLength;
- private final int bufferSize; // default buffer size
- private final int readAheadQueueDepth; // initialized in constructor
- private final boolean readAheadEnabled; // whether enable readAhead;
-
- private byte[] buffer = null; // will be initialized on first use
-
- private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server
- private long fCursorAfterLastRead = -1;
- private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer
- private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1
- // of valid bytes in buffer)
- private boolean closed = false;
-
- public SeaweedInputStream(
- final FilerGrpcClient filerGrpcClient,
- final Statistics statistics,
- final String path,
- final FilerProto.Entry entry,
- final int bufferSize,
- final int readAheadQueueDepth) {
- this.filerGrpcClient = filerGrpcClient;
- this.statistics = statistics;
- this.path = path;
- this.entry = entry;
- this.contentLength = SeaweedRead.totalSize(entry.getChunksList());
- this.bufferSize = bufferSize;
- this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? readAheadQueueDepth : Runtime.getRuntime().availableProcessors();
- this.readAheadEnabled = true;
-
- this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList());
-
- LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);
-
- }
-
- public String getPath() {
- return path;
- }
-
- @Override
- public int read() throws IOException {
- byte[] b = new byte[1];
- int numberOfBytesRead = read(b, 0, 1);
- if (numberOfBytesRead < 0) {
- return -1;
- } else {
- return (b[0] & 0xFF);
- }
- }
-
- @Override
- public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
- int currentOff = off;
- int currentLen = len;
- int lastReadBytes;
- int totalReadBytes = 0;
- do {
- lastReadBytes = readOneBlock(b, currentOff, currentLen);
- if (lastReadBytes > 0) {
- currentOff += lastReadBytes;
- currentLen -= lastReadBytes;
- totalReadBytes += lastReadBytes;
- }
- if (currentLen <= 0 || currentLen > b.length - currentOff) {
- break;
- }
- } while (lastReadBytes > 0);
- return totalReadBytes > 0 ? totalReadBytes : lastReadBytes;
- }
-
- private int readOneBlock(final byte[] b, final int off, final int len) throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
-
- Preconditions.checkNotNull(b);
-
- if (len == 0) {
- return 0;
- }
-
- if (this.available() == 0) {
- return -1;
- }
-
- if (off < 0 || len < 0 || len > b.length - off) {
- throw new IndexOutOfBoundsException();
- }
-
- //If buffer is empty, then fill the buffer.
- if (bCursor == limit) {
- //If EOF, then return -1
- if (fCursor >= contentLength) {
- return -1;
- }
-
- long bytesRead = 0;
- //reset buffer to initial state - i.e., throw away existing data
- bCursor = 0;
- limit = 0;
- if (buffer == null) {
- buffer = new byte[bufferSize];
- }
-
- // Enable readAhead when reading sequentially
- if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) {
- bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false);
- } else {
- bytesRead = readInternal(fCursor, buffer, 0, b.length, true);
- }
-
- if (bytesRead == -1) {
- return -1;
- }
-
- limit += bytesRead;
- fCursor += bytesRead;
- fCursorAfterLastRead = fCursor;
- }
-
- //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer)
- //(bytes returned may be less than requested)
- int bytesRemaining = limit - bCursor;
- int bytesToRead = Math.min(len, bytesRemaining);
- System.arraycopy(buffer, bCursor, b, off, bytesToRead);
- bCursor += bytesToRead;
- if (statistics != null) {
- statistics.incrementBytesRead(bytesToRead);
- }
- return bytesToRead;
- }
-
-
- private int readInternal(final long position, final byte[] b, final int offset, final int length,
- final boolean bypassReadAhead) throws IOException {
- if (readAheadEnabled && !bypassReadAhead) {
- // try reading from read-ahead
- if (offset != 0) {
- throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets");
- }
- int receivedBytes;
-
- // queue read-aheads
- int numReadAheads = this.readAheadQueueDepth;
- long nextSize;
- long nextOffset = position;
- while (numReadAheads > 0 && nextOffset < contentLength) {
- nextSize = Math.min((long) bufferSize, contentLength - nextOffset);
- ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize);
- nextOffset = nextOffset + nextSize;
- numReadAheads--;
- }
-
- // try reading from buffers first
- receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b);
- if (receivedBytes > 0) {
- return receivedBytes;
- }
-
- // got nothing from read-ahead, do our own read now
- receivedBytes = readRemote(position, b, offset, length);
- return receivedBytes;
- } else {
- return readRemote(position, b, offset, length);
- }
- }
-
- int readRemote(long position, byte[] b, int offset, int length) throws IOException {
- if (position < 0) {
- throw new IllegalArgumentException("attempting to read from negative offset");
- }
- if (position >= contentLength) {
- return -1; // Hadoop prefers -1 to EOFException
- }
- if (b == null) {
- throw new IllegalArgumentException("null byte array passed in to read() method");
- }
- if (offset >= b.length) {
- throw new IllegalArgumentException("offset greater than length of array");
- }
- if (length < 0) {
- throw new IllegalArgumentException("requested read length is less than zero");
- }
- if (length > (b.length - offset)) {
- throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
- }
-
- long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length);
- if (bytesRead > Integer.MAX_VALUE) {
- throw new IOException("Unexpected Content-Length");
- }
- return (int) bytesRead;
- }
-
- /**
- * Seek to given position in stream.
- *
- * @param n position to seek to
- * @throws IOException if there is an error
- * @throws EOFException if attempting to seek past end of file
- */
- @Override
- public synchronized void seek(long n) throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- if (n < 0) {
- throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
- }
- if (n > contentLength) {
- throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
- }
-
- if (n >= fCursor - limit && n <= fCursor) { // within buffer
- bCursor = (int) (n - (fCursor - limit));
- return;
- }
-
- // next read will read from here
- fCursor = n;
-
- //invalidate buffer
- limit = 0;
- bCursor = 0;
- }
-
- @Override
- public synchronized long skip(long n) throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- long currentPos = getPos();
- if (currentPos == contentLength) {
- if (n > 0) {
- throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
- }
- }
- long newPos = currentPos + n;
- if (newPos < 0) {
- newPos = 0;
- n = newPos - currentPos;
- }
- if (newPos > contentLength) {
- newPos = contentLength;
- n = newPos - currentPos;
- }
- seek(newPos);
- return n;
- }
-
- /**
- * Return the size of the remaining available bytes
- * if the size is less than or equal to {@link Integer#MAX_VALUE},
- * otherwise, return {@link Integer#MAX_VALUE}.
- *
- * This is to match the behavior of DFSInputStream.available(),
- * which some clients may rely on (HBase write-ahead log reading in
- * particular).
- */
- @Override
- public synchronized int available() throws IOException {
- if (closed) {
- throw new IOException(
- FSExceptionMessages.STREAM_IS_CLOSED);
- }
- final long remaining = this.contentLength - this.getPos();
- return remaining <= Integer.MAX_VALUE
- ? (int) remaining : Integer.MAX_VALUE;
- }
-
- /**
- * Returns the length of the file that this stream refers to. Note that the length returned is the length
- * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
- * they won't be reflected in the returned length.
- *
- * @return length of the file.
- * @throws IOException if the stream is closed
- */
- public long length() throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- return contentLength;
- }
-
- /**
- * Return the current offset from the start of the file
- *
- * @throws IOException throws {@link IOException} if there is an error
- */
- @Override
- public synchronized long getPos() throws IOException {
- if (closed) {
- throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- }
- return fCursor - limit + bCursor;
- }
-
- /**
- * Seeks a different copy of the data. Returns true if
- * found a new source, false otherwise.
- *
- * @throws IOException throws {@link IOException} if there is an error
- */
- @Override
- public boolean seekToNewSource(long l) throws IOException {
- return false;
- }
-
- @Override
- public synchronized void close() throws IOException {
- closed = true;
- buffer = null; // de-reference the buffer so it can be GC'ed sooner
- }
-
- /**
- * Not supported by this stream. Throws {@link UnsupportedOperationException}
- *
- * @param readlimit ignored
- */
- @Override
- public synchronized void mark(int readlimit) {
- throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
- }
-
- /**
- * Not supported by this stream. Throws {@link UnsupportedOperationException}
- */
- @Override
- public synchronized void reset() throws IOException {
- throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
- }
-
- /**
- * Gets whether mark and reset are supported by this stream. Always returns false.
- *
- * @return always {@code false}
- */
- @Override
- public boolean markSupported() {
- return false;
- }
-}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
deleted file mode 100644
index 4f307ff96..000000000
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
+++ /dev/null
@@ -1,335 +0,0 @@
-package seaweed.hdfs;
-
-// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StreamCapabilities;
-import org.apache.hadoop.fs.Syncable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import seaweedfs.client.FilerGrpcClient;
-import seaweedfs.client.FilerProto;
-import seaweedfs.client.SeaweedWrite;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.util.Locale;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedDeque;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
-
-public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities {
-
- private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);
-
- private final FilerGrpcClient filerGrpcClient;
- private final Path path;
- private final int bufferSize;
- private final int maxConcurrentRequestCount;
- private final ThreadPoolExecutor threadExecutor;
- private final ExecutorCompletionService<Void> completionService;
- private FilerProto.Entry.Builder entry;
- private long position;
- private boolean closed;
- private boolean supportFlush = true;
- private volatile IOException lastError;
- private long lastFlushOffset;
- private long lastTotalAppendOffset = 0;
- private byte[] buffer;
- private int bufferIndex;
- private ConcurrentLinkedDeque<WriteOperation> writeOperations;
- private String replication = "000";
-
- public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
- final long position, final int bufferSize, final String replication) {
- this.filerGrpcClient = filerGrpcClient;
- this.replication = replication;
- this.path = path;
- this.position = position;
- this.closed = false;
- this.lastError = null;
- this.lastFlushOffset = 0;
- this.bufferSize = bufferSize;
- this.buffer = new byte[bufferSize];
- this.bufferIndex = 0;
- this.writeOperations = new ConcurrentLinkedDeque<>();
-
- this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors();
-
- this.threadExecutor
- = new ThreadPoolExecutor(maxConcurrentRequestCount,
- maxConcurrentRequestCount,
- 10L,
- TimeUnit.SECONDS,
- new LinkedBlockingQueue<Runnable>());
- this.completionService = new ExecutorCompletionService<>(this.threadExecutor);
-
- this.entry = entry;
-
- }
-
- private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
-
- LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);
-
- try {
- SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
- } catch (Exception ex) {
- throw new IOException(ex);
- }
- this.lastFlushOffset = offset;
- }
-
- @Override
- public void write(final int byteVal) throws IOException {
- write(new byte[]{(byte) (byteVal & 0xFF)});
- }
-
- @Override
- public synchronized void write(final byte[] data, final int off, final int length)
- throws IOException {
- maybeThrowLastError();
-
- Preconditions.checkArgument(data != null, "null data");
-
- if (off < 0 || length < 0 || length > data.length - off) {
- throw new IndexOutOfBoundsException();
- }
-
- int currentOffset = off;
- int writableBytes = bufferSize - bufferIndex;
- int numberOfBytesToWrite = length;
-
- while (numberOfBytesToWrite > 0) {
- if (writableBytes <= numberOfBytesToWrite) {
- System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
- bufferIndex += writableBytes;
- writeCurrentBufferToService();
- currentOffset += writableBytes;
- numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
- } else {
- System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite);
- bufferIndex += numberOfBytesToWrite;
- numberOfBytesToWrite = 0;
- }
-
- writableBytes = bufferSize - bufferIndex;
- }
- }
-
- /**
- * Flushes this output stream and forces any buffered output bytes to be
- * written out. If any data remains in the buffer, it is committed to the
- * service. Data is queued for writing and forced out to the service
- * before the call returns.
- */
- @Override
- public void flush() throws IOException {
- if (supportFlush) {
- flushInternalAsync();
- }
- }
-
- /**
- * Similar to posix fsync, flush out the data in client's user buffer
- * all the way to the disk device (but the disk may have it in its cache).
- *
- * @throws IOException if error occurs
- */
- @Override
- public void hsync() throws IOException {
- if (supportFlush) {
- flushInternal();
- }
- }
-
- /**
- * Flush out the data in client's user buffer. After the return of
- * this call, new readers will see the data.
- *
- * @throws IOException if any error occurs
- */
- @Override
- public void hflush() throws IOException {
- if (supportFlush) {
- flushInternal();
- }
- }
-
- /**
- * Query the stream for a specific capability.
- *
- * @param capability string to query the stream support for.
- * @return true for hsync and hflush.
- */
- @Override
- public boolean hasCapability(String capability) {
- switch (capability.toLowerCase(Locale.ENGLISH)) {
- case StreamCapabilities.HSYNC:
- case StreamCapabilities.HFLUSH:
- return supportFlush;
- default:
- return false;
- }
- }
-
- /**
- * Force all data in the output stream to be written to SeaweedFS.
- * Wait to return until this is complete. Close the access to the stream and
- * shut down the upload thread pool.
- * Pending chunk uploads are awaited and the file entry is flushed to the filer.
- * Any error caught and stored by the upload threads will be rethrown here
- * after cleanup.
- */
- @Override
- public synchronized void close() throws IOException {
- if (closed) {
- return;
- }
-
- LOG.debug("close path: {}", path);
- try {
- flushInternal();
- threadExecutor.shutdown();
- } finally {
- lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- buffer = null;
- bufferIndex = 0;
- closed = true;
- writeOperations.clear();
- if (!threadExecutor.isShutdown()) {
- threadExecutor.shutdownNow();
- }
- }
- }
-
- private synchronized void writeCurrentBufferToService() throws IOException {
- if (bufferIndex == 0) {
- return;
- }
-
- final byte[] bytes = buffer;
- final int bytesLength = bufferIndex;
-
- buffer = new byte[bufferSize];
- bufferIndex = 0;
- final long offset = position;
- position += bytesLength;
-
- if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
- waitForTaskToComplete();
- }
-
- final Future<Void> job = completionService.submit(new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- // originally: client.append(path, offset, bytes, 0, bytesLength);
- SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength);
- return null;
- }
- });
-
- writeOperations.add(new WriteOperation(job, offset, bytesLength));
-
- // Try to shrink the queue
- shrinkWriteOperationQueue();
- }
-
- private void waitForTaskToComplete() throws IOException {
- boolean completed;
- for (completed = false; completionService.poll() != null; completed = true) {
- // keep polling until there is no data
- }
-
- if (!completed) {
- try {
- completionService.take();
- } catch (InterruptedException e) {
- lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e);
- throw lastError;
- }
- }
- }
-
- private void maybeThrowLastError() throws IOException {
- if (lastError != null) {
- throw lastError;
- }
- }
-
- /**
- * Try to remove the completed write operations from the beginning of write
- * operation FIFO queue.
- */
- private synchronized void shrinkWriteOperationQueue() throws IOException {
- try {
- while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) {
- writeOperations.peek().task.get();
- lastTotalAppendOffset += writeOperations.peek().length;
- writeOperations.remove();
- }
- } catch (Exception e) {
- lastError = new IOException(e);
- throw lastError;
- }
- }
-
- private synchronized void flushInternal() throws IOException {
- maybeThrowLastError();
- writeCurrentBufferToService();
- flushWrittenBytesToService();
- }
-
- private synchronized void flushInternalAsync() throws IOException {
- maybeThrowLastError();
- writeCurrentBufferToService();
- flushWrittenBytesToServiceAsync();
- }
-
- private synchronized void flushWrittenBytesToService() throws IOException {
- for (WriteOperation writeOperation : writeOperations) {
- try {
- writeOperation.task.get();
- } catch (Exception ex) {
- lastError = new IOException(ex);
- throw lastError;
- }
- }
- LOG.debug("flushWrittenBytesToService: {} position:{}", path, position);
- flushWrittenBytesToServiceInternal(position);
- }
-
- private synchronized void flushWrittenBytesToServiceAsync() throws IOException {
- shrinkWriteOperationQueue();
-
- if (this.lastTotalAppendOffset > this.lastFlushOffset) {
- this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset);
- }
- }
-
- private static class WriteOperation {
- private final Future<Void> task;
- private final long startOffset;
- private final long length;
-
- WriteOperation(final Future<Void> task, final long startOffset, final long length) {
- Preconditions.checkNotNull(task, "task");
- Preconditions.checkArgument(startOffset >= 0, "startOffset");
- Preconditions.checkArgument(length >= 0, "length");
-
- this.task = task;
- this.startOffset = startOffset;
- this.length = length;
- }
- }
-
-}
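The write path removed above is a classic swap-and-submit producer: writes fill an in-memory buffer; when it is full, the buffer is swapped for a fresh one and the filled chunk is handed to an ExecutorCompletionService, with back-pressure once the executor queue exceeds twice the worker count. A condensed, hypothetical sketch of the same pattern (only the structure mirrors the deleted code; the class name and upload() are illustrative):

// Condensed, hypothetical sketch of the swap-and-submit pattern that the
// removed SeaweedOutputStream used for its write path; names are illustrative.
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class AsyncChunkWriterSketch {
    private final int bufferSize = 8 * 1024 * 1024;
    private final int maxConcurrent = 4 * Runtime.getRuntime().availableProcessors();
    private final ThreadPoolExecutor executor = new ThreadPoolExecutor(
            maxConcurrent, maxConcurrent, 10L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    private final ExecutorCompletionService<Void> completion =
            new ExecutorCompletionService<>(executor);
    private byte[] buffer = new byte[bufferSize];
    private int bufferIndex = 0;
    private long position = 0;

    synchronized void flushBuffer() throws IOException {
        if (bufferIndex == 0) {
            return;
        }
        final byte[] chunk = buffer;     // hand the filled buffer to a worker...
        final int length = bufferIndex;
        final long offset = position;
        buffer = new byte[bufferSize];   // ...and keep writing into a fresh one
        bufferIndex = 0;
        position += length;

        // back-pressure: if too many uploads are queued, wait for one to finish
        if (executor.getQueue().size() >= maxConcurrent * 2) {
            try {
                completion.take().get();
            } catch (InterruptedException | ExecutionException e) {
                throw new IOException(e);
            }
        }
        completion.submit(() -> {
            upload(offset, chunk, length);   // hypothetical upload call
            return null;
        });
    }

    private void upload(long offset, byte[] data, int length) {
        // write the chunk to storage; stand-in for SeaweedWrite.writeData(...)
    }
}

Swapping buffers keeps write() latency flat while uploads run in parallel; the deleted close() then simply drains the outstanding futures before writing the file metadata.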
diff --git a/other/java/s3copier/pom.xml b/other/java/s3copier/pom.xml
index f8cb9e91c..c3ff30932 100644
--- a/other/java/s3copier/pom.xml
+++ b/other/java/s3copier/pom.xml
@@ -28,7 +28,7 @@
<groupId>junit</groupId>
<artifactId>junit</artifactId>
- <version>3.8.1</version>
+ <version>4.13.1</version>
<scope>test</scope>
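Note that this junit bump crosses the JUnit 3/4 boundary: 3.8.1 tests extend junit.framework.TestCase and rely on test* method naming, while 4.x discovers @Test-annotated methods (old-style TestCase subclasses still run under 4.13.1). A hypothetical annotated test, just to show the newer shape:

// Hypothetical JUnit 4 test shape; not an actual test in this module.
import org.junit.Test;

import static org.junit.Assert.assertEquals;

public class CopierSmokeTest {
    @Test
    public void concatenatesBucketAndKey() {
        assertEquals("bucket/key", "bucket/" + "key");
    }
}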
diff --git a/other/metrics/grafana_seaweedfs.json b/other/metrics/grafana_seaweedfs.json
new file mode 100644
index 000000000..074a3531f
--- /dev/null
+++ b/other/metrics/grafana_seaweedfs.json
@@ -0,0 +1,1856 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS-DEV",
+ "label": "prometheus-dev",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "4.6.2"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Annotations & Alerts",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": 10423,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "refresh": "30s",
+ "rows": [
+ {
+ "collapse": true,
+ "height": 251,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 46,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Request Duration 90th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 49,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Request Duration 95th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 45,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "minSpan": 12,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(SeaweedFS_filer_request_total[1m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 30
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer QPS",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Filer",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 56,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "S3 Request Duration 90th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 57,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "S3 Request Duration 95th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 58,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "S3 Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 55,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "minSpan": 12,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(SeaweedFS_s3_request_total[1m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 30
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "S3 API QPS",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "hideTimeOverride": false,
+ "id": 59,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "minSpan": 12,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (type) (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}} requests",
+ "refId": "A",
+ "step": 30
+ },
+ {
+ "expr": "sum (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "All PUT, COPY, POST, LIST",
+ "refId": "C",
+ "step": 30
+ },
+ {
+ "expr": "sum (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "GET and all other",
+ "refId": "B"
+ },
+ {
+ "expr": "sum by (type) (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}} requests",
+ "refId": "D"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1M",
+ "timeShift": null,
+ "title": "S3 API Monthly Cost if on AWS",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "currencyUSD",
+ "label": "Cost in US$",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "currencyUSD",
+ "label": "Write Cost",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "S3 Gateway",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 252,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 47,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{exported_instance}}",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Server Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 40,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "total",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Server QPS",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "fill": 1,
+ "id": 48,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{collection}} {{type}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(SeaweedFS_volumeServer_max_volumes)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Total",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "fill": 1,
+ "id": 50,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{collection}} {{type}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Total",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Used Disk Space by Collection and Type",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "fill": 1,
+ "id": 51,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{exported_instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Used Disk Space by Host",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Volume Server",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 251,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 12,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Store Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 14,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Store QPS",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Filer Store",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 242,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 52,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "bytes allocated",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "alloc rate",
+ "refId": "A"
+ },
+ {
+ "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "stack inuse",
+ "refId": "C"
+ },
+ {
+ "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "heap inuse",
+ "refId": "D"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Go Memory Stats",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 54,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_gc_duration_seconds{exported_job=\"filer\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{quantile}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Go GC duration quantiles",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS-DEV}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 53,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_goroutines{exported_job=\"filer\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{exported_instance}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Go Routines",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Filer Instances",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-30d",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "SeaweedFS",
+ "version": 2
+}
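The dashboard above keys off SeaweedFS's request histograms (SeaweedFS_filer_request_seconds_bucket, SeaweedFS_s3_request_seconds_bucket, and friends) grouped by a type label. SeaweedFS emits these from its Go servers; as a reference for the metric shape that the histogram_quantile queries expect, here is a minimal, hypothetical sketch of an equivalently shaped metric using the Prometheus Java simpleclient:

// Minimal, hypothetical sketch of a histogram matching the dashboard's
// queries, using the Prometheus Java simpleclient. SeaweedFS emits the
// real metrics from its Go servers; this only illustrates the shape.
import io.prometheus.client.Histogram;

public class RequestMetricsSketch {
    static final Histogram requestSeconds = Histogram.build()
            .name("SeaweedFS_filer_request_seconds")
            .help("Filer request latency in seconds.")
            .labelNames("type")                 // "type" feeds the by (le, type) grouping
            .register();

    static void timeRequest(String type, Runnable handler) {
        Histogram.Timer timer = requestSeconds.labels(type).startTimer();
        try {
            handler.run();
        } finally {
            timer.observeDuration();            // records into _bucket/_sum/_count series
        }
    }
}

Each observation feeds the _bucket, _sum and _count series that the rate(...[1m]) and histogram_quantile expressions above consume.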
diff --git a/other/metrics/grafana_seaweedfs_k8s.json b/other/metrics/grafana_seaweedfs_k8s.json
new file mode 100644
index 000000000..348198e52
--- /dev/null
+++ b/other/metrics/grafana_seaweedfs_k8s.json
@@ -0,0 +1,2362 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "$DS_PROMETHEUS",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Annotations & Alerts",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": 10423,
+ "graphTooltip": 0,
+ "id": 3690,
+ "iteration": 1602763266349,
+ "links": [],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 60,
+ "panels": [],
+ "title": "S3 api",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 63,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "maxPerRow": 1,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_s3_request_total{namespace=\"$namespace\",service=~\"$service-api\",type=~\"$method\"}[1m])) by (code)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{code}}",
+ "refId": "A",
+ "step": 30
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "S3 QPS by statusCode",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 62,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "maxPerRow": 1,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_s3_request_total{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (type)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 30
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "S3 QPS by method",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 8
+ },
+ "hiddenSeries": false,
+ "id": 68,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\",type=~\"$method\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "S3 Request Duration 80th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 8
+ },
+ "hiddenSeries": false,
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\", type=~\"$method\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "S3 Request Duration 95th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 16,
+ "y": 8
+ },
+ "hiddenSeries": false,
+ "id": 65,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\", type=~\"$method\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "S3 Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "id": 55,
+ "panels": [],
+ "repeat": null,
+ "title": "Filer",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 46,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Request Duration 80th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 49,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Request Duration 95th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 16,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 66,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Request Duration 95th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "maxPerRow": 1,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(SeaweedFS_filer_request_total{namespace=\"$namespace\",service=~\"$service-api\"}[1m])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 30
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer QPS",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 30
+ },
+ "id": 56,
+ "panels": [],
+ "repeat": null,
+ "title": "Volume Server",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 31
+ },
+ "hiddenSeries": false,
+ "id": 47,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-volume\"}[1m])) by (le, exported_instance))",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{exported_instance}}",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-volume\"}[1m])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Volume Server Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 31
+ },
+ "hiddenSeries": false,
+ "id": 40,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "total",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_volumeServer_request_total{namespace=\"$namespace\",service=~\"$service-volume\"}[1m])) by (type)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Volume Server QPS",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 38
+ },
+ "hiddenSeries": false,
+ "id": 48,
+ "legend": {
+ "avg": false,
+ "current": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": true,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_volumes{namespace=\"$namespace\",service=~\"$service-volume\"}) by (collection, type)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{collection}} {{type}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(max(SeaweedFS_volumeServer_max_volumes{namespace=\"$namespace\",service=~\"$service-volume\"}) by (pod))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Total",
+ "refId": "B"
+ },
+ {
+ "expr": "sum(max(SeaweedFS_volumeServer_read_only_volumes{namespace=\"$namespace\",service=~\"$service-volume\"}) by (pod))",
+ "interval": "",
+ "legendFormat": "Read only",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Volume Count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transformations": [],
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 45
+ },
+ "hiddenSeries": false,
+ "id": 50,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$namespace\",service=~\"$service-volume\"}) by (collection, type)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{collection}} {{type}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$namespace\",service=~\"$service-volume\"})",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Total",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Used Disk Space by Collection and Type",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 52
+ },
+ "hiddenSeries": false,
+ "id": 51,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(max(SeaweedFS_volumeServer_total_disk_size{namespace=\"$namespace\",service=~\"$service-volume\"}) by (collection,pod)) by (pod)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{pod}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Used Disk Space by Host",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 59
+ },
+ "id": 57,
+ "panels": [],
+ "repeat": null,
+ "title": "Filer Store",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 60
+ },
+ "hiddenSeries": false,
+ "id": 12,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Store Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 60
+ },
+ "hiddenSeries": false,
+ "id": 14,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_filerStore_request_total{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (type)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Store QPS",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 67
+ },
+ "id": 58,
+ "panels": [],
+ "repeat": null,
+ "title": "Filer Instances",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 68
+ },
+ "hiddenSeries": false,
+ "id": 52,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_memstats_alloc_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "bytes allocated",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(go_memstats_alloc_bytes_total{namespace=~\"$namespace\", endpoint=\"swfs-.*-metrics\"}[30s])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "alloc rate",
+ "refId": "A"
+ },
+ {
+ "expr": "go_memstats_stack_inuse_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "stack inuse",
+ "refId": "C"
+ },
+ {
+ "expr": "go_memstats_heap_inuse_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "heap inuse",
+ "refId": "D"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Go Memory Stats",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 68
+ },
+ "hiddenSeries": false,
+ "id": 54,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_gc_duration_seconds{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{quantile}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Go GC duration quantiles",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$DS_PROMETHEUS",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 75
+ },
+ "hiddenSeries": false,
+ "id": 53,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.1.0",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_goroutines{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{pod}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Filer Go Routines",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": "clickhouse-prom",
+ "value": "clickhouse-prom"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": "Datasource",
+ "multi": false,
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "s3",
+ "value": "s3"
+ },
+ "datasource": "$DS_PROMETHEUS",
+ "definition": "label_values({endpoint=\"swfs-filer-metrics\"}, namespace)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [
+ {
+ "selected": true,
+ "text": "s3",
+ "value": "s3"
+ }
+ ],
+ "query": "label_values({endpoint=\"swfs-filer-metrics\"}, namespace)",
+ "refresh": 0,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": "",
+ "current": {
+ "selected": true,
+ "text": "fast",
+ "value": "fast"
+ },
+ "datasource": "$DS_PROMETHEUS",
+ "definition": "label_values({namespace=\"$namespace\"}, service)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "service",
+ "multi": false,
+ "name": "service",
+ "options": [
+ {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ {
+ "selected": true,
+ "text": "fast",
+ "value": "fast"
+ },
+ {
+ "selected": false,
+ "text": "slow",
+ "value": "slow"
+ }
+ ],
+ "query": "label_values({namespace=\"$namespace\"}, service)",
+ "refresh": 0,
+ "regex": "/(\\w+)-master/",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": "",
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$DS_PROMETHEUS",
+ "definition": "label_values(SeaweedFS_s3_request_total{namespace=\"$namespace\"}, type)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "method",
+ "multi": false,
+ "name": "method",
+ "options": [
+ {
+ "selected": true,
+ "text": "All",
+ "value": "$__all"
+ },
+ {
+ "selected": false,
+ "text": "DELETE",
+ "value": "DELETE"
+ },
+ {
+ "selected": false,
+ "text": "GET",
+ "value": "GET"
+ },
+ {
+ "selected": false,
+ "text": "LIST",
+ "value": "LIST"
+ },
+ {
+ "selected": false,
+ "text": "POST",
+ "value": "POST"
+ },
+ {
+ "selected": false,
+ "text": "PUT",
+ "value": "PUT"
+ }
+ ],
+ "query": "label_values(SeaweedFS_s3_request_total{namespace=\"$namespace\"}, type)",
+ "refresh": 0,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-12h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "SeaweedFS",
+ "version": 2
+}
diff --git a/snap/README.md b/snap/README.md
new file mode 100644
index 000000000..5752bd4af
--- /dev/null
+++ b/snap/README.md
@@ -0,0 +1,49 @@
+Hi
+
+This PR adds support for building a snap package of seaweedfs. Snaps are cross-distro Linux software packages. One snap can be installed on all supported Ubuntu LTS and non-LTS releases from 14.04 onward. They can also be installed on Debian, Manjaro, Fedora, openSUSE, and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store.
+
+If accepted, you can use snapcraft locally, a CI system such as Travis or Circle CI, or the free build system (build.snapcraft.io) to create snaps and upload them to the store (snapcraft.io/store).
+
+To test this PR locally, I used an Ubuntu 16.04 VM with the following steps:
+
+```
+snap install snapcraft --classic
+git clone https://github.com/popey/seaweedfs
+cd seaweedfs
+git checkout add-snapcraft
+snapcraft
+```
+
+This generates a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:
+
+ snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous
+
+(The --dangerous flag is necessary because we’re installing an app which hasn’t gone through the snap store review process.)
+
+Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an ‘alias’ so users can use the ‘weed’ command rather than the namespaced ‘seaweedfs.weed’.
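+
+If you want to try the alias locally before one is granted in the store (shown here only as an illustration; the store alias is the proper route), snapd can create it manually:
+
+```
+sudo snap alias seaweedfs.weed weed
+weed version
+```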
+
+- Run the seaweedfs.weed command.
+- Create a sample config. Snaps are securely confined, so their home directory is in a different place:
+ mkdir ~/snap/seaweedfs/current/.seaweedfs
+ seaweedfs.weed scaffold > ~/snap/seaweedfs/current/.seaweedfs/filer.toml
+- Run a server:
+ seaweedfs.weed server
+- Run a benchmark:
+ seaweedfs.weed benchmark
+
+Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/
+
+If landed, you will need to:
+
+- Register an account in the snap store https://snapcraft.io/account
+- Register the ‘seaweedfs’ name in the store
+ - snapcraft login
+ - snapcraft register seaweedfs
+- Upload a built snap to the store
+ - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge
+- Test installing on a clean Ubuntu 16.04 machine
+ - snap install seaweedfs --edge
+
+The store supports multiple risk levels as “channels”, with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally, beta and candidate channels can also be used if needed.
+
+Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there).
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
new file mode 100644
index 000000000..6449e9bfb
--- /dev/null
+++ b/snap/snapcraft.yaml
@@ -0,0 +1,53 @@
+# Name of snap as registered in the store
+name: seaweedfs
+# Automatically derive snap version from git tags
+version: git
+# Short human readable name as seen in 'snap find $SNAPNAME'
+summary: SeaweedFS
+# Longer multi-line description found in 'snap info $SNAPNAME'
+description: |
+ SeaweedFS is a simple and highly scalable distributed file system,
+ to store and serve billions of files fast!
+ SeaweedFS implements an object store with O(1) disk seek,
+ transparent cloud integration, and an optional Filer with POSIX interface,
+ supporting S3 API, Rack-Aware Erasure Coding for warm storage,
+ FUSE mount, Hadoop compatible, WebDAV.
+
+# Grade is stable for snaps expected to land in the stable channel
+grade: stable
+# Uses the strict confinement model and uses interfaces to open up access to
+# resources on the target host
+confinement: strict
+
+# List of parts which comprise the snap
+parts:
+ # The main part which defines how to build the application in the snap
+ seaweedfs:
+ # This part needs a newer version of golang, so we use a separate part
+ # which defines how to get a newer golang during the build
+ after: [go]
+ # The go plugin knows how to build go applications into a snap
+ plugin: go
+ # Snapcraft will look in this location for the source of the application
+ source: .
+ go-importpath: github.com/chrislusf/seaweedfs
+ go:
+ # Defines the version of golang which will be bootstrapped into the snap
+ source-tag: go1.14
+
+# Apps exposes the binaries inside the snap to the host system once installed
+apps:
+ # We expose the weed command.
+ # This differs from the snap name, so it will be namespaced as seaweedfs.weed
+ # An alias can be added to expose this as 'weed' if requested in the snapcraft forum
+ weed:
+ # The path to the binary inside the snap, relative to the $SNAP home
+ command: bin/weed
+ # Plugs connect the snap to resources on the host system. We enable network connectivity
+ # We also add home and removable-media (latter not autoconnected by default)
+ # so users can access files in their home or on removable disks
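+ # Note: home and network are connected automatically on most systems, but
+ # removable-media is not; users can grant it manually with:
+ #   sudo snap connect seaweedfs:removable-media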
+ plugs:
+ - network
+ - network-bind
+ - home
+ - removable-media
diff --git a/test/sample.idx b/test/data/sample.idx
similarity index 100%
rename from test/sample.idx
rename to test/data/sample.idx
diff --git a/test/random_access/pom.xml b/test/random_access/pom.xml
new file mode 100644
index 000000000..36fe6b256
--- /dev/null
+++ b/test/random_access/pom.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>com.seaweedfs.test</groupId>
+ <artifactId>random_access</artifactId>
+ <packaging>jar</packaging>
+ <version>1.0-SNAPSHOT</version>
+
+ <properties>
+ <guava.version>30.0-jre</guava.version>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ <version>${guava.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.7.25</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.13.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.esotericsoftware.kryo</groupId>
+ <artifactId>kryo</artifactId>
+ <version>2.24.0</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <extensions>
+ <extension>
+ <groupId>kr.motd.maven</groupId>
+ <artifactId>os-maven-plugin</artifactId>
+ <version>1.6.2</version>
+ </extension>
+ </extensions>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <source>8</source>
+ <target>8</target>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BTreePersistentIndexedCache.java b/test/random_access/src/main/java/seaweedfs/client/btree/BTreePersistentIndexedCache.java
new file mode 100644
index 000000000..8409c40b3
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/BTreePersistentIndexedCache.java
@@ -0,0 +1,753 @@
+/*
+ * Copyright 2010 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import com.google.common.collect.ImmutableSet;
+import seaweedfs.client.btree.serialize.Serializer;
+import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;
+import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+// todo - stream serialised value to file
+// todo - handle hash collisions (properly, this time)
+// todo - don't store null links to child blocks in leaf index blocks
+// todo - align block boundaries
+// todo - thread safety control
+// todo - merge small values into a single data block
+// todo - discard when file corrupt
+// todo - include data directly in index entry when serializer can guarantee small fixed sized data
+// todo - free list leaks disk space
+// todo - merge adjacent free blocks
+// todo - use more efficient lookup for free block with nearest size
+@SuppressWarnings("unchecked")
+public class BTreePersistentIndexedCache<K, V> {
+ private static final Logger LOGGER = LoggerFactory.getLogger(BTreePersistentIndexedCache.class);
+ private final File cacheFile;
+ private final KeyHasher<K> keyHasher;
+ private final Serializer<V> serializer;
+ private final short maxChildIndexEntries;
+ private final int minIndexChildNodes;
+ private final StateCheckBlockStore store;
+ private HeaderBlock header;
+
+ public BTreePersistentIndexedCache(File cacheFile, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
+ this(cacheFile, keySerializer, valueSerializer, (short) 512, 512);
+ }
+
+ public BTreePersistentIndexedCache(File cacheFile, Serializer<K> keySerializer, Serializer<V> valueSerializer,
+ short maxChildIndexEntries, int maxFreeListEntries) {
+ this.cacheFile = cacheFile;
+ this.keyHasher = new KeyHasher<K>(keySerializer);
+ this.serializer = valueSerializer;
+ this.maxChildIndexEntries = maxChildIndexEntries;
+ this.minIndexChildNodes = maxChildIndexEntries / 2;
+ BlockStore cachingStore = new CachingBlockStore(new FileBackedBlockStore(cacheFile), ImmutableSet.of(IndexBlock.class, FreeListBlockStore.FreeListBlock.class));
+ this.store = new StateCheckBlockStore(new FreeListBlockStore(cachingStore, maxFreeListEntries));
+ try {
+ open();
+ } catch (Exception e) {
+ throw new UncheckedIOException(String.format("Could not open %s.", this), e);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "cache " + cacheFile.getName() + " (" + cacheFile + ")";
+ }
+
+ private void open() throws Exception {
+ LOGGER.debug("Opening {}", this);
+ try {
+ doOpen();
+ } catch (CorruptedCacheException e) {
+ rebuild();
+ }
+ }
+
+ private void doOpen() throws Exception {
+ BlockStore.Factory factory = new BlockStore.Factory() {
+ @Override
+ public Object create(Class<? extends BlockPayload> type) {
+ if (type == HeaderBlock.class) {
+ return new HeaderBlock();
+ }
+ if (type == IndexBlock.class) {
+ return new IndexBlock();
+ }
+ if (type == DataBlock.class) {
+ return new DataBlock();
+ }
+ throw new UnsupportedOperationException();
+ }
+ };
+ Runnable initAction = new Runnable() {
+ @Override
+ public void run() {
+ header = new HeaderBlock();
+ store.write(header);
+ header.index.newRoot();
+ store.flush();
+ }
+ };
+
+ store.open(initAction, factory);
+ header = store.readFirst(HeaderBlock.class);
+ }
+
+ public V get(K key) {
+ try {
+ try {
+ DataBlock block = header.getRoot().get(key);
+ if (block != null) {
+ return block.getValue();
+ }
+ return null;
+ } catch (CorruptedCacheException e) {
+ rebuild();
+ return null;
+ }
+ } catch (Exception e) {
+ throw new UncheckedIOException(String.format("Could not read entry '%s' from %s.", key, this), e);
+ }
+ }
+
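+ // Update strategy: if the key already exists, try to overwrite its value in
+ // place; when the new serialized value no longer fits in the old data block,
+ // free that block, write a fresh one, and repoint the index entry at it.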
+ public void put(K key, V value) {
+ try {
+ long hashCode = keyHasher.getHashCode(key);
+ Lookup lookup = header.getRoot().find(hashCode);
+ DataBlock newBlock = null;
+ if (lookup.entry != null) {
+ DataBlock block = store.read(lookup.entry.dataBlock, DataBlock.class);
+ DataBlockUpdateResult updateResult = block.useNewValue(value);
+ if (updateResult.isFailed()) {
+ store.remove(block);
+ newBlock = new DataBlock(value, updateResult.getSerializedValue());
+ }
+ } else {
+ newBlock = new DataBlock(value);
+ }
+ if (newBlock != null) {
+ store.write(newBlock);
+ lookup.indexBlock.put(hashCode, newBlock.getPos());
+ }
+ store.flush();
+ } catch (Exception e) {
+ throw new UncheckedIOException(String.format("Could not add entry '%s' to %s.", key, this), e);
+ }
+ }
+
+ public void remove(K key) {
+ try {
+ Lookup lookup = header.getRoot().find(key);
+ if (lookup.entry == null) {
+ return;
+ }
+ lookup.indexBlock.remove(lookup.entry);
+ DataBlock block = store.read(lookup.entry.dataBlock, DataBlock.class);
+ store.remove(block);
+ store.flush();
+ } catch (Exception e) {
+ throw new UncheckedIOException(String.format("Could not remove entry '%s' from %s.", key, this), e);
+ }
+ }
+
+ private IndexBlock load(BlockPointer pos, IndexRoot root, IndexBlock parent, int index) {
+ IndexBlock block = store.read(pos, IndexBlock.class);
+ block.root = root;
+ block.parent = parent;
+ block.parentEntryIndex = index;
+ return block;
+ }
+
+ public void reset() {
+ close();
+ try {
+ open();
+ } catch (Exception e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ public void close() {
+ LOGGER.debug("Closing {}", this);
+ try {
+ store.close();
+ } catch (Exception e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ public boolean isOpen() {
+ return store.isOpen();
+ }
+
+ private void rebuild() {
+ LOGGER.warn("{} is corrupt. Discarding.", this);
+ try {
+ clear();
+ } catch (Exception e) {
+ LOGGER.warn("{} couldn't be rebuilt. Closing.", this);
+ close();
+ }
+ }
+
+ public void verify() {
+ try {
+ doVerify();
+ } catch (Exception e) {
+ throw new UncheckedIOException(String.format("Some problems were found when checking the integrity of %s.",
+ this), e);
+ }
+ }
+
+ private void doVerify() throws Exception {
+ List<BlockPayload> blocks = new ArrayList<BlockPayload>();
+
+ HeaderBlock header = store.readFirst(HeaderBlock.class);
+ blocks.add(header);
+ verifyTree(header.getRoot(), "", blocks, Long.MAX_VALUE, true);
+
+ Collections.sort(blocks, new Comparator<BlockPayload>() {
+ @Override
+ public int compare(BlockPayload block, BlockPayload block1) {
+ return block.getPos().compareTo(block1.getPos());
+ }
+ });
+
+ for (int i = 0; i < blocks.size() - 1; i++) {
+ Block b1 = blocks.get(i).getBlock();
+ Block b2 = blocks.get(i + 1).getBlock();
+ if (b1.getPos().getPos() + b1.getSize() > b2.getPos().getPos()) {
+ throw new IOException(String.format("%s overlaps with %s", b1, b2));
+ }
+ }
+ }
+
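+ // Recursively checks structural invariants: every non-root block holds between
+ // maxChildIndexEntries/2 and maxChildIndexEntries entries, keys are strictly
+ // increasing, and leaf/tail child pointers stay consistent at each level.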
+ private void verifyTree(IndexBlock current, String prefix, Collection<BlockPayload> blocks, long maxValue,
+ boolean loadData) throws Exception {
+ blocks.add(current);
+
+ if (!prefix.equals("") && current.entries.size() < maxChildIndexEntries / 2) {
+ throw new IOException(String.format("Too few entries found in %s", current));
+ }
+ if (current.entries.size() > maxChildIndexEntries) {
+ throw new IOException(String.format("Too many entries found in %s", current));
+ }
+
+ boolean isLeaf = current.entries.size() == 0 || current.entries.get(0).childIndexBlock.isNull();
+ if (isLeaf ^ current.tailPos.isNull()) {
+ throw new IOException(String.format("Mismatched leaf/tail-node in %s", current));
+ }
+
+ long min = Long.MIN_VALUE;
+ for (IndexEntry entry : current.entries) {
+ if (isLeaf ^ entry.childIndexBlock.isNull()) {
+ throw new IOException(String.format("Mismatched leaf/non-leaf entry in %s", current));
+ }
+ if (entry.hashCode >= maxValue || entry.hashCode <= min) {
+ throw new IOException(String.format("Out-of-order key in %s", current));
+ }
+ min = entry.hashCode;
+ if (!entry.childIndexBlock.isNull()) {
+ IndexBlock child = store.read(entry.childIndexBlock, IndexBlock.class);
+ verifyTree(child, " " + prefix, blocks, entry.hashCode, loadData);
+ }
+ if (loadData) {
+ DataBlock block = store.read(entry.dataBlock, DataBlock.class);
+ blocks.add(block);
+ }
+ }
+ if (!current.tailPos.isNull()) {
+ IndexBlock tail = store.read(current.tailPos, IndexBlock.class);
+ verifyTree(tail, " " + prefix, blocks, maxValue, loadData);
+ }
+ }
+
+ public void clear() {
+ store.clear();
+ close();
+ try {
+ doOpen();
+ } catch (Exception e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ private class IndexRoot {
+ private BlockPointer rootPos = BlockPointer.start();
+ private HeaderBlock owner;
+
+ private IndexRoot(HeaderBlock owner) {
+ this.owner = owner;
+ }
+
+ public void setRootPos(BlockPointer rootPos) {
+ this.rootPos = rootPos;
+ store.write(owner);
+ }
+
+ public IndexBlock getRoot() {
+ return load(rootPos, this, null, 0);
+ }
+
+ public IndexBlock newRoot() {
+ IndexBlock block = new IndexBlock();
+ store.write(block);
+ setRootPos(block.getPos());
+ return block;
+ }
+ }
+
+ private class HeaderBlock extends BlockPayload {
+ private IndexRoot index;
+
+ private HeaderBlock() {
+ index = new IndexRoot(this);
+ }
+
+ @Override
+ protected byte getType() {
+ return 0x55;
+ }
+
+ @Override
+ protected int getSize() {
+ return Block.LONG_SIZE + Block.SHORT_SIZE;
+ }
+
+ @Override
+ protected void read(DataInputStream instr) throws Exception {
+ index.rootPos = BlockPointer.pos(instr.readLong());
+
+ short actualChildIndexEntries = instr.readShort();
+ if (actualChildIndexEntries != maxChildIndexEntries) {
+ throw blockCorruptedException();
+ }
+ }
+
+ @Override
+ protected void write(DataOutputStream outstr) throws Exception {
+ outstr.writeLong(index.rootPos.getPos());
+ outstr.writeShort(maxChildIndexEntries);
+ }
+
+ public IndexBlock getRoot() throws Exception {
+ return index.getRoot();
+ }
+ }
+
+ private class IndexBlock extends BlockPayload {
+ private final List<IndexEntry> entries = new ArrayList<IndexEntry>();
+ private BlockPointer tailPos = BlockPointer.start();
+ // Transient fields
+ private IndexBlock parent;
+ private int parentEntryIndex;
+ private IndexRoot root;
+
+ @Override
+ protected byte getType() {
+ return 0x77;
+ }
+
+ @Override
+ protected int getSize() {
+ return Block.INT_SIZE + Block.LONG_SIZE + (3 * Block.LONG_SIZE) * maxChildIndexEntries;
+ }
+
+ @Override
+ public void read(DataInputStream instr) throws IOException {
+ int count = instr.readInt();
+ entries.clear();
+ for (int i = 0; i < count; i++) {
+ IndexEntry entry = new IndexEntry();
+ entry.hashCode = instr.readLong();
+ entry.dataBlock = BlockPointer.pos(instr.readLong());
+ entry.childIndexBlock = BlockPointer.pos(instr.readLong());
+ entries.add(entry);
+ }
+ tailPos = BlockPointer.pos(instr.readLong());
+ }
+
+ @Override
+ public void write(DataOutputStream outstr) throws IOException {
+ outstr.writeInt(entries.size());
+ for (IndexEntry entry : entries) {
+ outstr.writeLong(entry.hashCode);
+ outstr.writeLong(entry.dataBlock.getPos());
+ outstr.writeLong(entry.childIndexBlock.getPos());
+ }
+ outstr.writeLong(tailPos.getPos());
+ }
+
+ public void put(long hashCode, BlockPointer pos) throws Exception {
+ int index = Collections.binarySearch(entries, new IndexEntry(hashCode));
+ IndexEntry entry;
+ if (index >= 0) {
+ entry = entries.get(index);
+ } else {
+ assert tailPos.isNull();
+ entry = new IndexEntry();
+ entry.hashCode = hashCode;
+ entry.childIndexBlock = BlockPointer.start();
+ index = -index - 1;
+ entries.add(index, entry);
+ }
+
+ entry.dataBlock = pos;
+ store.write(this);
+
+ maybeSplit();
+ }
+
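+ // Split strategy: when this block exceeds maxChildIndexEntries, the median entry
+ // is promoted into the parent (a new root is created if this block was the root)
+ // and the upper half of the entries moves into a new right-hand sibling.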
+ private void maybeSplit() throws Exception {
+ if (entries.size() > maxChildIndexEntries) {
+ int splitPos = entries.size() / 2;
+ IndexEntry splitEntry = entries.remove(splitPos);
+ if (parent == null) {
+ parent = root.newRoot();
+ }
+ IndexBlock sibling = new IndexBlock();
+ store.write(sibling);
+ List<IndexEntry> siblingEntries = entries.subList(splitPos, entries.size());
+ sibling.entries.addAll(siblingEntries);
+ siblingEntries.clear();
+ sibling.tailPos = tailPos;
+ tailPos = splitEntry.childIndexBlock;
+ splitEntry.childIndexBlock = BlockPointer.start();
+ parent.add(this, splitEntry, sibling);
+ }
+ }
+
+ private void add(IndexBlock left, IndexEntry entry, IndexBlock right) throws Exception {
+ int index = left.parentEntryIndex;
+ if (index < entries.size()) {
+ IndexEntry parentEntry = entries.get(index);
+ assert parentEntry.childIndexBlock.equals(left.getPos());
+ parentEntry.childIndexBlock = right.getPos();
+ } else {
+ assert index == entries.size() && (tailPos.isNull() || tailPos.equals(left.getPos()));
+ tailPos = right.getPos();
+ }
+ entries.add(index, entry);
+ entry.childIndexBlock = left.getPos();
+ store.write(this);
+
+ maybeSplit();
+ }
+
+ public DataBlock get(K key) throws Exception {
+ Lookup lookup = find(key);
+ if (lookup.entry == null) {
+ return null;
+ }
+
+ return store.read(lookup.entry.dataBlock, DataBlock.class);
+ }
+
+ public Lookup find(K key) throws Exception {
+ long checksum = keyHasher.getHashCode(key);
+ return find(checksum);
+ }
+
+ private Lookup find(long hashCode) throws Exception {
+ int index = Collections.binarySearch(entries, new IndexEntry(hashCode));
+ if (index >= 0) {
+ return new Lookup(this, entries.get(index));
+ }
+
+ index = -index - 1;
+ BlockPointer childBlockPos;
+ if (index == entries.size()) {
+ childBlockPos = tailPos;
+ } else {
+ childBlockPos = entries.get(index).childIndexBlock;
+ }
+ if (childBlockPos.isNull()) {
+ return new Lookup(this, null);
+ }
+
+ IndexBlock childBlock = load(childBlockPos, root, this, index);
+ return childBlock.find(hashCode);
+ }
+
+ public void remove(IndexEntry entry) throws Exception {
+ int index = entries.indexOf(entry);
+ assert index >= 0;
+ entries.remove(index);
+ store.write(this);
+
+ if (entry.childIndexBlock.isNull()) {
+ maybeMerge();
+ } else {
+ // Not a leaf node. Move up an entry from a leaf node, then possibly merge the leaf node
+ IndexBlock leafBlock = load(entry.childIndexBlock, root, this, index);
+ leafBlock = leafBlock.findHighestLeaf();
+ IndexEntry highestEntry = leafBlock.entries.remove(leafBlock.entries.size() - 1);
+ highestEntry.childIndexBlock = entry.childIndexBlock;
+ entries.add(index, highestEntry);
+ store.write(leafBlock);
+ leafBlock.maybeMerge();
+ }
+ }
+
+ private void maybeMerge() throws Exception {
+ if (parent == null) {
+ // This is the root block. Can have any number of children <= maxChildIndexEntries
+ if (entries.size() == 0 && !tailPos.isNull()) {
+ // This is an empty root block, discard it
+ header.index.setRootPos(tailPos);
+ store.remove(this);
+ }
+ return;
+ }
+
+ // This is not the root block. Must have children >= minIndexChildNodes
+ if (entries.size() >= minIndexChildNodes) {
+ return;
+ }
+
+ // Attempt to merge with the left sibling
+ IndexBlock left = parent.getPrevious(this);
+ if (left != null) {
+ assert entries.size() + left.entries.size() <= maxChildIndexEntries * 2;
+ if (left.entries.size() > minIndexChildNodes) {
+ // There are enough entries in this block and the left sibling to make up 2 blocks, so redistribute
+ // the entries evenly between them
+ left.mergeFrom(this);
+ left.maybeSplit();
+ return;
+ } else {
+ // There are only enough entries to make up 1 block, so move the entries of the left sibling into
+ // this block and discard the left sibling. Might also need to merge the parent
+ left.mergeFrom(this);
+ parent.maybeMerge();
+ return;
+ }
+ }
+
+ // Attempt to merge with the right sibling
+ IndexBlock right = parent.getNext(this);
+ if (right != null) {
+ assert entries.size() + right.entries.size() <= maxChildIndexEntries * 2;
+ if (right.entries.size() > minIndexChildNodes) {
+ // There are enough entries in this block and the right sibling to make up 2 blocks, so redistribute
+ // the entries evenly between them
+ mergeFrom(right);
+ maybeSplit();
+ return;
+ } else {
+ // There are only enough entries to make up 1 block, so move the entries of the right sibling into
+ // this block and discard this block. Might also need to merge the parent
+ mergeFrom(right);
+ parent.maybeMerge();
+ return;
+ }
+ }
+
+ // Should not happen
+ throw new IllegalStateException(String.format("%s does not have any siblings.", getBlock()));
+ }
+
+ private void mergeFrom(IndexBlock right) throws Exception {
+ IndexEntry newChildEntry = parent.entries.remove(parentEntryIndex);
+ if (right.getPos().equals(parent.tailPos)) {
+ parent.tailPos = getPos();
+ } else {
+ IndexEntry newParentEntry = parent.entries.get(parentEntryIndex);
+ assert newParentEntry.childIndexBlock.equals(right.getPos());
+ newParentEntry.childIndexBlock = getPos();
+ }
+ entries.add(newChildEntry);
+ entries.addAll(right.entries);
+ newChildEntry.childIndexBlock = tailPos;
+ tailPos = right.tailPos;
+ store.write(parent);
+ store.write(this);
+ store.remove(right);
+ }
+
+ private IndexBlock getNext(IndexBlock indexBlock) throws Exception {
+ int index = indexBlock.parentEntryIndex + 1;
+ if (index > entries.size()) {
+ return null;
+ }
+ if (index == entries.size()) {
+ return load(tailPos, root, this, index);
+ }
+ return load(entries.get(index).childIndexBlock, root, this, index);
+ }
+
+ private IndexBlock getPrevious(IndexBlock indexBlock) throws Exception {
+ int index = indexBlock.parentEntryIndex - 1;
+ if (index < 0) {
+ return null;
+ }
+ return load(entries.get(index).childIndexBlock, root, this, index);
+ }
+
+ private IndexBlock findHighestLeaf() throws Exception {
+ if (tailPos.isNull()) {
+ return this;
+ }
+ return load(tailPos, root, this, entries.size()).findHighestLeaf();
+ }
+ }
+
+ private static class IndexEntry implements Comparable<IndexEntry> {
+ long hashCode;
+ BlockPointer dataBlock;
+ BlockPointer childIndexBlock;
+
+ private IndexEntry() {
+ }
+
+ private IndexEntry(long hashCode) {
+ this.hashCode = hashCode;
+ }
+
+ @Override
+ public int compareTo(IndexEntry indexEntry) {
+ if (hashCode > indexEntry.hashCode) {
+ return 1;
+ }
+ if (hashCode < indexEntry.hashCode) {
+ return -1;
+ }
+ return 0;
+ }
+ }
+
+ private class Lookup {
+ final IndexBlock indexBlock;
+ final IndexEntry entry;
+
+ private Lookup(IndexBlock indexBlock, IndexEntry entry) {
+ this.indexBlock = indexBlock;
+ this.entry = entry;
+ }
+ }
+
+ private class DataBlock extends BlockPayload {
+ private int size;
+ private StreamByteBuffer buffer;
+ private V value;
+
+ private DataBlock() {
+ }
+
+ public DataBlock(V value) throws Exception {
+ this.value = value;
+ setValue(value);
+ size = buffer.totalBytesUnread();
+ }
+
+ public DataBlock(V value, StreamByteBuffer buffer) throws Exception {
+ this.value = value;
+ this.buffer = buffer;
+ size = buffer.totalBytesUnread();
+ }
+
+ public void setValue(V value) throws Exception {
+ buffer = StreamByteBuffer.createWithChunkSizeInDefaultRange(size);
+ KryoBackedEncoder encoder = new KryoBackedEncoder(buffer.getOutputStream());
+ serializer.write(encoder, value);
+ encoder.flush();
+ }
+
+ public V getValue() throws Exception {
+ if (value == null) {
+ value = serializer.read(new KryoBackedDecoder(buffer.getInputStream()));
+ buffer = null;
+ }
+ return value;
+ }
+
+ @Override
+ protected byte getType() {
+ return 0x33;
+ }
+
+ @Override
+ protected int getSize() {
+ return 2 * Block.INT_SIZE + size;
+ }
+
+ @Override
+ public void read(DataInputStream instr) throws Exception {
+ size = instr.readInt();
+ int bytes = instr.readInt();
+ buffer = StreamByteBuffer.of(instr, bytes);
+ }
+
+ @Override
+ public void write(DataOutputStream outstr) throws Exception {
+ outstr.writeInt(size);
+ outstr.writeInt(buffer.totalBytesUnread());
+ buffer.writeTo(outstr);
+ buffer = null;
+ }
+
+ public DataBlockUpdateResult useNewValue(V value) throws Exception {
+ setValue(value);
+ boolean ok = buffer.totalBytesUnread() <= size;
+ if (ok) {
+ this.value = value;
+ store.write(this);
+ return DataBlockUpdateResult.success();
+ } else {
+ return DataBlockUpdateResult.failed(buffer);
+ }
+ }
+ }
+
+ private static class DataBlockUpdateResult {
+ private static final DataBlockUpdateResult SUCCESS = new DataBlockUpdateResult(true, null);
+ private final boolean success;
+ private final StreamByteBuffer serializedValue;
+
+ private DataBlockUpdateResult(boolean success, StreamByteBuffer serializedValue) {
+ this.success = success;
+ this.serializedValue = serializedValue;
+ }
+
+ static DataBlockUpdateResult success() {
+ return SUCCESS;
+ }
+
+ static DataBlockUpdateResult failed(StreamByteBuffer serializedValue) {
+ return new DataBlockUpdateResult(false, serializedValue);
+ }
+
+ public boolean isFailed() {
+ return !success;
+ }
+
+ public StreamByteBuffer getSerializedValue() {
+ return serializedValue;
+ }
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/Block.java b/test/random_access/src/main/java/seaweedfs/client/btree/Block.java
new file mode 100644
index 000000000..f3ecb2421
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/Block.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+public abstract class Block {
+ static final int LONG_SIZE = 8;
+ static final int INT_SIZE = 4;
+ static final int SHORT_SIZE = 2;
+
+ private BlockPayload payload;
+
+ protected Block(BlockPayload payload) {
+ this.payload = payload;
+ payload.setBlock(this);
+ }
+
+ public BlockPayload getPayload() {
+ return payload;
+ }
+
+ protected void detach() {
+ payload.setBlock(null);
+ payload = null;
+ }
+
+ public abstract BlockPointer getPos();
+
+ public abstract int getSize();
+
+ public abstract RuntimeException blockCorruptedException();
+
+ @Override
+ public String toString() {
+ return payload.getClass().getSimpleName() + " " + getPos();
+ }
+
+ public BlockPointer getNextPos() {
+ return BlockPointer.pos(getPos().getPos() + getSize());
+ }
+
+ public abstract boolean hasPos();
+
+ public abstract void setPos(BlockPointer pos);
+
+ public abstract void setSize(int size);
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BlockPayload.java b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPayload.java
new file mode 100644
index 000000000..d14af26c7
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPayload.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+public abstract class BlockPayload {
+ private Block block;
+
+ public Block getBlock() {
+ return block;
+ }
+
+ public void setBlock(Block block) {
+ this.block = block;
+ }
+
+ public BlockPointer getPos() {
+ return getBlock().getPos();
+ }
+
+ public BlockPointer getNextPos() {
+ return getBlock().getNextPos();
+ }
+
+ protected abstract int getSize();
+
+ protected abstract byte getType();
+
+ protected abstract void read(DataInputStream inputStream) throws Exception;
+
+ protected abstract void write(DataOutputStream outputStream) throws Exception;
+
+ protected RuntimeException blockCorruptedException() {
+ return getBlock().blockCorruptedException();
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BlockPointer.java b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPointer.java
new file mode 100644
index 000000000..38bff7d97
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPointer.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import com.google.common.primitives.Longs;
+
+public class BlockPointer implements Comparable<BlockPointer> {
+
+ private static final BlockPointer NULL = new BlockPointer(-1);
+
+ public static BlockPointer start() {
+ return NULL;
+ }
+
+ public static BlockPointer pos(long pos) {
+ if (pos < -1) {
+ throw new CorruptedCacheException("block pointer must be >= -1, but was " + pos);
+ }
+ if (pos == -1) {
+ return NULL;
+ }
+ return new BlockPointer(pos);
+ }
+
+ private final long pos;
+
+ private BlockPointer(long pos) {
+ this.pos = pos;
+ }
+
+ public boolean isNull() {
+ return pos < 0;
+ }
+
+ public long getPos() {
+ return pos;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(pos);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || obj.getClass() != getClass()) {
+ return false;
+ }
+ BlockPointer other = (BlockPointer) obj;
+ return pos == other.pos;
+ }
+
+ @Override
+ public int hashCode() {
+ return Longs.hashCode(pos);
+ }
+
+ @Override
+ public int compareTo(BlockPointer o) {
+ return Longs.compare(pos, o.pos);
+ }
+}
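
BlockPointer reserves -1 as the null position and funnels it through a single shared NULL instance, so "no block" checks reduce to a sign test. A quick hypothetical usage (assuming the seaweedfs.client.btree classes are on the classpath):

import seaweedfs.client.btree.BlockPointer;

public class BlockPointerDemo {
    public static void main(String[] args) {
        BlockPointer nothing = BlockPointer.start();
        System.out.println(nothing.isNull());                 // true: -1 encodes "no block"
        System.out.println(BlockPointer.pos(-1) == nothing);  // true: the shared NULL instance is reused
        BlockPointer first = BlockPointer.pos(0);             // a real file offset
        System.out.println(first.compareTo(nothing) > 0);     // true: ordered by position
    }
}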
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/BlockStore.java
new file mode 100644
index 000000000..141eb70fe
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/BlockStore.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+public interface BlockStore {
+ /**
+ * Opens this store, calling the given action if the store is empty.
+ */
+ void open(Runnable initAction, Factory factory);
+
+ /**
+ * Closes this store.
+ */
+ void close();
+
+ /**
+ * Discards all blocks from this store.
+ */
+ void clear();
+
+ /**
+ * Removes the given block from this store.
+ */
+ void remove(BlockPayload block);
+
+ /**
+ * Reads the first block from this store.
+ */
+ <T extends BlockPayload> T readFirst(Class<T> payloadType);
+
+ /**
+ * Reads a block from this store.
+ */
+ <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType);
+
+ /**
+ * Writes a block to this store, adding the block if required.
+ */
+ void write(BlockPayload block);
+
+ /**
+ * Adds a new block to this store. Allocates space for the block, but does not write the contents of the block
+ * until {@link #write(BlockPayload)} is called.
+ */
+ void attach(BlockPayload block);
+
+ /**
+ * Flushes any pending updates for this store.
+ */
+ void flush();
+
+ interface Factory {
+ Object create(Class<? extends BlockPayload> type);
+ }
+}
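
BlockStore is the seam the rest of the package hangs off: FileBackedBlockStore (below) does the raw file I/O and the other implementations are stacking decorators that add free-space reuse, write-back caching, and state checking. A hedged wiring sketch; the composition order and the empty cacheable-type set are illustrative here, not the configuration the actual cache uses:

import com.google.common.collect.ImmutableSet;
import seaweedfs.client.btree.*;

import java.io.File;

public class StoreWiringSketch {
    public static void main(String[] args) {
        BlockStore store = new StateCheckBlockStore(
                new CachingBlockStore(
                        new FreeListBlockStore(
                                new FileBackedBlockStore(new File("/tmp/btree-demo.bin")), 100),
                        ImmutableSet.<Class<? extends BlockPayload>>of()));
        store.open(() -> { /* runs only when the backing file is empty */ }, type -> {
            throw new UnsupportedOperationException("no payload types registered in this sketch");
        });
        store.close();
    }
}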
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BufferCaster.java b/test/random_access/src/main/java/seaweedfs/client/btree/BufferCaster.java
new file mode 100644
index 000000000..a43160211
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/BufferCaster.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree;
+
+import java.nio.Buffer;
+
+public class BufferCaster {
+ /**
+ * Without this cast, code compiled on Java 9+ and executed on Java 8 will throw
+ * java.lang.NoSuchMethodError: Method flip()Ljava/nio/ByteBuffer; does not exist in class java.nio.ByteBuffer
+ */
+ @SuppressWarnings("RedundantCast")
+ public static <T extends Buffer> Buffer cast(T byteBuffer) {
+ return (Buffer) byteBuffer;
+ }
+}
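
The cast matters because Java 9 gave ByteBuffer covariant overrides of flip(), limit(), position() and friends; bytecode compiled against those Java 9+ signatures then fails on a Java 8 runtime. Upcasting to Buffer first pins the call to the Java 8 signature. A hypothetical call site:

import java.nio.ByteBuffer;

public class BufferCasterDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put((byte) 1).put((byte) 2);
        // Compiles to Buffer.flip()Ljava/nio/Buffer; regardless of the JDK used
        // to build, so the same class file runs on Java 8 and Java 9+.
        BufferCaster.cast(buf).flip();
        System.out.println(buf.remaining()); // 2
    }
}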
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/ByteInput.java b/test/random_access/src/main/java/seaweedfs/client/btree/ByteInput.java
new file mode 100644
index 000000000..2030a8cde
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/ByteInput.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2014 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree;
+
+import com.google.common.io.CountingInputStream;
+
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+
+/**
+ * Allows a stream of bytes to be read from a particular location of some backing byte stream.
+ */
+class ByteInput {
+ private final RandomAccessFile file;
+ private final ResettableBufferedInputStream bufferedInputStream;
+ private CountingInputStream countingInputStream;
+
+ public ByteInput(RandomAccessFile file) {
+ this.file = file;
+ bufferedInputStream = new ResettableBufferedInputStream(new RandomAccessFileInputStream(file));
+ }
+
+ /**
+ * Starts reading from the given offset.
+ */
+ public DataInputStream start(long offset) throws IOException {
+ file.seek(offset);
+ bufferedInputStream.clear();
+ countingInputStream = new CountingInputStream(bufferedInputStream);
+ return new DataInputStream(countingInputStream);
+ }
+
+ /**
+ * Returns the number of bytes read since {@link #start(long)} was called.
+ */
+ public long getBytesRead() {
+ return countingInputStream.getCount();
+ }
+
+ /**
+ * Finishes reading, resetting any buffered state.
+ */
+ public void done() {
+ countingInputStream = null;
+ }
+
+ private static class ResettableBufferedInputStream extends BufferedInputStream {
+ ResettableBufferedInputStream(InputStream input) {
+ super(input);
+ }
+
+ void clear() {
+ count = 0;
+ pos = 0;
+ }
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/ByteOutput.java b/test/random_access/src/main/java/seaweedfs/client/btree/ByteOutput.java
new file mode 100644
index 000000000..dfb24cfd0
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/ByteOutput.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2014 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree;
+
+import com.google.common.io.CountingOutputStream;
+
+import java.io.BufferedOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+
+/**
+ * Allows a stream of bytes to be written to a particular location of some backing byte stream.
+ */
+class ByteOutput {
+ private final RandomAccessFile file;
+ private final ResettableBufferedOutputStream bufferedOutputStream;
+ private CountingOutputStream countingOutputStream;
+
+ public ByteOutput(RandomAccessFile file) {
+ this.file = file;
+ bufferedOutputStream = new ResettableBufferedOutputStream(new RandomAccessFileOutputStream(file));
+ }
+
+ /**
+ * Starts writing to the given offset. Can be beyond the current length of the file.
+ */
+ public DataOutputStream start(long offset) throws IOException {
+ file.seek(offset);
+ bufferedOutputStream.clear();
+ countingOutputStream = new CountingOutputStream(bufferedOutputStream);
+ return new DataOutputStream(countingOutputStream);
+ }
+
+ /**
+ * Returns the number of bytes written since {@link #start(long)} was called.
+ */
+ public long getBytesWritten() {
+ return countingOutputStream.getCount();
+ }
+
+ /**
+ * Finishes writing, flushing and resetting any buffered state.
+ */
+ public void done() throws IOException {
+ countingOutputStream.flush();
+ countingOutputStream = null;
+ }
+
+ private static class ResettableBufferedOutputStream extends BufferedOutputStream {
+ ResettableBufferedOutputStream(OutputStream output) {
+ super(output);
+ }
+
+ void clear() {
+ count = 0;
+ }
+ }
+}
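
ByteOutput and ByteInput are the two halves FileBackedBlockStore uses for positioned, buffered access to a shared RandomAccessFile. A small roundtrip sketch; both classes are package-private, so this assumes the code lives in the same package, and the temp-file path is illustrative:

package seaweedfs.client.btree;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class PositionedIoDemo {
    public static void main(String[] args) throws IOException {
        File scratch = File.createTempFile("byteio", ".bin");
        try (RandomAccessFile file = new RandomAccessFile(scratch, "rw")) {
            ByteOutput output = new ByteOutput(file);
            DataOutputStream out = output.start(128); // may seek beyond the current EOF
            out.writeInt(42);
            output.done();                            // flushes the buffered bytes

            ByteInput input = new ByteInput(file);
            DataInputStream in = input.start(128);
            System.out.println(in.readInt());         // 42
            System.out.println(input.getBytesRead()); // 4
            input.done();
        }
    }
}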
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/CachingBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/CachingBlockStore.java
new file mode 100644
index 000000000..308838b1d
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/CachingBlockStore.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.ImmutableSet;
+
+import javax.annotation.Nullable;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class CachingBlockStore implements BlockStore {
+ private final BlockStore store;
+ private final Map<BlockPointer, BlockPayload> dirty = new LinkedHashMap<BlockPointer, BlockPayload>();
+ private final Cache<BlockPointer, BlockPayload> indexBlockCache = CacheBuilder.newBuilder().maximumSize(100).concurrencyLevel(1).build();
+ private final ImmutableSet<Class<? extends BlockPayload>> cacheableBlockTypes;
+
+ public CachingBlockStore(BlockStore store, Collection<Class<? extends BlockPayload>> cacheableBlockTypes) {
+ this.store = store;
+ this.cacheableBlockTypes = ImmutableSet.copyOf(cacheableBlockTypes);
+ }
+
+ @Override
+ public void open(Runnable initAction, Factory factory) {
+ store.open(initAction, factory);
+ }
+
+ @Override
+ public void close() {
+ flush();
+ indexBlockCache.invalidateAll();
+ store.close();
+ }
+
+ @Override
+ public void clear() {
+ dirty.clear();
+ indexBlockCache.invalidateAll();
+ store.clear();
+ }
+
+ @Override
+ public void flush() {
+ Iterator<BlockPayload> iterator = dirty.values().iterator();
+ while (iterator.hasNext()) {
+ BlockPayload block = iterator.next();
+ iterator.remove();
+ store.write(block);
+ }
+ store.flush();
+ }
+
+ @Override
+ public void attach(BlockPayload block) {
+ store.attach(block);
+ }
+
+ @Override
+ public void remove(BlockPayload block) {
+ dirty.remove(block.getPos());
+ if (isCacheable(block)) {
+ indexBlockCache.invalidate(block.getPos());
+ }
+ store.remove(block);
+ }
+
+ @Override
+ public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
+ T block = store.readFirst(payloadType);
+ maybeCache(block);
+ return block;
+ }
+
+ @Override
+ public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
+ T block = payloadType.cast(dirty.get(pos));
+ if (block != null) {
+ return block;
+ }
+ block = maybeGetFromCache(pos, payloadType);
+ if (block != null) {
+ return block;
+ }
+ block = store.read(pos, payloadType);
+ maybeCache(block);
+ return block;
+ }
+
+ @Nullable
+ private <T extends BlockPayload> T maybeGetFromCache(BlockPointer pos, Class<T> payloadType) {
+ if (cacheableBlockTypes.contains(payloadType)) {
+ return payloadType.cast(indexBlockCache.getIfPresent(pos));
+ }
+ return null;
+ }
+
+ @Override
+ public void write(BlockPayload block) {
+ store.attach(block);
+ maybeCache(block);
+ dirty.put(block.getPos(), block);
+ }
+
+ private <T extends BlockPayload> void maybeCache(T block) {
+ if (isCacheable(block)) {
+ indexBlockCache.put(block.getPos(), block);
+ }
+ }
+
+ private <T extends BlockPayload> boolean isCacheable(T block) {
+ return cacheableBlockTypes.contains(block.getClass());
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/CorruptedCacheException.java b/test/random_access/src/main/java/seaweedfs/client/btree/CorruptedCacheException.java
new file mode 100644
index 000000000..8f9ac1240
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/CorruptedCacheException.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+class CorruptedCacheException extends RuntimeException {
+ CorruptedCacheException(String message) {
+ super(message);
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/FileBackedBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/FileBackedBlockStore.java
new file mode 100644
index 000000000..556db3647
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/FileBackedBlockStore.java
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+public class FileBackedBlockStore implements BlockStore {
+ private final File cacheFile;
+ private RandomAccessFile file;
+ private ByteOutput output;
+ private ByteInput input;
+ private long nextBlock;
+ private Factory factory;
+ private long currentFileSize;
+
+ public FileBackedBlockStore(File cacheFile) {
+ this.cacheFile = cacheFile;
+ }
+
+ @Override
+ public String toString() {
+ return "cache '" + cacheFile + "'";
+ }
+
+ @Override
+ public void open(Runnable runnable, Factory factory) {
+ this.factory = factory;
+ try {
+ cacheFile.getParentFile().mkdirs();
+ file = openRandomAccessFile();
+ output = new ByteOutput(file);
+ input = new ByteInput(file);
+ currentFileSize = file.length();
+ nextBlock = currentFileSize;
+ if (currentFileSize == 0) {
+ runnable.run();
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ private RandomAccessFile openRandomAccessFile() throws FileNotFoundException {
+ try {
+ return randomAccessFile("rw");
+ } catch (FileNotFoundException e) {
+ return randomAccessFile("r");
+ }
+ }
+
+ private RandomAccessFile randomAccessFile(String mode) throws FileNotFoundException {
+ return new RandomAccessFile(cacheFile, mode);
+ }
+
+ @Override
+ public void close() {
+ try {
+ file.close();
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ @Override
+ public void clear() {
+ try {
+ file.setLength(0);
+ currentFileSize = 0;
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ nextBlock = 0;
+ }
+
+ @Override
+ public void attach(BlockPayload block) {
+ if (block.getBlock() == null) {
+ block.setBlock(new BlockImpl(block));
+ }
+ }
+
+ @Override
+ public void remove(BlockPayload block) {
+ BlockImpl blockImpl = (BlockImpl) block.getBlock();
+ blockImpl.detach();
+ }
+
+ @Override
+ public void flush() {
+ }
+
+ @Override
+ public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
+ return read(BlockPointer.pos(0), payloadType);
+ }
+
+ @Override
+ public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
+ assert !pos.isNull();
+ try {
+ T payload = payloadType.cast(factory.create(payloadType));
+ BlockImpl block = new BlockImpl(payload, pos);
+ block.read();
+ return payload;
+ } catch (CorruptedCacheException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ @Override
+ public void write(BlockPayload block) {
+ BlockImpl blockImpl = (BlockImpl) block.getBlock();
+ try {
+ blockImpl.write();
+ } catch (CorruptedCacheException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ private long alloc(long length) {
+ long pos = nextBlock;
+ nextBlock += length;
+ return pos;
+ }
+
+ private final class BlockImpl extends Block {
+ private static final int HEADER_SIZE = 1 + INT_SIZE; // type, payload size
+ private static final int TAIL_SIZE = INT_SIZE;
+
+ private BlockPointer pos;
+ private int payloadSize;
+
+ private BlockImpl(BlockPayload payload, BlockPointer pos) {
+ this(payload);
+ setPos(pos);
+ }
+
+ public BlockImpl(BlockPayload payload) {
+ super(payload);
+ pos = null;
+ payloadSize = -1;
+ }
+
+ @Override
+ public boolean hasPos() {
+ return pos != null;
+ }
+
+ @Override
+ public BlockPointer getPos() {
+ if (pos == null) {
+ pos = BlockPointer.pos(alloc(getSize()));
+ }
+ return pos;
+ }
+
+ @Override
+ public void setPos(BlockPointer pos) {
+ assert this.pos == null && !pos.isNull();
+ this.pos = pos;
+ }
+
+ @Override
+ public int getSize() {
+ if (payloadSize < 0) {
+ payloadSize = getPayload().getSize();
+ }
+ return payloadSize + HEADER_SIZE + TAIL_SIZE;
+ }
+
+ @Override
+ public void setSize(int size) {
+ int newPayloadSize = size - HEADER_SIZE - TAIL_SIZE;
+ assert newPayloadSize >= payloadSize;
+ payloadSize = newPayloadSize;
+ }
+
+ public void write() throws Exception {
+ long pos = getPos().getPos();
+
+ DataOutputStream outputStream = output.start(pos);
+
+ BlockPayload payload = getPayload();
+
+ // Write header
+ outputStream.writeByte(payload.getType());
+ outputStream.writeInt(payloadSize);
+ long finalSize = pos + HEADER_SIZE + TAIL_SIZE + payloadSize;
+
+ // Write body
+ payload.write(outputStream);
+
+ // Write count
+ long bytesWritten = output.getBytesWritten();
+ if (bytesWritten > Integer.MAX_VALUE) {
+ throw new IllegalArgumentException("Block payload exceeds maximum size");
+ }
+ outputStream.writeInt((int) bytesWritten);
+ output.done();
+
+ // System.out.println(String.format("wrote [%d,%d)", pos, pos + bytesWritten + 4));
+
+ // Pad
+ if (currentFileSize < finalSize) {
+ // System.out.println(String.format("pad length %d => %d", currentFileSize, finalSize));
+ file.setLength(finalSize);
+ currentFileSize = finalSize;
+ }
+ }
+
+ public void read() throws Exception {
+ long pos = getPos().getPos();
+ assert pos >= 0;
+ if (pos + HEADER_SIZE >= currentFileSize) {
+ throw blockCorruptedException();
+ }
+
+ DataInputStream inputStream = input.start(pos);
+
+ BlockPayload payload = getPayload();
+
+ // Read header
+ byte type = inputStream.readByte();
+ if (type != payload.getType()) {
+ throw blockCorruptedException();
+ }
+
+ // Read body
+ payloadSize = inputStream.readInt();
+ if (pos + HEADER_SIZE + TAIL_SIZE + payloadSize > currentFileSize) {
+ throw blockCorruptedException();
+ }
+ payload.read(inputStream);
+
+ // Read and verify count
+ long actualCount = input.getBytesRead();
+ long count = inputStream.readInt();
+ if (actualCount != count) {
+ System.out.println(String.format("read expected %d actual %d, pos %d payloadSize %d currentFileSize %d", count, actualCount, pos, payloadSize, currentFileSize));
+ throw blockCorruptedException();
+ }
+ input.done();
+ }
+
+ @Override
+ public RuntimeException blockCorruptedException() {
+ return new CorruptedCacheException(String.format("Corrupted %s found in %s.", this,
+ FileBackedBlockStore.this));
+ }
+ }
+
+}
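
Every block BlockImpl writes therefore has the same on-disk shape: a one-byte type tag and a four-byte payload size up front, then the payload, then a four-byte count of bytes written that read() recomputes and verifies as a cheap corruption check. The arithmetic, as a worked sketch with illustrative numbers:

// On-disk layout produced by BlockImpl.write():
//
//   [ type:1 | payloadSize:4 | payload:N | bytesWritten:4 ]
//
public class BlockLayoutDemo {
    static final int HEADER_SIZE = 1 + 4; // type byte + payload size
    static final int TAIL_SIZE = 4;       // verification count

    public static void main(String[] args) {
        int payloadSize = 100;
        long pos = 0;
        long blockSize = HEADER_SIZE + payloadSize + TAIL_SIZE; // 109 bytes on disk
        long nextPos = pos + blockSize; // Block.getNextPos(): blocks are laid out contiguously
        System.out.println("block occupies [" + pos + ", " + nextPos + ")");
    }
}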
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/FreeListBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/FreeListBlockStore.java
new file mode 100644
index 000000000..c2cd640f9
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/FreeListBlockStore.java
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class FreeListBlockStore implements BlockStore {
+ private final BlockStore store;
+ private final BlockStore freeListStore;
+ private final int maxBlockEntries;
+ private FreeListBlock freeListBlock;
+
+ public FreeListBlockStore(BlockStore store, int maxBlockEntries) {
+ this.store = store;
+ freeListStore = this;
+ this.maxBlockEntries = maxBlockEntries;
+ }
+
+ @Override
+ public void open(final Runnable initAction, final Factory factory) {
+ Runnable freeListInitAction = new Runnable() {
+ @Override
+ public void run() {
+ freeListBlock = new FreeListBlock();
+ store.write(freeListBlock);
+ store.flush();
+ initAction.run();
+ }
+ };
+ Factory freeListFactory = new Factory() {
+ @Override
+ public Object create(Class<? extends BlockPayload> type) {
+ if (type == FreeListBlock.class) {
+ return new FreeListBlock();
+ }
+ return factory.create(type);
+ }
+ };
+
+ store.open(freeListInitAction, freeListFactory);
+ freeListBlock = store.readFirst(FreeListBlock.class);
+ }
+
+ @Override
+ public void close() {
+ freeListBlock = null;
+ store.close();
+ }
+
+ @Override
+ public void clear() {
+ store.clear();
+ }
+
+ @Override
+ public void remove(BlockPayload block) {
+ Block container = block.getBlock();
+ store.remove(block);
+ freeListBlock.add(container.getPos(), container.getSize());
+ }
+
+ @Override
+ public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
+ return store.read(freeListBlock.getNextPos(), payloadType);
+ }
+
+ @Override
+ public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
+ return store.read(pos, payloadType);
+ }
+
+ @Override
+ public void write(BlockPayload block) {
+ attach(block);
+ store.write(block);
+ }
+
+ @Override
+ public void attach(BlockPayload block) {
+ store.attach(block);
+ freeListBlock.alloc(block.getBlock());
+ }
+
+ @Override
+ public void flush() {
+ store.flush();
+ }
+
+ private void verify() {
+ FreeListBlock block = store.readFirst(FreeListBlock.class);
+ verify(block, Integer.MAX_VALUE);
+ }
+
+ private void verify(FreeListBlock block, int maxValue) {
+ if (block.largestInNextBlock > maxValue) {
+ throw new RuntimeException("corrupt free list");
+ }
+ int current = 0;
+ for (FreeListEntry entry : block.entries) {
+ if (entry.size > maxValue) {
+ throw new RuntimeException("corrupt free list");
+ }
+ if (entry.size < block.largestInNextBlock) {
+ throw new RuntimeException("corrupt free list");
+ }
+ if (entry.size < current) {
+ throw new RuntimeException("corrupt free list");
+ }
+ current = entry.size;
+ }
+ if (!block.nextBlock.isNull()) {
+ verify(store.read(block.nextBlock, FreeListBlock.class), block.largestInNextBlock);
+ }
+ }
+
+ public class FreeListBlock extends BlockPayload {
+ private List<FreeListEntry> entries = new ArrayList<FreeListEntry>();
+ private int largestInNextBlock;
+ private BlockPointer nextBlock = BlockPointer.start();
+ // Transient fields
+ private FreeListBlock prev;
+ private FreeListBlock next;
+
+ @Override
+ protected int getSize() {
+ return Block.LONG_SIZE + Block.INT_SIZE + Block.INT_SIZE + maxBlockEntries * (Block.LONG_SIZE
+ + Block.INT_SIZE);
+ }
+
+ @Override
+ protected byte getType() {
+ return 0x44;
+ }
+
+ @Override
+ protected void read(DataInputStream inputStream) throws Exception {
+ nextBlock = BlockPointer.pos(inputStream.readLong());
+ largestInNextBlock = inputStream.readInt();
+ int count = inputStream.readInt();
+ for (int i = 0; i < count; i++) {
+ BlockPointer pos = BlockPointer.pos(inputStream.readLong());
+ int size = inputStream.readInt();
+ entries.add(new FreeListEntry(pos, size));
+ }
+ }
+
+ @Override
+ protected void write(DataOutputStream outputStream) throws Exception {
+ outputStream.writeLong(nextBlock.getPos());
+ outputStream.writeInt(largestInNextBlock);
+ outputStream.writeInt(entries.size());
+ for (FreeListEntry entry : entries) {
+ outputStream.writeLong(entry.pos.getPos());
+ outputStream.writeInt(entry.size);
+ }
+ }
+
+ public void add(BlockPointer pos, int size) {
+ assert !pos.isNull() && size >= 0;
+ if (size == 0) {
+ return;
+ }
+
+ if (size < largestInNextBlock) {
+ FreeListBlock next = getNextBlock();
+ next.add(pos, size);
+ return;
+ }
+
+ FreeListEntry entry = new FreeListEntry(pos, size);
+ int index = Collections.binarySearch(entries, entry);
+ if (index < 0) {
+ index = -index - 1;
+ }
+ entries.add(index, entry);
+
+ if (entries.size() > maxBlockEntries) {
+ FreeListBlock newBlock = new FreeListBlock();
+ newBlock.largestInNextBlock = largestInNextBlock;
+ newBlock.nextBlock = nextBlock;
+ newBlock.prev = this;
+ newBlock.next = next;
+ next = newBlock;
+
+ List<FreeListEntry> newBlockEntries = entries.subList(0, entries.size() / 2);
+ newBlock.entries.addAll(newBlockEntries);
+ newBlockEntries.clear();
+ largestInNextBlock = newBlock.entries.get(newBlock.entries.size() - 1).size;
+ freeListStore.write(newBlock);
+ nextBlock = newBlock.getPos();
+ }
+
+ freeListStore.write(this);
+ }
+
+ private FreeListBlock getNextBlock() {
+ if (next == null) {
+ next = freeListStore.read(nextBlock, FreeListBlock.class);
+ next.prev = this;
+ }
+ return next;
+ }
+
+ public void alloc(Block block) {
+ if (block.hasPos()) {
+ return;
+ }
+
+ int requiredSize = block.getSize();
+
+ if (entries.isEmpty() || requiredSize <= largestInNextBlock) {
+ if (nextBlock.isNull()) {
+ return;
+ }
+ getNextBlock().alloc(block);
+ return;
+ }
+
+ int index = Collections.binarySearch(entries, new FreeListEntry(null, requiredSize));
+ if (index < 0) {
+ index = -index - 1;
+ }
+ if (index == entries.size()) {
+ // Largest free block is too small
+ return;
+ }
+
+ FreeListEntry entry = entries.remove(index);
+ block.setPos(entry.pos);
+ block.setSize(entry.size);
+ freeListStore.write(this);
+
+ if (entries.size() == 0 && prev != null) {
+ prev.nextBlock = nextBlock;
+ prev.largestInNextBlock = largestInNextBlock;
+ prev.next = next;
+ if (next != null) {
+ next.prev = prev;
+ }
+ freeListStore.write(prev);
+ freeListStore.remove(this);
+ }
+ }
+ }
+
+ private static class FreeListEntry implements Comparable<FreeListEntry> {
+ final BlockPointer pos;
+ final int size;
+
+ private FreeListEntry(BlockPointer pos, int size) {
+ this.pos = pos;
+ this.size = size;
+ }
+
+ @Override
+ public int compareTo(FreeListEntry o) {
+ if (size > o.size) {
+ return 1;
+ }
+ if (size < o.size) {
+ return -1;
+ }
+ return 0;
+ }
+ }
+}
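
FreeListBlock keeps its entries sorted by size, so alloc() can run a best-fit search with Collections.binarySearch: a negative result encodes the insertion point as -(insertionPoint) - 1, and -index - 1 recovers the first entry at least as large as the request. A self-contained illustration of that lookup:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BestFitDemo {
    public static void main(String[] args) {
        // Free-region sizes, sorted ascending like FreeListBlock.entries.
        List<Integer> sizes = Arrays.asList(8, 16, 64, 256);
        int required = 20;
        int index = Collections.binarySearch(sizes, required);
        if (index < 0) {
            index = -index - 1; // first free region with size >= required
        }
        if (index == sizes.size()) {
            System.out.println("largest free region is too small; append a new block instead");
        } else {
            System.out.println("reuse a region of size " + sizes.get(index)); // 64
        }
    }
}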
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/KeyHasher.java b/test/random_access/src/main/java/seaweedfs/client/btree/KeyHasher.java
new file mode 100644
index 000000000..bdc78dde2
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/KeyHasher.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2014 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree;
+
+import seaweedfs.client.btree.serialize.Serializer;
+import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.math.BigInteger;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+class KeyHasher<K> {
+ private final Serializer<K> serializer;
+ private final MessageDigestStream digestStream = new MessageDigestStream();
+ private final KryoBackedEncoder encoder = new KryoBackedEncoder(digestStream);
+
+ public KeyHasher(Serializer<K> serializer) {
+ this.serializer = serializer;
+ }
+
+ long getHashCode(K key) throws Exception {
+ serializer.write(encoder, key);
+ encoder.flush();
+ return digestStream.getChecksum();
+ }
+
+ private static class MessageDigestStream extends OutputStream {
+ MessageDigest messageDigest;
+
+ private MessageDigestStream() {
+ try {
+ messageDigest = MessageDigest.getInstance("MD5");
+ } catch (NoSuchAlgorithmException e) {
+ throw UncheckedException.throwAsUncheckedException(e);
+ }
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ messageDigest.update((byte) b);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ messageDigest.update(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ messageDigest.update(b, off, len);
+ }
+
+ long getChecksum() {
+ byte[] digest = messageDigest.digest();
+ assert digest.length == 16;
+ return new BigInteger(digest).longValue();
+ }
+ }
+}
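
KeyHasher folds the key's serialized form through MD5 and keeps the low 64 bits of the digest (BigInteger.longValue()) as the fixed-width key the B-tree indexes on. A self-contained equivalent for a raw byte key, with the Kryo encoding step left out:

import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class KeyHashDemo {
    static long hashKey(byte[] serializedKey) throws NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance("MD5").digest(serializedKey); // 16 bytes
        return new BigInteger(digest).longValue(); // low 64 bits, as in getChecksum()
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        System.out.println(hashKey("some/cache/key".getBytes(StandardCharsets.UTF_8)));
    }
}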
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileInputStream.java b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileInputStream.java
new file mode 100644
index 000000000..5f876989f
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileInputStream.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+
+/**
+ * Reads from a {@link RandomAccessFile}. Each operation reads from and advances the current position of the file.
+ *
+ * Closing this stream does not close the underlying file.
+ */
+public class RandomAccessFileInputStream extends InputStream {
+ private final RandomAccessFile file;
+
+ public RandomAccessFileInputStream(RandomAccessFile file) {
+ this.file = file;
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ file.seek(file.getFilePointer() + n);
+ return n;
+ }
+
+ @Override
+ public int read(byte[] bytes) throws IOException {
+ return file.read(bytes);
+ }
+
+ @Override
+ public int read() throws IOException {
+ return file.read();
+ }
+
+ @Override
+ public int read(byte[] bytes, int offset, int length) throws IOException {
+ return file.read(bytes, offset, length);
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileOutputStream.java b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileOutputStream.java
new file mode 100644
index 000000000..3327fe3c6
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileOutputStream.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+
+/**
+ * Writes to a {@link RandomAccessFile}. Each operation writes to and advances the current position of the file.
+ *
+ *
+ * <p>Closing this stream does not close the underlying file. Flushing this stream does nothing.
+ */
+public class RandomAccessFileOutputStream extends OutputStream {
+ private final RandomAccessFile file;
+
+ public RandomAccessFileOutputStream(RandomAccessFile file) {
+ this.file = file;
+ }
+
+ @Override
+ public void write(int i) throws IOException {
+ file.write(i);
+ }
+
+ @Override
+ public void write(byte[] bytes) throws IOException {
+ file.write(bytes);
+ }
+
+ @Override
+ public void write(byte[] bytes, int offset, int length) throws IOException {
+ file.write(bytes, offset, length);
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/StateCheckBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/StateCheckBlockStore.java
new file mode 100644
index 000000000..f720ebb2e
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/StateCheckBlockStore.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+public class StateCheckBlockStore implements BlockStore {
+ private final BlockStore blockStore;
+ private boolean open;
+
+ public StateCheckBlockStore(BlockStore blockStore) {
+ this.blockStore = blockStore;
+ }
+
+ @Override
+ public void open(Runnable initAction, Factory factory) {
+ assert !open;
+ open = true;
+ blockStore.open(initAction, factory);
+ }
+
+ public boolean isOpen() {
+ return open;
+ }
+
+ @Override
+ public void close() {
+ if (!open) {
+ return;
+ }
+ open = false;
+ blockStore.close();
+ }
+
+ @Override
+ public void clear() {
+ assert open;
+ blockStore.clear();
+ }
+
+ @Override
+ public void remove(BlockPayload block) {
+ assert open;
+ blockStore.remove(block);
+ }
+
+ @Override
+ public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
+ assert open;
+ return blockStore.readFirst(payloadType);
+ }
+
+ @Override
+ public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
+ assert open;
+ return blockStore.read(pos, payloadType);
+ }
+
+ @Override
+ public void write(BlockPayload block) {
+ assert open;
+ blockStore.write(block);
+ }
+
+ @Override
+ public void attach(BlockPayload block) {
+ assert open;
+ blockStore.attach(block);
+ }
+
+ @Override
+ public void flush() {
+ assert open;
+ blockStore.flush();
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/StreamByteBuffer.java b/test/random_access/src/main/java/seaweedfs/client/btree/StreamByteBuffer.java
new file mode 100644
index 000000000..8af6e14d8
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/StreamByteBuffer.java
@@ -0,0 +1,526 @@
+/*
+ * Copyright 2016 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.Charset;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CoderResult;
+import java.nio.charset.CodingErrorAction;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+
+
+/**
+ * An in-memory buffer that provides OutputStream and InputStream interfaces.
+ *
+ * This is more efficient than using ByteArrayOutputStream/ByteArrayInputStream.
+ *
+ * Reading the buffer will clear the buffer.
+ * This is not thread-safe; it is intended to be used by a single thread.
+ */
+public class StreamByteBuffer {
+ private static final int DEFAULT_CHUNK_SIZE = 4096;
+ private static final int MAX_CHUNK_SIZE = 1024 * 1024;
+ private LinkedList<StreamByteBufferChunk> chunks = new LinkedList<StreamByteBufferChunk>();
+ private StreamByteBufferChunk currentWriteChunk;
+ private StreamByteBufferChunk currentReadChunk;
+ private int chunkSize;
+ private int nextChunkSize;
+ private int maxChunkSize;
+ private StreamByteBufferOutputStream output;
+ private StreamByteBufferInputStream input;
+ private int totalBytesUnreadInList;
+
+ public StreamByteBuffer() {
+ this(DEFAULT_CHUNK_SIZE);
+ }
+
+ public StreamByteBuffer(int chunkSize) {
+ this.chunkSize = chunkSize;
+ this.nextChunkSize = chunkSize;
+ this.maxChunkSize = Math.max(chunkSize, MAX_CHUNK_SIZE);
+ currentWriteChunk = new StreamByteBufferChunk(nextChunkSize);
+ output = new StreamByteBufferOutputStream();
+ input = new StreamByteBufferInputStream();
+ }
+
+ public static StreamByteBuffer of(InputStream inputStream) throws IOException {
+ StreamByteBuffer buffer = new StreamByteBuffer(chunkSizeInDefaultRange(inputStream.available()));
+ buffer.readFully(inputStream);
+ return buffer;
+ }
+
+ public static StreamByteBuffer of(InputStream inputStream, int len) throws IOException {
+ StreamByteBuffer buffer = new StreamByteBuffer(chunkSizeInDefaultRange(len));
+ buffer.readFrom(inputStream, len);
+ return buffer;
+ }
+
+ public static StreamByteBuffer createWithChunkSizeInDefaultRange(int value) {
+ return new StreamByteBuffer(chunkSizeInDefaultRange(value));
+ }
+
+ static int chunkSizeInDefaultRange(int value) {
+ return valueInRange(value, DEFAULT_CHUNK_SIZE, MAX_CHUNK_SIZE);
+ }
+
+ private static int valueInRange(int value, int min, int max) {
+ return Math.min(Math.max(value, min), max);
+ }
+
+ public OutputStream getOutputStream() {
+ return output;
+ }
+
+ public InputStream getInputStream() {
+ return input;
+ }
+
+ public void writeTo(OutputStream target) throws IOException {
+ while (prepareRead() != -1) {
+ currentReadChunk.writeTo(target);
+ }
+ }
+
+ public void readFrom(InputStream inputStream, int len) throws IOException {
+ int bytesLeft = len;
+ while (bytesLeft > 0) {
+ int spaceLeft = allocateSpace();
+ int limit = Math.min(spaceLeft, bytesLeft);
+ int readBytes = currentWriteChunk.readFrom(inputStream, limit);
+ if (readBytes == -1) {
+ throw new EOFException("Unexpected EOF");
+ }
+ bytesLeft -= readBytes;
+ }
+ }
+
+ public void readFully(InputStream inputStream) throws IOException {
+ while (true) {
+ int len = allocateSpace();
+ int readBytes = currentWriteChunk.readFrom(inputStream, len);
+ if (readBytes == -1) {
+ break;
+ }
+ }
+ }
+
+ public byte[] readAsByteArray() {
+ byte[] buf = new byte[totalBytesUnread()];
+ input.readImpl(buf, 0, buf.length);
+ return buf;
+ }
+
+ public List<byte[]> readAsListOfByteArrays() {
+ List<byte[]> listOfByteArrays = new ArrayList<byte[]>(chunks.size() + 1);
+ byte[] buf;
+ while ((buf = input.readNextBuffer()) != null) {
+ if (buf.length > 0) {
+ listOfByteArrays.add(buf);
+ }
+ }
+ return listOfByteArrays;
+ }
+
+ public String readAsString(String encoding) {
+ Charset charset = Charset.forName(encoding);
+ return readAsString(charset);
+ }
+
+ public String readAsString() {
+ return readAsString(Charset.defaultCharset());
+ }
+
+ public String readAsString(Charset charset) {
+ try {
+ return doReadAsString(charset);
+ } catch (CharacterCodingException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ private String doReadAsString(Charset charset) throws CharacterCodingException {
+ int unreadSize = totalBytesUnread();
+ if (unreadSize > 0) {
+ return readAsCharBuffer(charset).toString();
+ }
+ return "";
+ }
+
+ private CharBuffer readAsCharBuffer(Charset charset) throws CharacterCodingException {
+ CharsetDecoder decoder = charset.newDecoder().onMalformedInput(
+ CodingErrorAction.REPLACE).onUnmappableCharacter(
+ CodingErrorAction.REPLACE);
+ CharBuffer charbuffer = CharBuffer.allocate(totalBytesUnread());
+ ByteBuffer buf = null;
+ boolean wasUnderflow = false;
+ ByteBuffer nextBuf = null;
+ boolean needsFlush = false;
+ while (hasRemaining(nextBuf) || hasRemaining(buf) || prepareRead() != -1) {
+ if (hasRemaining(buf)) {
+ // handle decoding underflow, multi-byte unicode character at buffer chunk boundary
+ if (!wasUnderflow) {
+ throw new IllegalStateException("Unexpected state. Buffer has remaining bytes without underflow in decoding.");
+ }
+ if (!hasRemaining(nextBuf) && prepareRead() != -1) {
+ nextBuf = currentReadChunk.readToNioBuffer();
+ }
+ // copy one by one until the underflow has been resolved
+ buf = ByteBuffer.allocate(buf.remaining() + 1).put(buf);
+ buf.put(nextBuf.get());
+ BufferCaster.cast(buf).flip();
+ } else {
+ if (hasRemaining(nextBuf)) {
+ buf = nextBuf;
+ } else if (prepareRead() != -1) {
+ buf = currentReadChunk.readToNioBuffer();
+ if (!hasRemaining(buf)) {
+ throw new IllegalStateException("Unexpected state. Buffer is empty.");
+ }
+ }
+ nextBuf = null;
+ }
+ boolean endOfInput = !hasRemaining(nextBuf) && prepareRead() == -1;
+ int bufRemainingBefore = buf.remaining();
+ CoderResult result = decoder.decode(buf, charbuffer, false);
+ if (bufRemainingBefore > buf.remaining()) {
+ needsFlush = true;
+ }
+ if (endOfInput) {
+ result = decoder.decode(ByteBuffer.allocate(0), charbuffer, true);
+ if (!result.isUnderflow()) {
+ result.throwException();
+ }
+ break;
+ }
+ wasUnderflow = result.isUnderflow();
+ }
+ if (needsFlush) {
+ CoderResult result = decoder.flush(charbuffer);
+ if (!result.isUnderflow()) {
+ result.throwException();
+ }
+ }
+ clear();
+ // push back remaining bytes of multi-byte unicode character
+ while (hasRemaining(buf)) {
+ byte b = buf.get();
+ try {
+ getOutputStream().write(b);
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+ BufferCaster.cast(charbuffer).flip();
+ return charbuffer;
+ }
+
+ private boolean hasRemaining(ByteBuffer nextBuf) {
+ return nextBuf != null && nextBuf.hasRemaining();
+ }
+
+ public int totalBytesUnread() {
+ int total = totalBytesUnreadInList;
+ if (currentReadChunk != null) {
+ total += currentReadChunk.bytesUnread();
+ }
+ if (currentWriteChunk != currentReadChunk && currentWriteChunk != null) {
+ total += currentWriteChunk.bytesUnread();
+ }
+ return total;
+ }
+
+ protected int allocateSpace() {
+ int spaceLeft = currentWriteChunk.spaceLeft();
+ if (spaceLeft == 0) {
+ addChunk(currentWriteChunk);
+ currentWriteChunk = new StreamByteBufferChunk(nextChunkSize);
+ if (nextChunkSize < maxChunkSize) {
+ nextChunkSize = Math.min(nextChunkSize * 2, maxChunkSize);
+ }
+ spaceLeft = currentWriteChunk.spaceLeft();
+ }
+ return spaceLeft;
+ }
+
+ protected int prepareRead() {
+ int bytesUnread = (currentReadChunk != null) ? currentReadChunk.bytesUnread() : 0;
+ if (bytesUnread == 0) {
+ if (!chunks.isEmpty()) {
+ currentReadChunk = chunks.removeFirst();
+ bytesUnread = currentReadChunk.bytesUnread();
+ totalBytesUnreadInList -= bytesUnread;
+ } else if (currentReadChunk != currentWriteChunk) {
+ currentReadChunk = currentWriteChunk;
+ bytesUnread = currentReadChunk.bytesUnread();
+ } else {
+ bytesUnread = -1;
+ }
+ }
+ return bytesUnread;
+ }
+
+ public static StreamByteBuffer of(List<byte[]> listOfByteArrays) {
+ StreamByteBuffer buffer = new StreamByteBuffer();
+ buffer.addChunks(listOfByteArrays);
+ return buffer;
+ }
+
+ private void addChunks(List<byte[]> listOfByteArrays) {
+ for (byte[] buf : listOfByteArrays) {
+ addChunk(new StreamByteBufferChunk(buf));
+ }
+ }
+
+ private void addChunk(StreamByteBufferChunk chunk) {
+ chunks.add(chunk);
+ totalBytesUnreadInList += chunk.bytesUnread();
+ }
+
+ static class StreamByteBufferChunk {
+ private int pointer;
+ private byte[] buffer;
+ private int size;
+ private int used;
+
+ public StreamByteBufferChunk(int size) {
+ this.size = size;
+ buffer = new byte[size];
+ }
+
+ public StreamByteBufferChunk(byte[] buf) {
+ this.size = buf.length;
+ this.buffer = buf;
+ this.used = buf.length;
+ }
+
+ public ByteBuffer readToNioBuffer() {
+ if (pointer < used) {
+ ByteBuffer result;
+ if (pointer > 0 || used < size) {
+ result = ByteBuffer.wrap(buffer, pointer, used - pointer);
+ } else {
+ result = ByteBuffer.wrap(buffer);
+ }
+ pointer = used;
+ return result;
+ }
+
+ return null;
+ }
+
+ public boolean write(byte b) {
+ if (used < size) {
+ buffer[used++] = b;
+ return true;
+ }
+
+ return false;
+ }
+
+ public void write(byte[] b, int off, int len) {
+ System.arraycopy(b, off, buffer, used, len);
+ used = used + len;
+ }
+
+ public void read(byte[] b, int off, int len) {
+ System.arraycopy(buffer, pointer, b, off, len);
+ pointer = pointer + len;
+ }
+
+ public void writeTo(OutputStream target) throws IOException {
+ if (pointer < used) {
+ target.write(buffer, pointer, used - pointer);
+ pointer = used;
+ }
+ }
+
+ public void reset() {
+ pointer = 0;
+ }
+
+ public int bytesUsed() {
+ return used;
+ }
+
+ public int bytesUnread() {
+ return used - pointer;
+ }
+
+ public int read() {
+ if (pointer < used) {
+ return buffer[pointer++] & 0xff;
+ }
+
+ return -1;
+ }
+
+ public int spaceLeft() {
+ return size - used;
+ }
+
+ public int readFrom(InputStream inputStream, int len) throws IOException {
+ int readBytes = inputStream.read(buffer, used, len);
+ if(readBytes > 0) {
+ used += readBytes;
+ }
+ return readBytes;
+ }
+
+ public void clear() {
+ used = pointer = 0;
+ }
+
+ public byte[] readBuffer() {
+ if (used == buffer.length && pointer == 0) {
+ pointer = used;
+ return buffer;
+ } else if (pointer < used) {
+ byte[] buf = new byte[used - pointer];
+ read(buf, 0, used - pointer);
+ return buf;
+ } else {
+ return new byte[0];
+ }
+ }
+ }
+
+ class StreamByteBufferOutputStream extends OutputStream {
+ private boolean closed;
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ if (b == null) {
+ throw new NullPointerException();
+ }
+
+ if ((off < 0) || (off > b.length) || (len < 0)
+ || ((off + len) > b.length) || ((off + len) < 0)) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ if (len == 0) {
+ return;
+ }
+
+ int bytesLeft = len;
+ int currentOffset = off;
+ while (bytesLeft > 0) {
+ int spaceLeft = allocateSpace();
+ int writeBytes = Math.min(spaceLeft, bytesLeft);
+ currentWriteChunk.write(b, currentOffset, writeBytes);
+ bytesLeft -= writeBytes;
+ currentOffset += writeBytes;
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ closed = true;
+ }
+
+ public boolean isClosed() {
+ return closed;
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ allocateSpace();
+ currentWriteChunk.write((byte) b);
+ }
+
+ public StreamByteBuffer getBuffer() {
+ return StreamByteBuffer.this;
+ }
+ }
+
+ class StreamByteBufferInputStream extends InputStream {
+ @Override
+ public int read() throws IOException {
+ prepareRead();
+ return currentReadChunk.read();
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ return readImpl(b, off, len);
+ }
+
+ int readImpl(byte[] b, int off, int len) {
+ if (b == null) {
+ throw new NullPointerException();
+ }
+
+ if ((off < 0) || (off > b.length) || (len < 0)
+ || ((off + len) > b.length) || ((off + len) < 0)) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+
+ int bytesLeft = len;
+ int currentOffset = off;
+ int bytesUnread = prepareRead();
+ int totalBytesRead = 0;
+ while (bytesLeft > 0 && bytesUnread != -1) {
+ int readBytes = Math.min(bytesUnread, bytesLeft);
+ currentReadChunk.read(b, currentOffset, readBytes);
+ bytesLeft -= readBytes;
+ currentOffset += readBytes;
+ totalBytesRead += readBytes;
+ bytesUnread = prepareRead();
+ }
+ if (totalBytesRead > 0) {
+ return totalBytesRead;
+ }
+
+ return -1;
+ }
+
+ @Override
+ public int available() throws IOException {
+ return totalBytesUnread();
+ }
+
+ public StreamByteBuffer getBuffer() {
+ return StreamByteBuffer.this;
+ }
+
+ public byte[] readNextBuffer() {
+ if (prepareRead() != -1) {
+ return currentReadChunk.readBuffer();
+ }
+ return null;
+ }
+ }
+
+ public void clear() {
+ chunks.clear();
+ currentReadChunk = null;
+ totalBytesUnreadInList = 0;
+ currentWriteChunk.clear();
+ }
+}
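
A usage sketch of the buffer's write-then-read lifecycle; as the class comment warns, reading consumes the chunks, so the second totalBytesUnread() reports zero:

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

public class StreamByteBufferDemo {
    public static void main(String[] args) throws IOException {
        StreamByteBuffer buffer = new StreamByteBuffer();
        OutputStreamWriter writer = new OutputStreamWriter(buffer.getOutputStream(), StandardCharsets.UTF_8);
        writer.write("hello, chunked world");
        writer.flush();

        System.out.println(buffer.totalBytesUnread());                   // 20
        System.out.println(buffer.readAsString(StandardCharsets.UTF_8)); // consumes the buffer
        System.out.println(buffer.totalBytesUnread());                   // 0
    }
}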
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedException.java b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedException.java
new file mode 100644
index 000000000..ab57d8c95
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedException.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2010 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.util.concurrent.Callable;
+
+/**
+ * Wraps a checked exception. Carries no other context.
+ */
+public final class UncheckedException extends RuntimeException {
+ private UncheckedException(Throwable cause) {
+ super(cause);
+ }
+
+ private UncheckedException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ /**
+ * Note: always throws the failure in some form. The return value is to keep the compiler happy.
+ */
+ public static RuntimeException throwAsUncheckedException(Throwable t) {
+ return throwAsUncheckedException(t, false);
+ }
+
+ /**
+ * Note: always throws the failure in some form. The return value is to keep the compiler happy.
+ */
+ public static RuntimeException throwAsUncheckedException(Throwable t, boolean preserveMessage) {
+ if (t instanceof InterruptedException) {
+ Thread.currentThread().interrupt();
+ }
+ if (t instanceof RuntimeException) {
+ throw (RuntimeException) t;
+ }
+ if (t instanceof Error) {
+ throw (Error) t;
+ }
+ if (t instanceof IOException) {
+ if (preserveMessage) {
+ throw new UncheckedIOException(t.getMessage(), t);
+ } else {
+ throw new UncheckedIOException(t);
+ }
+ }
+ if (preserveMessage) {
+ throw new UncheckedException(t.getMessage(), t);
+ } else {
+ throw new UncheckedException(t);
+ }
+ }
+
+ public static <T> T callUnchecked(Callable<T> callable) {
+ try {
+ return callable.call();
+ } catch (Exception e) {
+ throw throwAsUncheckedException(e);
+ }
+ }
+
+ /**
+ * Unwraps the passed InvocationTargetException, making the stack of exceptions cleaner without losing information.
+ *
+ * Note: always throws the failure in some form. The return value is to keep the compiler happy.
+ *
+ * @param e to be unwrapped
+ * @return an instance of RuntimeException based on the target exception of the parameter.
+ */
+ public static RuntimeException unwrapAndRethrow(InvocationTargetException e) {
+ return UncheckedException.throwAsUncheckedException(e.getTargetException());
+ }
+}
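
A minimal usage sketch of the class above (the call site and the riskyIo helper are hypothetical, not part of this change): callUnchecked turns a checked Callable into a plain value, and the returned-but-never-delivered RuntimeException lets callers write "throw throwAsUncheckedException(e)" so the compiler still sees an exit path.

    Callable<String> readConfig = new Callable<String>() {
        @Override
        public String call() throws IOException {
            return "value read from disk"; // stands in for real I/O
        }
    };
    String value = UncheckedException.callUnchecked(readConfig);

    try {
        riskyIo(); // hypothetical method declared to throw IOException
    } catch (IOException e) {
        // Rethrown as UncheckedIOException, since the cause is an IOException.
        throw UncheckedException.throwAsUncheckedException(e);
    }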
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedIOException.java b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedIOException.java
new file mode 100644
index 000000000..1cf30df7a
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedIOException.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+/**
+ * UncheckedIOException is used to wrap an {@link java.io.IOException} into an unchecked exception.
+ */
+public class UncheckedIOException extends RuntimeException {
+ public UncheckedIOException() {
+ }
+
+ public UncheckedIOException(String message) {
+ super(message);
+ }
+
+ public UncheckedIOException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public UncheckedIOException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractDecoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractDecoder.java
new file mode 100644
index 000000000..d805f4654
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractDecoder.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import javax.annotation.Nullable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+public abstract class AbstractDecoder implements Decoder {
+ private DecoderStream stream;
+
+ @Override
+ public InputStream getInputStream() {
+ if (stream == null) {
+ stream = new DecoderStream();
+ }
+ return stream;
+ }
+
+ @Override
+ public void readBytes(byte[] buffer) throws IOException {
+ readBytes(buffer, 0, buffer.length);
+ }
+
+ @Override
+ public byte[] readBinary() throws EOFException, IOException {
+ int size = readSmallInt();
+ byte[] result = new byte[size];
+ readBytes(result);
+ return result;
+ }
+
+ @Override
+ public int readSmallInt() throws EOFException, IOException {
+ return readInt();
+ }
+
+ @Override
+ public long readSmallLong() throws EOFException, IOException {
+ return readLong();
+ }
+
+ @Nullable
+ @Override
+ public Integer readNullableSmallInt() throws IOException {
+ if (readBoolean()) {
+ return readSmallInt();
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public String readNullableString() throws EOFException, IOException {
+ if (readBoolean()) {
+ return readString();
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public void skipBytes(long count) throws EOFException, IOException {
+ long remaining = count;
+ while (remaining > 0) {
+ long skipped = maybeSkip(remaining);
+ if (skipped <= 0) {
+ break;
+ }
+ remaining -= skipped;
+ }
+ if (remaining > 0) {
+ throw new EOFException();
+ }
+ }
+
+ @Override
+ public <T> T decodeChunked(DecodeAction<Decoder, T> decodeAction) throws EOFException, Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipChunked() throws EOFException, IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ protected abstract int maybeReadBytes(byte[] buffer, int offset, int count) throws IOException;
+
+ protected abstract long maybeSkip(long count) throws IOException;
+
+ private class DecoderStream extends InputStream {
+ byte[] buffer = new byte[1];
+
+ @Override
+ public long skip(long n) throws IOException {
+ return maybeSkip(n);
+ }
+
+ @Override
+ public int read() throws IOException {
+ int read = maybeReadBytes(buffer, 0, 1);
+ if (read <= 0) {
+ return read;
+ }
+ return buffer[0] & 0xff;
+ }
+
+ @Override
+ public int read(byte[] buffer) throws IOException {
+ return maybeReadBytes(buffer, 0, buffer.length);
+ }
+
+ @Override
+ public int read(byte[] buffer, int offset, int count) throws IOException {
+ return maybeReadBytes(buffer, offset, count);
+ }
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractEncoder.java
new file mode 100644
index 000000000..4caf3461d
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractEncoder.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.io.OutputStream;
+
+public abstract class AbstractEncoder implements Encoder {
+ private EncoderStream stream;
+
+ @Override
+ public OutputStream getOutputStream() {
+ if (stream == null) {
+ stream = new EncoderStream();
+ }
+ return stream;
+ }
+
+ @Override
+ public void writeBytes(byte[] bytes) throws IOException {
+ writeBytes(bytes, 0, bytes.length);
+ }
+
+ @Override
+ public void writeBinary(byte[] bytes) throws IOException {
+ writeBinary(bytes, 0, bytes.length);
+ }
+
+ @Override
+ public void writeBinary(byte[] bytes, int offset, int count) throws IOException {
+ writeSmallInt(count);
+ writeBytes(bytes, offset, count);
+ }
+
+ @Override
+ public void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void writeSmallInt(int value) throws IOException {
+ writeInt(value);
+ }
+
+ @Override
+ public void writeSmallLong(long value) throws IOException {
+ writeLong(value);
+ }
+
+ @Override
+ public void writeNullableSmallInt(@Nullable Integer value) throws IOException {
+ if (value == null) {
+ writeBoolean(false);
+ } else {
+ writeBoolean(true);
+ writeSmallInt(value);
+ }
+ }
+
+ @Override
+ public void writeNullableString(@Nullable CharSequence value) throws IOException {
+ if (value == null) {
+ writeBoolean(false);
+ } else {
+ writeBoolean(true);
+ writeString(value.toString());
+ }
+ }
+
+ private class EncoderStream extends OutputStream {
+ @Override
+ public void write(byte[] buffer) throws IOException {
+ writeBytes(buffer);
+ }
+
+ @Override
+ public void write(byte[] buffer, int offset, int length) throws IOException {
+ writeBytes(buffer, offset, length);
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ writeByte((byte) b);
+ }
+ }
+}
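
One note on the base class above: the nullable write methods encode a boolean presence marker followed by the value, which is exactly what AbstractDecoder's readNullableSmallInt()/readNullableString() read back. A sketch of this default behaviour (concrete encoders such as the Kryo-backed ones later in this change override writeNullableString with their own scheme):

    encoder.writeNullableSmallInt(null); // emits: false
    encoder.writeNullableSmallInt(7);    // emits: true, then 7
    encoder.writeNullableString(null);   // emits: false
    encoder.writeNullableString("x");    // emits: true, then "x"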
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractSerializer.java
new file mode 100644
index 000000000..a60980354
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractSerializer.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import com.google.common.base.Objects;
+
+/**
+ * This abstract class provides a sensible default implementation for {@code Serializer} equality. This equality
+ * implementation is required to enable cache instance reuse within the same Gradle runtime. Serializers are used
+ * as cache parameters, which need to be compared to determine cache compatibility.
+ */
+public abstract class AbstractSerializer<T> implements Serializer<T> {
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null) {
+ return false;
+ }
+
+ return Objects.equal(obj.getClass(), getClass());
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(getClass());
+ }
+}
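
The equality contract above is class-based, so two separately constructed serializers of the same type compare equal. A small sketch using DefaultSerializer (added later in this change):

    Serializer<String> a = new DefaultSerializer<String>();
    Serializer<String> b = new DefaultSerializer<String>();
    // Same class and same classloader, so caches keyed on either
    // serializer are considered compatible.
    assert a.equals(b) && a.hashCode() == b.hashCode();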
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Cast.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Cast.java
new file mode 100644
index 000000000..4f962cea6
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Cast.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import javax.annotation.Nullable;
+
+public abstract class Cast {
+
+ /**
+ * Casts the given object to the given type, providing a better error message than the default.
+ *
+ * The standard {@link Class#cast(Object)} method produces unsatisfactory error messages on some platforms
+ * when it fails. All this method does is provide a better, consistent, error message.
+ *
+ * This should be used whenever there is a chance the cast could fail. If in doubt, use this.
+ *
+ * @param outputType The type to cast the input to
+ * @param object The object to be cast (must not be {@code null})
+ * @param <O> The type to be cast to
+ * @param <I> The type of the object to be cast
+ * @return The input object, cast to the output type
+ */
+ public static <O, I> O cast(Class<O> outputType, I object) {
+ try {
+ return outputType.cast(object);
+ } catch (ClassCastException e) {
+ throw new ClassCastException(String.format(
+ "Failed to cast object %s of type %s to target type %s", object, object.getClass().getName(), outputType.getName()
+ ));
+ }
+ }
+
+ /**
+ * Casts the given object to the given type, providing a better error message than the default.
+ *
+ * The standard {@link Class#cast(Object)} method produces unsatisfactory error messages on some platforms
+ * when it fails. All this method does is provide a better, consistent, error message.
+ *
+ * This should be used whenever there is a chance the cast could fail. If in doubt, use this.
+ *
+ * @param outputType The type to cast the input to
+ * @param object The object to be cast
+ * @param <O> The type to be cast to
+ * @param <I> The type of the object to be cast
+ * @return The input object, cast to the output type
+ */
+ @Nullable
+ public static <O, I> O castNullable(Class<O> outputType, @Nullable I object) {
+ if (object == null) {
+ return null;
+ }
+ return cast(outputType, object);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Nullable
+ public static <T> T uncheckedCast(@Nullable Object object) {
+ return (T) object;
+ }
+
+ @SuppressWarnings("unchecked")
+ public static <T> T uncheckedNonnullCast(Object object) {
+ return (T) object;
+ }
+}
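
For reference, a usage sketch of the helpers above (not part of this change):

    Object o = "hello";
    String s = Cast.cast(String.class, o);              // checked, clearer error on failure
    String t = Cast.uncheckedNonnullCast(o);            // unchecked; the caller vouches for the type
    Integer n = Cast.castNullable(Integer.class, null); // null passes through as null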
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ClassLoaderObjectInputStream.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ClassLoaderObjectInputStream.java
new file mode 100644
index 000000000..5f9cb3052
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ClassLoaderObjectInputStream.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2010 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree.serialize;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectStreamClass;
+
+public class ClassLoaderObjectInputStream extends ObjectInputStream {
+ private final ClassLoader loader;
+
+ public ClassLoaderObjectInputStream(InputStream in, ClassLoader loader) throws IOException {
+ super(in);
+ this.loader = loader;
+ }
+
+ public ClassLoader getClassLoader() {
+ return loader;
+ }
+
+ @Override
+ protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
+ try {
+ return Class.forName(desc.getName(), false, loader);
+ } catch (ClassNotFoundException e) {
+ return super.resolveClass(desc);
+ }
+ }
+}
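
A usage sketch (the bytes array and pluginClassLoader are hypothetical): classes are resolved against the supplied loader first, with the default ObjectInputStream resolution as the fallback.

    try (ObjectInputStream in = new ClassLoaderObjectInputStream(
            new ByteArrayInputStream(bytes), pluginClassLoader)) {
        Object value = in.readObject();
    }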
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Decoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Decoder.java
new file mode 100644
index 000000000..e5251b8c2
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Decoder.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import javax.annotation.Nullable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Provides a way to decode structured data from a backing byte stream. Implementations may buffer incoming bytes read
+ * from the backing stream prior to decoding.
+ */
+public interface Decoder {
+ /**
+ * Returns an InputStream which can be used to read raw bytes.
+ */
+ InputStream getInputStream();
+
+ /**
+ * Reads a signed 64 bit long value. Can read any value that was written using {@link Encoder#writeLong(long)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the long value can be fully read.
+ */
+ long readLong() throws EOFException, IOException;
+
+ /**
+ * Reads a signed 64 bit long value. Can read any value that was written using {@link Encoder#writeSmallLong(long)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the long value can be fully read.
+ */
+ long readSmallLong() throws EOFException, IOException;
+
+ /**
+ * Reads a signed 32 bit int value. Can read any value that was written using {@link Encoder#writeInt(int)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the int value can be fully read.
+ */
+ int readInt() throws EOFException, IOException;
+
+ /**
+ * Reads a signed 32 bit int value. Can read any value that was written using {@link Encoder#writeSmallInt(int)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the int value can be fully read.
+ */
+ int readSmallInt() throws EOFException, IOException;
+
+ /**
+ * Reads a nullable signed 32 bit int value.
+ *
+ * @see #readSmallInt()
+ */
+ @Nullable
+ Integer readNullableSmallInt() throws EOFException, IOException;
+
+ /**
+ * Reads a boolean value. Can read any value that was written using {@link Encoder#writeBoolean(boolean)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the boolean value can be fully read.
+ */
+ boolean readBoolean() throws EOFException, IOException;
+
+ /**
+ * Reads a non-null string value. Can read any value that was written using {@link Encoder#writeString(CharSequence)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the string can be fully read.
+ */
+ String readString() throws EOFException, IOException;
+
+ /**
+ * Reads a nullable string value. Can read any value that was written using {@link Encoder#writeNullableString(CharSequence)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the string can be fully read.
+ */
+ @Nullable
+ String readNullableString() throws EOFException, IOException;
+
+ /**
+ * Reads a byte value. Can read any byte value that was written using one of the raw byte methods on {@link Encoder}, such as {@link Encoder#writeByte(byte)} or {@link Encoder#getOutputStream()}
+ *
+ * @throws EOFException when the end of the byte stream is reached.
+ */
+ byte readByte() throws EOFException, IOException;
+
+ /**
+ * Reads bytes into the given buffer, filling the buffer. Can read any byte values that were written using one of the raw byte methods on {@link Encoder}, such as {@link
+ * Encoder#writeBytes(byte[])} or {@link Encoder#getOutputStream()}
+ *
+ * @throws EOFException when the end of the byte stream is reached before the buffer is full.
+ */
+ void readBytes(byte[] buffer) throws EOFException, IOException;
+
+ /**
+ * Reads the specified number of bytes into the given buffer. Can read any byte values that were written using one of the raw byte methods on {@link Encoder}, such as {@link
+ * Encoder#writeBytes(byte[])} or {@link Encoder#getOutputStream()}
+ *
+ * @throws EOFException when the end of the byte stream is reached before the specified number of bytes were read.
+ */
+ void readBytes(byte[] buffer, int offset, int count) throws EOFException, IOException;
+
+ /**
+ * Reads a byte array. Can read any byte array written using {@link Encoder#writeBinary(byte[])} or {@link Encoder#writeBinary(byte[], int, int)}.
+ *
+ * @throws EOFException when the end of the byte stream is reached before the byte array was fully read.
+ */
+ byte[] readBinary() throws EOFException, IOException;
+
+ /**
+ * Skips the given number of bytes. Can skip over any byte values that were written using one of the raw byte methods on {@link Encoder}.
+ */
+ void skipBytes(long count) throws EOFException, IOException;
+
+ /**
+ * Reads a byte stream written using {@link Encoder#encodeChunked(Encoder.EncodeAction)}.
+ */
+ <T> T decodeChunked(DecodeAction<Decoder, T> decodeAction) throws EOFException, Exception;
+
+ /**
+ * Skips over a byte stream written using {@link Encoder#encodeChunked(Encoder.EncodeAction)}, discarding its content.
+ */
+ void skipChunked() throws EOFException, IOException;
+
+ interface DecodeAction<IN, OUT> {
+ OUT read(IN source) throws Exception;
+ }
+}
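
To make the contract concrete, here is a minimal round trip through the Kryo-backed implementations added later in this change (only the harness class itself is invented for illustration):

    import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;
    import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    public class RoundTrip {
        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            KryoBackedEncoder encoder = new KryoBackedEncoder(bytes);
            encoder.writeSmallInt(42);            // variable-length, cheap for small positives
            encoder.writeNullableString("hello");
            encoder.flush();                      // push buffered bytes to the backing stream

            KryoBackedDecoder decoder = new KryoBackedDecoder(
                    new ByteArrayInputStream(bytes.toByteArray()));
            int n = decoder.readSmallInt();           // 42
            String s = decoder.readNullableString();  // "hello"
            System.out.println(n + " " + s);
        }
    }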
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/DefaultSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/DefaultSerializer.java
new file mode 100644
index 000000000..15ba1c592
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/DefaultSerializer.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree.serialize;
+
+import com.google.common.base.Objects;
+
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.StreamCorruptedException;
+
+public class DefaultSerializer<T> extends AbstractSerializer<T> {
+ private ClassLoader classLoader;
+
+ public DefaultSerializer() {
+ classLoader = getClass().getClassLoader();
+ }
+
+ public DefaultSerializer(ClassLoader classLoader) {
+ this.classLoader = classLoader != null ? classLoader : getClass().getClassLoader();
+ }
+
+ public ClassLoader getClassLoader() {
+ return classLoader;
+ }
+
+ public void setClassLoader(ClassLoader classLoader) {
+ this.classLoader = classLoader;
+ }
+
+ @Override
+ public T read(Decoder decoder) throws Exception {
+ try {
+ return Cast.uncheckedNonnullCast(new ClassLoaderObjectInputStream(decoder.getInputStream(), classLoader).readObject());
+ } catch (StreamCorruptedException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public void write(Encoder encoder, T value) throws IOException {
+ ObjectOutputStream objectStr = new ObjectOutputStream(encoder.getOutputStream());
+ objectStr.writeObject(value);
+ objectStr.flush();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!super.equals(obj)) {
+ return false;
+ }
+
+ DefaultSerializer<?> rhs = (DefaultSerializer<?>) obj;
+ return Objects.equal(classLoader, rhs.classLoader);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(super.hashCode(), classLoader);
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Encoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Encoder.java
new file mode 100644
index 000000000..1cdea10af
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Encoder.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * Provides a way to encode structured data to a backing byte stream. Implementations may buffer outgoing encoded bytes prior
+ * to writing to the backing byte stream.
+ */
+public interface Encoder {
+ /**
+ * Returns an {@link OutputStream} that can be used to write raw bytes to the stream.
+ */
+ OutputStream getOutputStream();
+
+ /**
+ * Writes a raw byte value to the stream.
+ */
+ void writeByte(byte value) throws IOException;
+
+ /**
+ * Writes the given raw bytes to the stream. Does not encode any length information.
+ */
+ void writeBytes(byte[] bytes) throws IOException;
+
+ /**
+ * Writes the given raw bytes to the stream. Does not encode any length information.
+ */
+ void writeBytes(byte[] bytes, int offset, int count) throws IOException;
+
+ /**
+ * Writes the given byte array to the stream. Encodes the bytes and length information.
+ */
+ void writeBinary(byte[] bytes) throws IOException;
+
+ /**
+ * Writes the given byte array to the stream. Encodes the bytes and length information.
+ */
+ void writeBinary(byte[] bytes, int offset, int count) throws IOException;
+
+ /**
+ * Appends an encoded stream to this stream. Encodes the stream as a series of chunks with length information.
+ */
+ void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception;
+
+ /**
+ * Writes a signed 64 bit long value. The implementation may encode the value as a variable number of bytes, not necessarily as 8 bytes.
+ */
+ void writeLong(long value) throws IOException;
+
+ /**
+ * Writes a signed 64 bit long value whose value is likely to be small and positive but may not be. The implementation may encode the value in a way that is more efficient for small positive
+ * values.
+ */
+ void writeSmallLong(long value) throws IOException;
+
+ /**
+ * Writes a signed 32 bit int value. The implementation may encode the value as a variable number of bytes, not necessarily as 4 bytes.
+ */
+ void writeInt(int value) throws IOException;
+
+ /**
+ * Writes a signed 32 bit int value whose value is likely to be small and positive but may not be. The implementation may encode the value in a way that
+ * is more efficient for small positive values.
+ */
+ void writeSmallInt(int value) throws IOException;
+
+ /**
+ * Writes a nullable signed 32 bit int value whose value is likely to be small and positive but may not be.
+ *
+ * @see #writeSmallInt(int)
+ */
+ void writeNullableSmallInt(@Nullable Integer value) throws IOException;
+
+ /**
+ * Writes a boolean value.
+ */
+ void writeBoolean(boolean value) throws IOException;
+
+ /**
+ * Writes a non-null string value.
+ */
+ void writeString(CharSequence value) throws IOException;
+
+ /**
+ * Writes a nullable string value.
+ */
+ void writeNullableString(@Nullable CharSequence value) throws IOException;
+
+ interface EncodeAction<T> {
+ void write(T target) throws Exception;
+ }
+}
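
The chunked methods pair up across the two interfaces. A sketch of a write and its matching read, assuming encoder and decoder are a matching pair such as the Kryo-backed implementations later in this change:

    // Writing: the nested encoder frames the payload into length-prefixed chunks.
    encoder.encodeChunked(new Encoder.EncodeAction<Encoder>() {
        @Override
        public void write(Encoder nested) throws Exception {
            nested.writeString("payload of unknown length");
        }
    });

    // Reading: either decode it back...
    String payload = decoder.decodeChunked(new Decoder.DecodeAction<Decoder, String>() {
        @Override
        public String read(Decoder nested) throws Exception {
            return nested.readString();
        }
    });
    // ...or discard it without decoding: decoder.skipChunked();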
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/FlushableEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/FlushableEncoder.java
new file mode 100644
index 000000000..ddef9f5c6
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/FlushableEncoder.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import java.io.Flushable;
+import java.io.IOException;
+
+/**
+ * Represents an {@link Encoder} that buffers encoded data prior to writing to the backing stream.
+ */
+public interface FlushableEncoder extends Encoder, Flushable {
+ /**
+ * Ensures that all buffered data has been written to the backing stream. Does not flush the backing stream.
+ */
+ @Override
+ void flush() throws IOException;
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectReader.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectReader.java
new file mode 100644
index 000000000..fdea08191
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectReader.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+import java.io.EOFException;
+
+public interface ObjectReader<T> {
+ /**
+ * Reads the next object from the stream.
+ *
+ * @throws EOFException When the next object cannot be fully read due to reaching the end of stream.
+ */
+ T read() throws EOFException, Exception;
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectWriter.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectWriter.java
new file mode 100644
index 000000000..482bdd0f8
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectWriter.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2012 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+public interface ObjectWriter<T> {
+ void write(T value) throws Exception;
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Serializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Serializer.java
new file mode 100644
index 000000000..b474ba3ac
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Serializer.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2009 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree.serialize;
+
+import java.io.EOFException;
+
+public interface Serializer<T> {
+ /**
+ * Reads the next object from the given stream. The implementation must not perform any buffering, so that it reads only those bytes from the input stream that are
+ * required to deserialize the next object.
+ *
+ * @throws EOFException When the next object cannot be fully read due to reaching the end of stream.
+ */
+ T read(Decoder decoder) throws EOFException, Exception;
+
+ /**
+ * Writes the given object to the given stream. The implementation must not perform any buffering.
+ */
+ void write(Encoder encoder, T value) throws Exception;
+}
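
A minimal hand-written implementation, for contrast with DefaultSerializer's Java serialization above (a sketch, not part of this change):

    class LongSerializer extends AbstractSerializer<Long> {
        @Override
        public Long read(Decoder decoder) throws Exception {
            return decoder.readLong(); // consumes only the bytes of one long value
        }

        @Override
        public void write(Encoder encoder, Long value) throws Exception {
            encoder.writeLong(value);
        }
    }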
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/StatefulSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/StatefulSerializer.java
new file mode 100644
index 000000000..ea677d2c0
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/StatefulSerializer.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize;
+
+/**
+ * Implementations must allow concurrent reading and writing, so that a thread can read and a thread can write at the same time.
+ * Implementations do not need to support multiple read threads or multiple write threads.
+ */
+public interface StatefulSerializer<T> {
+ /**
+ * Should not perform any buffering
+ */
+ ObjectReader<T> newReader(Decoder decoder);
+
+ /**
+ * Should not perform any buffering
+ */
+ ObjectWriter<T> newWriter(Encoder encoder);
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedDecoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedDecoder.java
new file mode 100644
index 000000000..d8e44a0dc
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedDecoder.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize.kryo;
+
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Input;
+import seaweedfs.client.btree.serialize.AbstractDecoder;
+import seaweedfs.client.btree.serialize.Decoder;
+
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Note that this decoder uses buffering, so will attempt to read beyond the end of the encoded data. This means you should use this type only when this decoder will be used to decode the entire
+ * stream.
+ */
+public class KryoBackedDecoder extends AbstractDecoder implements Decoder, Closeable {
+ private final Input input;
+ private final InputStream inputStream;
+ private long extraSkipped;
+ private KryoBackedDecoder nested;
+
+ public KryoBackedDecoder(InputStream inputStream) {
+ this(inputStream, 4096);
+ }
+
+ public KryoBackedDecoder(InputStream inputStream, int bufferSize) {
+ this.inputStream = inputStream;
+ input = new Input(this.inputStream, bufferSize);
+ }
+
+ @Override
+ protected int maybeReadBytes(byte[] buffer, int offset, int count) {
+ return input.read(buffer, offset, count);
+ }
+
+ @Override
+ protected long maybeSkip(long count) throws IOException {
+ // Work around some bugs in Input.skip()
+ int remaining = input.limit() - input.position();
+ if (remaining == 0) {
+ long skipped = inputStream.skip(count);
+ if (skipped > 0) {
+ extraSkipped += skipped;
+ }
+ return skipped;
+ } else if (count <= remaining) {
+ input.setPosition(input.position() + (int) count);
+ return count;
+ } else {
+ input.setPosition(input.limit());
+ return remaining;
+ }
+ }
+
+ private RuntimeException maybeEndOfStream(KryoException e) throws EOFException {
+ if (e.getMessage().equals("Buffer underflow.")) {
+ throw (EOFException) (new EOFException().initCause(e));
+ }
+ throw e;
+ }
+
+ @Override
+ public byte readByte() throws EOFException {
+ try {
+ return input.readByte();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public void readBytes(byte[] buffer, int offset, int count) throws EOFException {
+ try {
+ input.readBytes(buffer, offset, count);
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public long readLong() throws EOFException {
+ try {
+ return input.readLong();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public long readSmallLong() throws EOFException, IOException {
+ try {
+ return input.readLong(true);
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public int readInt() throws EOFException {
+ try {
+ return input.readInt();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public int readSmallInt() throws EOFException {
+ try {
+ return input.readInt(true);
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public boolean readBoolean() throws EOFException {
+ try {
+ return input.readBoolean();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public String readString() throws EOFException {
+ return readNullableString();
+ }
+
+ @Override
+ public String readNullableString() throws EOFException {
+ try {
+ return input.readString();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public void skipChunked() throws EOFException, IOException {
+ while (true) {
+ int count = readSmallInt();
+ if (count == 0) {
+ break;
+ }
+ skipBytes(count);
+ }
+ }
+
+ @Override
+ public <T> T decodeChunked(DecodeAction<Decoder, T> decodeAction) throws EOFException, Exception {
+ if (nested == null) {
+ nested = new KryoBackedDecoder(new InputStream() {
+ @Override
+ public int read() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int read(byte[] buffer, int offset, int length) throws IOException {
+ int count = readSmallInt();
+ if (count == 0) {
+ // End of stream has been reached
+ return -1;
+ }
+ if (count > length) {
+ // For now, assume same size buffers used to read and write
+ throw new UnsupportedOperationException();
+ }
+ readBytes(buffer, offset, count);
+ return count;
+ }
+ });
+ }
+ T value = decodeAction.read(nested);
+ if (readSmallInt() != 0) {
+ throw new IllegalStateException("Expecting the end of nested stream.");
+ }
+ return value;
+ }
+
+ /**
+ * Returns the total number of bytes consumed by this decoder. Some additional bytes may also be buffered by this decoder but have not been consumed.
+ */
+ public long getReadPosition() {
+ return input.total() + extraSkipped;
+ }
+
+ @Override
+ public void close() throws IOException {
+ input.close();
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedEncoder.java
new file mode 100644
index 000000000..6de3c4db5
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedEncoder.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2013 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize.kryo;
+
+import com.esotericsoftware.kryo.io.Output;
+import seaweedfs.client.btree.serialize.AbstractEncoder;
+import seaweedfs.client.btree.serialize.Encoder;
+import seaweedfs.client.btree.serialize.FlushableEncoder;
+
+import javax.annotation.Nullable;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.OutputStream;
+
+public class KryoBackedEncoder extends AbstractEncoder implements FlushableEncoder, Closeable {
+ private final Output output;
+ private KryoBackedEncoder nested;
+
+ public KryoBackedEncoder(OutputStream outputStream) {
+ this(outputStream, 4096);
+ }
+
+ public KryoBackedEncoder(OutputStream outputStream, int bufferSize) {
+ output = new Output(outputStream, bufferSize);
+ }
+
+ @Override
+ public void writeByte(byte value) {
+ output.writeByte(value);
+ }
+
+ @Override
+ public void writeBytes(byte[] bytes, int offset, int count) {
+ output.writeBytes(bytes, offset, count);
+ }
+
+ @Override
+ public void writeLong(long value) {
+ output.writeLong(value);
+ }
+
+ @Override
+ public void writeSmallLong(long value) {
+ output.writeLong(value, true);
+ }
+
+ @Override
+ public void writeInt(int value) {
+ output.writeInt(value);
+ }
+
+ @Override
+ public void writeSmallInt(int value) {
+ output.writeInt(value, true);
+ }
+
+ @Override
+ public void writeBoolean(boolean value) {
+ output.writeBoolean(value);
+ }
+
+ @Override
+ public void writeString(CharSequence value) {
+ if (value == null) {
+ throw new IllegalArgumentException("Cannot encode a null string.");
+ }
+ output.writeString(value);
+ }
+
+ @Override
+ public void writeNullableString(@Nullable CharSequence value) {
+ output.writeString(value);
+ }
+
+ @Override
+ public void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception {
+ if (nested == null) {
+ nested = new KryoBackedEncoder(new OutputStream() {
+ @Override
+ public void write(byte[] buffer, int offset, int length) {
+ if (length == 0) {
+ return;
+ }
+ writeSmallInt(length);
+ writeBytes(buffer, offset, length);
+ }
+
+ @Override
+ public void write(byte[] buffer) throws IOException {
+ write(buffer, 0, buffer.length);
+ }
+
+ @Override
+ public void write(int b) {
+ throw new UnsupportedOperationException();
+ }
+ });
+ }
+ writeAction.write(nested);
+ nested.flush();
+ writeSmallInt(0);
+ }
+
+ /**
+ * Returns the total number of bytes written by this encoder, some of which may still be buffered.
+ */
+ public long getWritePosition() {
+ return output.total();
+ }
+
+ @Override
+ public void flush() {
+ output.flush();
+ }
+
+ @Override
+ public void close() {
+ output.close();
+ }
+}
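
The chunk framing used by encodeChunked above is a sequence of [smallInt length][length bytes] frames terminated by a zero length; KryoBackedDecoder.skipChunked() walks exactly this structure. A sketch that writes one frame by hand and skips it on the way back:

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    KryoBackedEncoder encoder = new KryoBackedEncoder(out);
    encoder.writeSmallInt(3);                  // chunk length
    encoder.writeBytes(new byte[]{1, 2, 3});   // chunk payload
    encoder.writeSmallInt(0);                  // end-of-chunks marker
    encoder.flush();

    KryoBackedDecoder decoder = new KryoBackedDecoder(
            new ByteArrayInputStream(out.toByteArray()));
    decoder.skipChunked(); // consumes the frame and the terminator, decoding nothing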
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedDecoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedDecoder.java
new file mode 100644
index 000000000..f323daf43
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedDecoder.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2018 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize.kryo;
+
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Input;
+import seaweedfs.client.btree.serialize.AbstractDecoder;
+import seaweedfs.client.btree.serialize.Decoder;
+
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Note that this decoder uses buffering, so will attempt to read beyond the end of the encoded data. This means you should use this type only when this decoder will be used to decode the entire
+ * stream.
+ */
+public class StringDeduplicatingKryoBackedDecoder extends AbstractDecoder implements Decoder, Closeable {
+ public static final int INITIAL_CAPACITY = 32;
+ private final Input input;
+ private final InputStream inputStream;
+ private String[] strings;
+ private long extraSkipped;
+
+ public StringDeduplicatingKryoBackedDecoder(InputStream inputStream) {
+ this(inputStream, 4096);
+ }
+
+ public StringDeduplicatingKryoBackedDecoder(InputStream inputStream, int bufferSize) {
+ this.inputStream = inputStream;
+ input = new Input(this.inputStream, bufferSize);
+ }
+
+ @Override
+ protected int maybeReadBytes(byte[] buffer, int offset, int count) {
+ return input.read(buffer, offset, count);
+ }
+
+ @Override
+ protected long maybeSkip(long count) throws IOException {
+ // Work around some bugs in Input.skip()
+ int remaining = input.limit() - input.position();
+ if (remaining == 0) {
+ long skipped = inputStream.skip(count);
+ if (skipped > 0) {
+ extraSkipped += skipped;
+ }
+ return skipped;
+ } else if (count <= remaining) {
+ input.setPosition(input.position() + (int) count);
+ return count;
+ } else {
+ input.setPosition(input.limit());
+ return remaining;
+ }
+ }
+
+ private RuntimeException maybeEndOfStream(KryoException e) throws EOFException {
+ if (e.getMessage().equals("Buffer underflow.")) {
+ throw (EOFException) (new EOFException().initCause(e));
+ }
+ throw e;
+ }
+
+ @Override
+ public byte readByte() throws EOFException {
+ try {
+ return input.readByte();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public void readBytes(byte[] buffer, int offset, int count) throws EOFException {
+ try {
+ input.readBytes(buffer, offset, count);
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public long readLong() throws EOFException {
+ try {
+ return input.readLong();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public long readSmallLong() throws EOFException, IOException {
+ try {
+ return input.readLong(true);
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public int readInt() throws EOFException {
+ try {
+ return input.readInt();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public int readSmallInt() throws EOFException {
+ try {
+ return input.readInt(true);
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public boolean readBoolean() throws EOFException {
+ try {
+ return input.readBoolean();
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ @Override
+ public String readString() throws EOFException {
+ return readNullableString();
+ }
+
+ @Override
+ public String readNullableString() throws EOFException {
+ try {
+ int idx = readInt();
+ if (idx == -1) {
+ return null;
+ }
+ if (strings == null) {
+ strings = new String[INITIAL_CAPACITY];
+ }
+ String string = null;
+ if (idx >= strings.length) {
+ String[] grow = new String[strings.length * 3 / 2];
+ System.arraycopy(strings, 0, grow, 0, strings.length);
+ strings = grow;
+ } else {
+ string = strings[idx];
+ }
+ if (string == null) {
+ string = input.readString();
+ strings[idx] = string;
+ }
+ return string;
+ } catch (KryoException e) {
+ throw maybeEndOfStream(e);
+ }
+ }
+
+ /**
+ * Returns the total number of bytes consumed by this decoder. Some additional bytes may also be buffered by this decoder but have not been consumed.
+ */
+ public long getReadPosition() {
+ return input.total() + extraSkipped;
+ }
+
+ @Override
+ public void close() throws IOException {
+ strings = null;
+ input.close();
+ }
+}
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedEncoder.java
new file mode 100644
index 000000000..140933660
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedEncoder.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2018 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize.kryo;
+
+import com.esotericsoftware.kryo.io.Output;
+import com.google.common.collect.Maps;
+import seaweedfs.client.btree.serialize.AbstractEncoder;
+import seaweedfs.client.btree.serialize.FlushableEncoder;
+
+import javax.annotation.Nullable;
+import java.io.Closeable;
+import java.io.OutputStream;
+import java.util.Map;
+
+public class StringDeduplicatingKryoBackedEncoder extends AbstractEncoder implements FlushableEncoder, Closeable {
+ private Map<String, Integer> strings;
+
+ private final Output output;
+
+ public StringDeduplicatingKryoBackedEncoder(OutputStream outputStream) {
+ this(outputStream, 4096);
+ }
+
+ public StringDeduplicatingKryoBackedEncoder(OutputStream outputStream, int bufferSize) {
+ output = new Output(outputStream, bufferSize);
+ }
+
+ @Override
+ public void writeByte(byte value) {
+ output.writeByte(value);
+ }
+
+ @Override
+ public void writeBytes(byte[] bytes, int offset, int count) {
+ output.writeBytes(bytes, offset, count);
+ }
+
+ @Override
+ public void writeLong(long value) {
+ output.writeLong(value);
+ }
+
+ @Override
+ public void writeSmallLong(long value) {
+ output.writeLong(value, true);
+ }
+
+ @Override
+ public void writeInt(int value) {
+ output.writeInt(value);
+ }
+
+ @Override
+ public void writeSmallInt(int value) {
+ output.writeInt(value, true);
+ }
+
+ @Override
+ public void writeBoolean(boolean value) {
+ output.writeBoolean(value);
+ }
+
+ @Override
+ public void writeString(CharSequence value) {
+ if (value == null) {
+ throw new IllegalArgumentException("Cannot encode a null string.");
+ }
+ writeNullableString(value);
+ }
+
+ @Override
+ public void writeNullableString(@Nullable CharSequence value) {
+ if (value == null) {
+ output.writeInt(-1);
+ return;
+ } else {
+ if (strings == null) {
+ strings = Maps.newHashMapWithExpectedSize(1024);
+ }
+ }
+ String key = value.toString();
+ Integer index = strings.get(key);
+ if (index == null) {
+ index = strings.size();
+ output.writeInt(index);
+ strings.put(key, index);
+ output.writeString(key);
+ } else {
+ output.writeInt(index);
+ }
+ }
+
+ /**
+ * Returns the total number of bytes written by this encoder, some of which may still be buffered.
+ */
+ public long getWritePosition() {
+ return output.total();
+ }
+
+ @Override
+ public void flush() {
+ output.flush();
+ }
+
+ @Override
+ public void close() {
+ output.close();
+ }
+
+ public void done() {
+ strings = null;
+ }
+
+}
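
For reference, the deduplication scheme above writes [int index][string] the first time a string is seen and a bare [int index] afterwards, with -1 standing for null; the matching decoder rebuilds its strings table from the same indexes. A sketch (out is any OutputStream):

    StringDeduplicatingKryoBackedEncoder encoder =
            new StringDeduplicatingKryoBackedEncoder(out);
    encoder.writeString("chunk");      // writes index 0, then "chunk"
    encoder.writeString("chunk");      // writes index 0 only
    encoder.writeNullableString(null); // writes -1
    encoder.flush();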
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/TypeSafeSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/TypeSafeSerializer.java
new file mode 100644
index 000000000..16c00cdf4
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/TypeSafeSerializer.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweedfs.client.btree.serialize.kryo;
+
+import seaweedfs.client.btree.serialize.*;
+
+public class TypeSafeSerializer<T> implements StatefulSerializer<Object> {
+ private final Class<T> type;
+ private final StatefulSerializer<T> serializer;
+
+ public TypeSafeSerializer(Class<T> type, StatefulSerializer<T> serializer) {
+ this.type = type;
+ this.serializer = serializer;
+ }
+
+ @Override
+ public ObjectReader<Object> newReader(Decoder decoder) {
+ final ObjectReader<T> reader = serializer.newReader(decoder);
+ return new ObjectReader<Object>() {
+ @Override
+ public Object read() throws Exception {
+ return reader.read();
+ }
+ };
+ }
+
+ @Override
+ public ObjectWriter<Object> newWriter(Encoder encoder) {
+ final ObjectWriter<T> writer = serializer.newWriter(encoder);
+ return new ObjectWriter<Object>() {
+ @Override
+ public void write(Object value) throws Exception {
+ writer.write(type.cast(value));
+ }
+ };
+ }
+}
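
A usage sketch (stringStatefulSerializer and encoder are hypothetical): the adapter erases the element type for callers, while type.cast keeps writes fail-fast.

    StatefulSerializer<Object> erased =
            new TypeSafeSerializer<String>(String.class, stringStatefulSerializer);
    ObjectWriter<Object> writer = erased.newWriter(encoder);
    writer.write("ok");               // passes type.cast and is delegated
    writer.write(Integer.valueOf(1)); // throws ClassCastException before touching the stream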
diff --git a/test/random_access/src/test/java/seaweedfs/client/btree/BTreePersistentIndexedCacheTest.java b/test/random_access/src/test/java/seaweedfs/client/btree/BTreePersistentIndexedCacheTest.java
new file mode 100644
index 000000000..796c7f0f5
--- /dev/null
+++ b/test/random_access/src/test/java/seaweedfs/client/btree/BTreePersistentIndexedCacheTest.java
@@ -0,0 +1,476 @@
+/*
+ * Copyright 2010 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import seaweedfs.client.btree.serialize.DefaultSerializer;
+import seaweedfs.client.btree.serialize.Serializer;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.junit.Assert.assertNull;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class BTreePersistentIndexedCacheTest {
+ private final Serializer<String> stringSerializer = new DefaultSerializer<String>();
+ private final Serializer<Integer> integerSerializer = new DefaultSerializer<Integer>();
+ private BTreePersistentIndexedCache<String, Integer> cache;
+ private File cacheFile;
+
+ @Before
+ public void setup() {
+ cacheFile = tmpDirFile("cache.bin");
+ }
+
+ public File tmpDirFile(String filename) {
+ File f = new File("/Users/chris/tmp/mm/dev/btree_test");
+ // File f = new File("/tmp/btree_test");
+ f.mkdirs();
+ return new File(f, filename);
+ }
+
+ private void createCache() {
+ cache = new BTreePersistentIndexedCache<String, Integer>(cacheFile, stringSerializer, integerSerializer, (short) 4, 100);
+ }
+
+ private void verifyAndCloseCache() {
+ cache.verify();
+ cache.close();
+ }
+
+ @Test
+ public void getReturnsNullWhenEntryDoesNotExist() {
+ createCache();
+ assertNull(cache.get("unknown"));
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsAddedEntries() {
+ createCache();
+ checkAdds(1, 2, 3, 4, 5);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsAddedEntriesInReverseOrder() {
+ createCache();
+ checkAdds(5, 4, 3, 2, 1);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsAddedEntriesOverMultipleIndexBlocks() {
+ createCache();
+ checkAdds(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsUpdates() {
+ createCache();
+ checkUpdates(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void handlesUpdatesWhenBlockSizeDecreases() {
+ BTreePersistentIndexedCache<String, List<Integer>> cache =
+ new BTreePersistentIndexedCache<String, List<Integer>>(
+ tmpDirFile("listcache.bin"), stringSerializer,
+ new DefaultSerializer<List<Integer>>(), (short) 4, 100);
+
+ List<Integer> values = Arrays.asList(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0);
+ Map<Integer, List<Integer>> updated = new LinkedHashMap<Integer, List<Integer>>();
+
+ for (int i = 10; i > 0; i--) {
+ for (Integer value : values) {
+ String key = String.format("key_%d", value);
+ List<Integer> newValue = new ArrayList<Integer>(i);
+ for (int j = 0; j < i * 2; j++) {
+ newValue.add(j);
+ }
+ cache.put(key, newValue);
+ updated.put(value, newValue);
+ }
+
+ checkListEntries(cache, updated);
+ }
+
+ cache.reset();
+
+ checkListEntries(cache, updated);
+
+ cache.verify();
+ cache.close();
+ }
+
+ private void checkListEntries(BTreePersistentIndexedCache<String, List<Integer>> cache, Map<Integer, List<Integer>> updated) {
+ for (Map.Entry<Integer, List<Integer>> entry : updated.entrySet()) {
+ String key = String.format("key_%d", entry.getKey());
+ assertThat(cache.get(key), equalTo(entry.getValue()));
+ }
+ }
+
+ @Test
+ public void handlesUpdatesWhenBlockSizeIncreases() {
+ BTreePersistentIndexedCache<String, List<Integer>> cache =
+ new BTreePersistentIndexedCache<String, List<Integer>>(
+ tmpDirFile("listcache.bin"), stringSerializer,
+ new DefaultSerializer<List<Integer>>(), (short) 4, 100);
+
+ List<Integer> values = Arrays.asList(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0);
+ Map<Integer, List<Integer>> updated = new LinkedHashMap<Integer, List<Integer>>();
+
+ for (int i = 1; i < 10; i++) {
+ for (Integer value : values) {
+ String key = String.format("key_%d", value);
+ List<Integer> newValue = new ArrayList<Integer>(i);
+ for (int j = 0; j < i * 2; j++) {
+ newValue.add(j);
+ }
+ cache.put(key, newValue);
+ updated.put(value, newValue);
+ }
+
+ checkListEntries(cache, updated);
+ }
+
+ cache.reset();
+
+ checkListEntries(cache, updated);
+
+ cache.verify();
+ cache.close();
+ }
+
+ @Test
+ public void persistsAddedEntriesAfterReopen() {
+ createCache();
+
+ checkAdds(1, 2, 3, 4);
+
+ cache.reset();
+
+ checkAdds(5, 6, 7, 8);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsReplacedEntries() {
+ createCache();
+
+ cache.put("key_1", 1);
+ cache.put("key_2", 2);
+ cache.put("key_3", 3);
+ cache.put("key_4", 4);
+ cache.put("key_5", 5);
+
+ cache.put("key_1", 1);
+ cache.put("key_4", 12);
+
+ assertThat(cache.get("key_1"), equalTo(1));
+ assertThat(cache.get("key_2"), equalTo(2));
+ assertThat(cache.get("key_3"), equalTo(3));
+ assertThat(cache.get("key_4"), equalTo(12));
+ assertThat(cache.get("key_5"), equalTo(5));
+
+ cache.reset();
+
+ assertThat(cache.get("key_1"), equalTo(1));
+ assertThat(cache.get("key_2"), equalTo(2));
+ assertThat(cache.get("key_3"), equalTo(3));
+ assertThat(cache.get("key_4"), equalTo(12));
+ assertThat(cache.get("key_5"), equalTo(5));
+
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void reusesEmptySpaceWhenPuttingEntries() {
+ BTreePersistentIndexedCache<String, String> cache = new BTreePersistentIndexedCache<String, String>(cacheFile, stringSerializer, stringSerializer, (short) 4, 100);
+
+ long beforeLen = cacheFile.length();
+ if (beforeLen > 0) {
+ System.out.println(String.format("cache %s: %s", "key_new", cache.get("key_new")));
+ }
+
+ cache.put("key_1", "abcd");
+ cache.put("key_2", "abcd");
+ cache.put("key_3", "abcd");
+ cache.put("key_4", "abcd");
+ cache.put("key_5", "abcd");
+
+ long len = cacheFile.length();
+ assertTrue(len > 0L);
+
+ System.out.println(String.format("cache file size %d => %d", beforeLen, len));
+
+ cache.put("key_1", "1234");
+ assertThat(cacheFile.length(), equalTo(len));
+
+ cache.remove("key_1");
+ cache.put("key_new", "a1b2");
+ assertThat(cacheFile.length(), equalTo(len));
+
+ cache.put("key_new", "longer value assertThat(cacheFile.length(), equalTo(len))");
+ System.out.println(String.format("cache file size %d beforeLen %d", cacheFile.length(), len));
+ // assertTrue(cacheFile.length() > len);
+ len = cacheFile.length();
+
+ cache.put("key_1", "1234");
+ assertThat(cacheFile.length(), equalTo(len));
+
+ cache.close();
+ }
+
+ @Test
+ public void canHandleLargeNumberOfEntries() {
+ createCache();
+ int count = 2000;
+ List<Integer> values = new ArrayList<Integer>();
+ for (int i = 0; i < count; i++) {
+ values.add(i);
+ }
+
+ checkAddsAndRemoves(null, values);
+
+ long len = cacheFile.length();
+
+ checkAddsAndRemoves(Collections.reverseOrder(), values);
+
+ // need to make this better
+ assertTrue(cacheFile.length() < (long)(1.4 * len));
+
+ checkAdds(values);
+
+ // need to make this better
+ assertTrue(cacheFile.length() < (long) (1.4 * 1.4 * len));
+
+ cache.close();
+ }
+
+ @Test
+ public void persistsRemovalOfEntries() {
+ createCache();
+ checkAddsAndRemoves(1, 2, 3, 4, 5);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsRemovalOfEntriesInReverse() {
+ createCache();
+ checkAddsAndRemoves(Collections.reverseOrder(), 1, 2, 3, 4, 5);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void persistsRemovalOfEntriesOverMultipleIndexBlocks() {
+ createCache();
+ checkAddsAndRemoves(4, 12, 9, 1, 3, 10, 11, 7, 8, 2, 5, 6);
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void removalRedistributesRemainingEntriesWithLeftSibling() {
+ createCache();
+ // Ends up with: 1 2 3 -> 4 <- 5 6
+ checkAdds(1, 2, 5, 6, 4, 3);
+ cache.verify();
+ cache.remove("key_5");
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void removalMergesRemainingEntriesIntoLeftSibling() {
+ createCache();
+ // Ends up with: 1 2 -> 3 <- 4 5
+ checkAdds(1, 2, 4, 5, 3);
+ cache.verify();
+ cache.remove("key_4");
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void removalRedistributesRemainingEntriesWithRightSibling() {
+ createCache();
+ // Ends up with: 1 2 -> 3 <- 4 5 6
+ checkAdds(1, 2, 4, 5, 3, 6);
+ cache.verify();
+ cache.remove("key_2");
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void removalMergesRemainingEntriesIntoRightSibling() {
+ createCache();
+ // Ends up with: 1 2 -> 3 <- 4 5
+ checkAdds(1, 2, 4, 5, 3);
+ cache.verify();
+ cache.remove("key_2");
+ verifyAndCloseCache();
+ }
+
+ @Test
+ public void handlesOpeningATruncatedCacheFile() throws IOException {
+ BTreePersistentIndexedCache<String, Integer> cache = new BTreePersistentIndexedCache<String, Integer>(cacheFile, stringSerializer, integerSerializer);
+
+ assertNull(cache.get("key_1"));
+ cache.put("key_1", 99);
+
+ RandomAccessFile file = new RandomAccessFile(cacheFile, "rw");
+ file.setLength(file.length() - 10);
+ file.close();
+
+ cache.reset();
+
+ assertNull(cache.get("key_1"));
+ cache.verify();
+
+ cache.close();
+ }
+
+ @Test
+ public void canUseFileAsKey() {
+ BTreePersistentIndexedCache<File, Integer> cache = new BTreePersistentIndexedCache<File, Integer>(cacheFile, new DefaultSerializer<File>(), integerSerializer);
+
+ cache.put(new File("file"), 1);
+ cache.put(new File("dir/file"), 2);
+ cache.put(new File("File"), 3);
+
+ assertThat(cache.get(new File("file")), equalTo(1));
+ assertThat(cache.get(new File("dir/file")), equalTo(2));
+ assertThat(cache.get(new File("File")), equalTo(3));
+
+ cache.close();
+ }
+
+ @Test
+ public void handlesKeysWithSameHashCode() {
+ createCache();
+
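+ // String.hashCode() for a two-char string is 31*c0 + c1, so {2, 31} and {1, 62} both hash to 93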
+ String key1 = new String(new byte[]{2, 31});
+ String key2 = new String(new byte[]{1, 62});
+ cache.put(key1, 1);
+ cache.put(key2, 2);
+
+ assertThat(cache.get(key1), equalTo(1));
+ assertThat(cache.get(key2), equalTo(2));
+
+ cache.close();
+ }
+
+ private void checkAdds(Integer... values) {
+ checkAdds(Arrays.asList(values));
+ }
+
+ private Map<String, Integer> checkAdds(Iterable<Integer> values) {
+ Map<String, Integer> added = new LinkedHashMap<String, Integer>();
+
+ for (Integer value : values) {
+ String key = String.format("key_%d", value);
+ cache.put(key, value);
+ added.put(String.format("key_%d", value), value);
+ }
+
+ for (Map.Entry<String, Integer> entry : added.entrySet()) {
+ assertThat(cache.get(entry.getKey()), equalTo(entry.getValue()));
+ }
+
+ cache.reset();
+
+ for (Map.Entry<String, Integer> entry : added.entrySet()) {
+ assertThat(cache.get(entry.getKey()), equalTo(entry.getValue()));
+ }
+
+ return added;
+ }
+
+ private void checkUpdates(Integer... values) {
+ checkUpdates(Arrays.asList(values));
+ }
+
+ private Map<Integer, Integer> checkUpdates(Iterable<Integer> values) {
+ Map<Integer, Integer> updated = new LinkedHashMap<Integer, Integer>();
+
+ for (int i = 0; i < 10; i++) {
+ for (Integer value : values) {
+ String key = String.format("key_%d", value);
+ int newValue = value + (i * 100);
+ cache.put(key, newValue);
+ updated.put(value, newValue);
+ }
+
+ for (Map.Entry<Integer, Integer> entry : updated.entrySet()) {
+ String key = String.format("key_%d", entry.getKey());
+ assertThat(cache.get(key), equalTo(entry.getValue()));
+ }
+ }
+
+ cache.reset();
+
+ for (Map.Entry<Integer, Integer> entry : updated.entrySet()) {
+ String key = String.format("key_%d", entry.getKey());
+ assertThat(cache.get(key), equalTo(entry.getValue()));
+ }
+
+ return updated;
+ }
+
+ private void checkAddsAndRemoves(Integer... values) {
+ checkAddsAndRemoves(null, values);
+ }
+
+ private void checkAddsAndRemoves(Comparator<Integer> comparator, Integer... values) {
+ checkAddsAndRemoves(comparator, Arrays.asList(values));
+ }
+
+ private void checkAddsAndRemoves(Comparator<Integer> comparator, Collection<Integer> values) {
+ checkAdds(values);
+
+ List<Integer> deleteValues = new ArrayList<Integer>(values);
+ Collections.sort(deleteValues, comparator);
+ for (Integer value : deleteValues) {
+ String key = String.format("key_%d", value);
+ assertThat(cache.get(key), notNullValue());
+ cache.remove(key);
+ assertThat(cache.get(key), nullValue());
+ }
+
+ cache.reset();
+ cache.verify();
+
+ for (Integer value : deleteValues) {
+ String key = String.format("key_%d", value);
+ assertThat(cache.get(key), nullValue());
+ }
+ }
+
+}
diff --git a/test/random_access/src/test/java/seaweedfs/file/MmapFileTest.java b/test/random_access/src/test/java/seaweedfs/file/MmapFileTest.java
new file mode 100644
index 000000000..1d741ee2f
--- /dev/null
+++ b/test/random_access/src/test/java/seaweedfs/file/MmapFileTest.java
@@ -0,0 +1,144 @@
+package seaweedfs.file;
+
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+
+public class MmapFileTest {
+
+ static File dir = new File("/Users/chris/tmp/mm/dev");
+
+ @Test
+ public void testMmap() {
+ try {
+ System.out.println("starting ...");
+
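+ // map the file read-only, close the channel, then rewrite the file via a plain stream, likely to exercise mmap-then-write on the weed-mounted directory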
+ File f = new File(dir, "mmap_file.txt");
+ RandomAccessFile raf = new RandomAccessFile(f, "rw");
+ FileChannel fc = raf.getChannel();
+ MappedByteBuffer mbf = fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size());
+ fc.close();
+ raf.close();
+
+ FileOutputStream fos = new FileOutputStream(f);
+ fos.write("abcdefg".getBytes());
+ fos.close();
+ System.out.println("completed!");
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void testBigMmap() throws IOException {
+ /*
+
+// new file
+I0817 09:48:02 25175 dir.go:147] create /dev/mmap_big.txt: OpenReadWrite+OpenCreate
+I0817 09:48:02 25175 wfs.go:116] AcquireHandle /dev/mmap_big.txt uid=502 gid=20
+I0817 09:48:02 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0
+I0817 09:48:02 25175 meta_cache_subscribe.go:32] creating /dev/mmap_big.txt
+
+//get channel
+I0817 09:48:26 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0
+
+I0817 09:48:32 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0
+I0817 09:48:32 25175 wfs.go:116] AcquireHandle /dev/mmap_big.txt uid=0 gid=0
+I0817 09:48:32 25175 filehandle.go:160] Release /dev/mmap_big.txt fh 14968871991130164560
+
+//fileChannel.map
+I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0
+I0817 09:49:18 25175 file.go:112] /dev/mmap_big.txt file setattr set size=262144 chunks=0
+I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144
+I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144
+I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144
+
+// buffer.put
+I0817 09:49:49 25175 filehandle.go:57] /dev/mmap_big.txt read fh 14968871991130164560: [0,32768) size 32768 resp.Data len=0 cap=32768
+I0817 09:49:49 25175 reader_at.go:113] zero2 [0,32768)
+I0817 09:49:50 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144
+
+I0817 09:49:53 25175 file.go:233] /dev/mmap_big.txt fsync file Fsync [ID=0x4 Node=0xe Uid=0 Gid=0 Pid=0] Handle 0x2 Flags 1
+
+//close
+I0817 09:50:14 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144
+I0817 09:50:14 25175 dirty_page.go:130] saveToStorage /dev/mmap_big.txt 1,315b69812039e5 [0,4096) of 262144 bytes
+I0817 09:50:14 25175 file.go:274] /dev/mmap_big.txt existing 0 chunks adds 1 more
+I0817 09:50:14 25175 filehandle.go:218] /dev/mmap_big.txt set chunks: 1
+I0817 09:50:14 25175 filehandle.go:220] /dev/mmap_big.txt chunks 0: 1,315b69812039e5 [0,4096)
+I0817 09:50:14 25175 meta_cache_subscribe.go:23] deleting /dev/mmap_big.txt
+I0817 09:50:14 25175 meta_cache_subscribe.go:32] creating /dev/mmap_big.txt
+
+// end of test
+I0817 09:50:41 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144
+I0817 09:50:41 25175 filehandle.go:160] Release /dev/mmap_big.txt fh 14968871991130164560
+
+ */
+ // Create file object
+ File file = new File(dir, "mmap_big.txt");
+
+ try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw")) {
+ // Get file channel in read-write mode
+ FileChannel fileChannel = randomAccessFile.getChannel();
+
+ // Get direct byte buffer access using channel.map() operation
+ MappedByteBuffer buffer = fileChannel.map(FileChannel.MapMode.READ_WRITE, 0, 4096 * 8 * 8);
+
+ //Write the content using put methods
+ buffer.put("howtodoinjava.com".getBytes());
+ }
+
+/*
+> meta.cat /dev/mmap_big.txt
+{
+ "name": "mmap_big.txt",
+ "isDirectory": false,
+ "chunks": [
+ {
+ "fileId": "1,315b69812039e5",
+ "offset": "0",
+ "size": "4096",
+ "mtime": "1597683014026365000",
+ "eTag": "985ab0ac",
+ "sourceFileId": "",
+ "fid": {
+ "volumeId": 1,
+ "fileKey": "3234665",
+ "cookie": 2166372837
+ },
+ "sourceFid": null,
+ "cipherKey": null,
+ "isCompressed": true,
+ "isChunkManifest": false
+ }
+ ],
+ "attributes": {
+ "fileSize": "262144",
+ "mtime": "1597683014",
+ "fileMode": 420,
+ "uid": 502,
+ "gid": 20,
+ "crtime": "1597682882",
+ "mime": "application/octet-stream",
+ "replication": "",
+ "collection": "",
+ "ttlSec": 0,
+ "userName": "",
+ "groupName": [
+ ],
+ "symlinkTarget": "",
+ "md5": null
+ },
+ "extended": {
+ }
+}
+ */
+
+ }
+}
diff --git a/test/random_access/src/test/java/seaweedfs/file/RandomeAccessFileTest.java b/test/random_access/src/test/java/seaweedfs/file/RandomeAccessFileTest.java
new file mode 100644
index 000000000..cb5847567
--- /dev/null
+++ b/test/random_access/src/test/java/seaweedfs/file/RandomeAccessFileTest.java
@@ -0,0 +1,71 @@
+package seaweedfs.file;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.util.Random;
+
+public class RandomeAccessFileTest {
+
+ @Test
+ public void testRandomWriteAndRead() throws IOException {
+
+ File f = new File(MmapFileTest.dir, "mmap_file.txt");
+
+ RandomAccessFile af = new RandomAccessFile(f, "rw");
+ af.setLength(0);
+ af.close();
+
+ Random r = new Random();
+
+ int maxLength = 5000;
+
+ byte[] data = new byte[maxLength];
+ byte[] readData = new byte[maxLength];
+
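+ // data[] mirrors the on-disk contents: every randomized range is written out immediately, and bytes never written stay zero in both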
+ for (int i = 4096; i < maxLength; i++) {
+
+ RandomAccessFile raf = new RandomAccessFile(f, "rw");
+ long fileSize = raf.length();
+
+ raf.readFully(readData, 0, (int)fileSize);
+
+ for (int x = 0; x < fileSize; x++) {
+ Assert.assertEquals(data[x], readData[x]);
+ }
+
+ int start = r.nextInt(maxLength);
+ int stop = r.nextInt(maxLength);
+ if (start > stop) {
+ int t = stop;
+ stop = start;
+ start = t;
+ }
+ if (stop > fileSize) {
+ fileSize = stop;
+ raf.setLength(fileSize);
+ }
+
+ randomize(r, data, start, stop);
+ raf.seek(start);
+ raf.write(data, start, stop-start);
+
+ raf.close();
+ }
+
+ }
+
+ private static void randomize(Random r, byte[] bytes, int start, int stop) {
+ for (int i = start; i < stop; i++) {
+ int rnd = r.nextInt();
+ bytes[i] = (byte) rnd;
+ }
+ }
+
+
+}
diff --git a/test/s3/basic/basic_test.go b/test/s3/basic/basic_test.go
new file mode 100644
index 000000000..653fa1237
--- /dev/null
+++ b/test/s3/basic/basic_test.go
@@ -0,0 +1,227 @@
+package basic
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+var (
+ svc *s3.S3
+)
+
+func init() {
+ // Initialize a session in us-west-2 that the SDK will use to load
+ // credentials from the shared credentials file ~/.aws/credentials.
+ sess, err := session.NewSession(&aws.Config{
+ Region: aws.String("us-west-2"),
+ Endpoint: aws.String("localhost:8333"),
+ DisableSSL: aws.Bool(true),
+ })
+ if err != nil {
+ exitErrorf("create session, %v", err)
+ }
+
+ // Create S3 service client
+ svc = s3.New(sess)
+}
+
+func TestCreateBucket(t *testing.T) {
+
+ input := &s3.CreateBucketInput{
+ Bucket: aws.String("theBucket"),
+ }
+
+ result, err := svc.CreateBucket(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case s3.ErrCodeBucketAlreadyExists:
+ fmt.Println(s3.ErrCodeBucketAlreadyExists, aerr.Error())
+ case s3.ErrCodeBucketAlreadyOwnedByYou:
+ fmt.Println(s3.ErrCodeBucketAlreadyOwnedByYou, aerr.Error())
+ default:
+ fmt.Println(aerr.Error())
+ }
+ } else {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ }
+ return
+ }
+
+ fmt.Println(result)
+
+}
+
+func TestPutObject(t *testing.T) {
+
+ input := &s3.PutObjectInput{
+ ACL: aws.String("authenticated-read"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("theBucket"),
+ Key: aws.String("exampleobject"),
+ }
+
+ result, err := svc.PutObject(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ default:
+ fmt.Println(aerr.Error())
+ }
+ } else {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ }
+ return
+ }
+
+ fmt.Println(result)
+
+}
+
+func TestListBucket(t *testing.T) {
+
+ result, err := svc.ListBuckets(nil)
+ if err != nil {
+ exitErrorf("Unable to list buckets, %v", err)
+ }
+
+ fmt.Println("Buckets:")
+
+ for _, b := range result.Buckets {
+ fmt.Printf("* %s created on %s\n",
+ aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
+ }
+
+}
+
+func TestListObjectV2(t *testing.T) {
+
+ listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
+ Bucket: aws.String(Bucket),
+ Prefix: aws.String("foo"),
+ Delimiter: aws.String("/"),
+ })
+ if err != nil {
+ exitErrorf("Unable to list objects, %v", err)
+ }
+ for _, content := range listObj.Contents {
+ fmt.Println(aws.StringValue(content.Key))
+ }
+ fmt.Printf("list: %s\n", listObj)
+
+}
+
+func exitErrorf(msg string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+ os.Exit(1)
+}
+
+const (
+ Bucket = "theBucket"
+ object = "foo/bar"
+ Data = ""
+)
+
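+// TestObjectOp exercises the full object lifecycle: create bucket, put, copy, get and verify, list, then delete both objects and the bucket.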
+func TestObjectOp(t *testing.T) {
+ _, err := svc.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String(Bucket),
+ })
+ if err != nil {
+ exitErrorf("Unable to create bucket, %v", err)
+ }
+
+ _, err = svc.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(object),
+ Body: strings.NewReader(Data),
+ })
+ if err != nil {
+ exitErrorf("Unable to put object, %v", err)
+ }
+
+ dest := fmt.Sprintf("%s_bak", object)
+ copyObj, err := svc.CopyObject(&s3.CopyObjectInput{
+ Bucket: aws.String(Bucket),
+ CopySource: aws.String(fmt.Sprintf("%s/%s", Bucket, object)),
+ Key: aws.String(dest),
+ })
+ if err != nil {
+ exitErrorf("Unable to copy object, %v", err)
+ }
+ t.Log("copy object result -> ", copyObj.CopyObjectResult)
+
+ getObj, err := svc.GetObject(&s3.GetObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(dest),
+ })
+ if err != nil {
+ exitErrorf("Unable to get copy object, %v", err)
+ }
+
+ data, err := ioutil.ReadAll(getObj.Body)
+ if err != nil {
+ exitErrorf("Unable to read object data, %v", err)
+ }
+ if string(data) != Data {
+ t.Error("object data -> ", string(data))
+ }
+
+ listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
+ Bucket: aws.String(Bucket),
+ Prefix: aws.String("foo/"),
+ })
+ if err != nil {
+ exitErrorf("Unable to list objects, %v", err)
+ }
+ count := 0
+ for _, content := range listObj.Contents {
+ key := aws.StringValue(content.Key)
+ if key == dest {
+ count++
+ } else if key == object {
+ count++
+ }
+ if count == 2 {
+ break
+ }
+ }
+ if count != 2 {
+ exitErrorf("Unable to find two objects, %v", listObj.Contents)
+ }
+
+ _, err = svc.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(object),
+ })
+ if err != nil {
+ exitErrorf("Unable to delete source object, %v", err)
+ }
+
+ _, err = svc.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(dest),
+ })
+ if err != nil {
+ exitErrorf("Unable to delete object, %v", err)
+ }
+
+ _, err = svc.DeleteBucket(&s3.DeleteBucketInput{
+ Bucket: aws.String(Bucket),
+ })
+
+ if err != nil {
+ exitErrorf("Unable to delete bucket, %v", err)
+ }
+}
diff --git a/test/s3/basic/object_tagging_test.go b/test/s3/basic/object_tagging_test.go
new file mode 100644
index 000000000..2b9b7e5aa
--- /dev/null
+++ b/test/s3/basic/object_tagging_test.go
@@ -0,0 +1,82 @@
+package basic
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "testing"
+)
+
+func TestObjectTagging(t *testing.T) {
+
+ input := &s3.PutObjectInput{
+ Bucket: aws.String("theBucket"),
+ Key: aws.String("testDir/testObject"),
+ }
+
+ svc.PutObject(input)
+
+ printTags()
+
+ setTags()
+
+ printTags()
+
+ clearTags()
+
+ printTags()
+
+}
+
+func printTags() {
+ response, err := svc.GetObjectTagging(
+ &s3.GetObjectTaggingInput{
+ Bucket: aws.String("theBucket"),
+ Key: aws.String("testDir/testObject"),
+ })
+
+ fmt.Println("printTags")
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+
+ fmt.Println(response.TagSet)
+}
+
+func setTags() {
+
+ response, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+ Bucket: aws.String("theBucket"),
+ Key: aws.String("testDir/testObject"),
+ Tagging: &s3.Tagging{
+ TagSet: []*s3.Tag{
+ {
+ Key: aws.String("kye2"),
+ Value: aws.String("value2"),
+ },
+ },
+ },
+ })
+
+ fmt.Println("setTags")
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+
+ fmt.Println(response.String())
+}
+
+func clearTags() {
+
+ response, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
+ Bucket: aws.String("theBucket"),
+ Key: aws.String("testDir/testObject"),
+ })
+
+ fmt.Println("clearTags")
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+
+ fmt.Println(response.String())
+}
diff --git a/test/s3/multipart/aws_upload.go b/test/s3/multipart/aws_upload.go
new file mode 100644
index 000000000..8c15cf6ed
--- /dev/null
+++ b/test/s3/multipart/aws_upload.go
@@ -0,0 +1,176 @@
+package main
+
+// copied from https://github.com/apoorvam/aws-s3-multipart-upload
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+const (
+ maxPartSize = int64(5 * 1024 * 1024)
+ maxRetries = 3
+ awsAccessKeyID = "Your access key"
+ awsSecretAccessKey = "Your secret key"
+ awsBucketRegion = "S3 bucket region"
+ awsBucketName = "newBucket"
+)
+
+var (
+ filename = flag.String("f", "", "the file name")
+)
+
+func main() {
+ flag.Parse()
+
+ creds := credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, "")
+ _, err := creds.Get()
+ if err != nil {
+ fmt.Printf("bad credentials: %s", err)
+ }
+ cfg := aws.NewConfig().WithRegion(awsBucketRegion).WithCredentials(creds).WithDisableSSL(true).WithEndpoint("localhost:8333")
+ svc := s3.New(session.New(), cfg)
+
+ file, err := os.Open(*filename)
+ if err != nil {
+ fmt.Printf("err opening file: %s", err)
+ return
+ }
+ defer file.Close()
+ fileInfo, _ := file.Stat()
+ size := fileInfo.Size()
+ buffer := make([]byte, size)
+ file.Read(buffer)
+ fileType := http.DetectContentType(buffer)
+
+ path := "/media/" + file.Name()
+ input := &s3.CreateMultipartUploadInput{
+ Bucket: aws.String(awsBucketName),
+ Key: aws.String(path),
+ ContentType: aws.String(fileType),
+ }
+
+ resp, err := svc.CreateMultipartUpload(input)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ fmt.Println("Created multipart upload request")
+
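+ // upload the buffer in parts of at most maxPartSize, keeping each returned ETag for the final CompleteMultipartUpload call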
+ var curr, partLength int64
+ var remaining = size
+ var completedParts []*s3.CompletedPart
+ partNumber := 1
+ for curr = 0; remaining != 0; curr += partLength {
+ if remaining < maxPartSize {
+ partLength = remaining
+ } else {
+ partLength = maxPartSize
+ }
+ completedPart, err := uploadPart(svc, resp, buffer[curr:curr+partLength], partNumber)
+ if err != nil {
+ fmt.Println(err.Error())
+ err := abortMultipartUpload(svc, resp)
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ return
+ }
+ remaining -= partLength
+ partNumber++
+ completedParts = append(completedParts, completedPart)
+ }
+
+ // list parts
+ parts, err := svc.ListParts(&s3.ListPartsInput{
+ Bucket: input.Bucket,
+ Key: input.Key,
+ MaxParts: nil,
+ PartNumberMarker: nil,
+ RequestPayer: nil,
+ UploadId: resp.UploadId,
+ })
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+ fmt.Printf("list parts: %d\n", len(parts.Parts))
+ for i, part := range parts.Parts {
+ fmt.Printf("part %d: %v\n", i, part)
+ }
+
+
+ completeResponse, err := completeMultipartUpload(svc, resp, completedParts)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+
+ fmt.Printf("Successfully uploaded file: %s\n", completeResponse.String())
+}
+
+func completeMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, completedParts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) {
+ completeInput := &s3.CompleteMultipartUploadInput{
+ Bucket: resp.Bucket,
+ Key: resp.Key,
+ UploadId: resp.UploadId,
+ MultipartUpload: &s3.CompletedMultipartUpload{
+ Parts: completedParts,
+ },
+ }
+ return svc.CompleteMultipartUpload(completeInput)
+}
+
+func uploadPart(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, fileBytes []byte, partNumber int) (*s3.CompletedPart, error) {
+ tryNum := 1
+ partInput := &s3.UploadPartInput{
+ Body: bytes.NewReader(fileBytes),
+ Bucket: resp.Bucket,
+ Key: resp.Key,
+ PartNumber: aws.Int64(int64(partNumber)),
+ UploadId: resp.UploadId,
+ ContentLength: aws.Int64(int64(len(fileBytes))),
+ }
+
+ for tryNum <= maxRetries {
+ uploadResult, err := svc.UploadPart(partInput)
+ if err != nil {
+ if tryNum == maxRetries {
+ if aerr, ok := err.(awserr.Error); ok {
+ return nil, aerr
+ }
+ return nil, err
+ }
+ fmt.Printf("Retrying to upload part #%v\n", partNumber)
+ tryNum++
+ } else {
+ fmt.Printf("Uploaded part #%v\n", partNumber)
+ return &s3.CompletedPart{
+ ETag: uploadResult.ETag,
+ PartNumber: aws.Int64(int64(partNumber)),
+ }, nil
+ }
+ }
+ return nil, nil
+}
+
+func abortMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput) error {
+ fmt.Println("Aborting multipart upload for UploadId#" + *resp.UploadId)
+ abortInput := &s3.AbortMultipartUploadInput{
+ Bucket: resp.Bucket,
+ Key: resp.Key,
+ UploadId: resp.UploadId,
+ }
+ _, err := svc.AbortMultipartUpload(abortInput)
+ return err
+}
diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go
index afe651c4e..56342a0cb 100644
--- a/unmaintained/change_superblock/change_superblock.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -92,7 +92,7 @@ func main() {
header := superBlock.Bytes()
- if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
+ if n, e := datBackend.WriteAt(header, 0); n == 0 || e != nil {
glog.Fatalf("cannot write super block: %v", e)
}
diff --git a/unmaintained/check_disk_size/check_disk_size.go b/unmaintained/check_disk_size/check_disk_size.go
new file mode 100644
index 000000000..4a8b92b88
--- /dev/null
+++ b/unmaintained/check_disk_size/check_disk_size.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+var (
+ dir = flag.String("dir", ".", "the directory which uses a disk")
+)
+
+func main() {
+ flag.Parse()
+
+ fillInDiskStatus(*dir)
+
+ fmt.Printf("OS: %v\n", runtime.GOOS)
+ fmt.Printf("Arch: %v\n", runtime.GOARCH)
+
+}
+
+func fillInDiskStatus(dir string) {
+ fs := syscall.Statfs_t{}
+ err := syscall.Statfs(dir, &fs)
+ if err != nil {
+ fmt.Printf("failed to statfs on %s: %v\n", dir, err)
+ return
+ }
+ fmt.Printf("statfs: %+v\n", fs)
+ fmt.Println()
+
+ total := fs.Blocks * uint64(fs.Bsize)
+ free := fs.Bfree * uint64(fs.Bsize)
+ fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total)
+ fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free)
+ fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free)
+ fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100))
+ fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100))
+ return
+}
diff --git a/unmaintained/compact_leveldb/compact_leveldb.go b/unmaintained/compact_leveldb/compact_leveldb.go
index 317356c3f..9be5697de 100644
--- a/unmaintained/compact_leveldb/compact_leveldb.go
+++ b/unmaintained/compact_leveldb/compact_leveldb.go
@@ -5,6 +5,7 @@ import (
"log"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
@@ -25,6 +26,9 @@ func main() {
}
db, err := leveldb.OpenFile(*dir, opts)
+ if errors.IsCorrupted(err) {
+ db, err = leveldb.RecoverFile(*dir, opts)
+ }
if err != nil {
log.Fatal(err)
}
diff --git a/unmaintained/diff_volume_servers/diff_volume_servers.go b/unmaintained/diff_volume_servers/diff_volume_servers.go
new file mode 100644
index 000000000..27a537617
--- /dev/null
+++ b/unmaintained/diff_volume_servers/diff_volume_servers.go
@@ -0,0 +1,198 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "google.golang.org/grpc"
+)
+
+var (
+ serversStr = flag.String("volumeServers", "", "comma-delimited list of volume servers to diff the volume against")
+ volumeId = flag.Int("volumeId", -1, "a volume id to diff from servers")
+ volumeCollection = flag.String("collection", "", "the volume collection name")
+ grpcDialOption grpc.DialOption
+)
+
+/*
+ Diff the volume's files across multiple volume servers.
+ diff_volume_servers -volumeServers 127.0.0.1:8080,127.0.0.1:8081 -volumeId 5
+
+ Example Output:
+ reference 127.0.0.1:8081
+ fileId volumeServer message
+ 5,01617c3f61 127.0.0.1:8080 wrongSize
+*/
+func main() {
+ flag.Parse()
+
+ util.LoadConfiguration("security", false)
+ grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ vid := uint32(*volumeId)
+ servers := strings.Split(*serversStr, ",")
+ if len(servers) < 2 {
+ glog.Fatalf("You must specify more than 1 server\n")
+ }
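+ // pull each server's .idx file; the server whose index reaches the largest data offset becomes the reference the others are diffed against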
+ var referenceServer string
+ var maxOffset int64
+ allFiles := map[string]map[types.NeedleId]needleState{}
+ for _, addr := range servers {
+ files, offset, err := getVolumeFiles(vid, addr)
+ if err != nil {
+ glog.Fatalf("Failed to copy idx from volume server %s\n", err)
+ }
+ allFiles[addr] = files
+ if offset > maxOffset {
+ maxOffset = offset
+ referenceServer = addr
+ }
+ }
+
+ same := true
+ fmt.Println("reference", referenceServer)
+ fmt.Println("fileId volumeServer message")
+ for nid, n := range allFiles[referenceServer] {
+ for addr, files := range allFiles {
+ if addr == referenceServer {
+ continue
+ }
+ var diffMsg string
+ n2, ok := files[nid]
+ if !ok {
+ if n.state == stateDeleted {
+ continue
+ }
+ diffMsg = "missing"
+ } else if n2.state != n.state {
+ switch n.state {
+ case stateDeleted:
+ diffMsg = "notDeleted"
+ case statePresent:
+ diffMsg = "deleted"
+ }
+ } else if n2.size != n.size {
+ diffMsg = "wrongSize"
+ } else {
+ continue
+ }
+ same = false
+
+ // fetch the needle details
+ var id string
+ var err error
+ if n.state == statePresent {
+ id, err = getNeedleFileId(vid, nid, referenceServer)
+ } else {
+ id, err = getNeedleFileId(vid, nid, addr)
+ }
+ if err != nil {
+ glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
+ }
+ fmt.Println(id, addr, diffMsg)
+ }
+ }
+ if !same {
+ os.Exit(1)
+ }
+}
+
+const (
+ stateDeleted uint8 = 1
+ statePresent uint8 = 2
+)
+
+type needleState struct {
+ state uint8
+ size types.Size
+}
+
+func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
+ var idxFile *bytes.Reader
+ err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ copyFileClient, err := vs.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
+ VolumeId: v,
+ Ext: ".idx",
+ CompactionRevision: math.MaxUint32,
+ StopOffset: math.MaxInt64,
+ Collection: *volumeCollection,
+ })
+ if err != nil {
+ return err
+ }
+ var buf bytes.Buffer
+ for {
+ resp, err := copyFileClient.Recv()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ buf.Write(resp.FileContent)
+ }
+ idxFile = bytes.NewReader(buf.Bytes())
+ return nil
+ })
+ if err != nil {
+ return nil, 0, err
+ }
+
+ var maxOffset int64
+ files := map[types.NeedleId]needleState{}
+ err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size types.Size) error {
+ if offset.IsZero() || size.IsDeleted() {
+ files[key] = needleState{
+ state: stateDeleted,
+ size: size,
+ }
+ } else {
+ files[key] = needleState{
+ state: statePresent,
+ size: size,
+ }
+ }
+ if actual := offset.ToActualOffset(); actual > maxOffset {
+ maxOffset = actual
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, 0, err
+ }
+ return files, maxOffset, nil
+}
+
+func getNeedleFileId(v uint32, nid types.NeedleId, addr string) (string, error) {
+ var id string
+ err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
+ resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
+ VolumeId: v,
+ NeedleId: uint64(nid),
+ })
+ if err != nil {
+ return err
+ }
+ id = needle.NewFileId(needle.VolumeId(v), resp.NeedleId, resp.Cookie).String()
+ return nil
+ })
+ return id, err
+}
diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go
index d6110d870..70bce3bf9 100644
--- a/unmaintained/fix_dat/fix_dat.go
+++ b/unmaintained/fix_dat/fix_dat.go
@@ -98,7 +98,7 @@ func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, vis
// parse index file entry
key := util.BytesToUint64(bytes[0:8])
offsetFromIndex := util.BytesToUint32(bytes[8:12])
- sizeFromIndex := util.BytesToUint32(bytes[12:16])
+ sizeFromIndex := types.BytesToSize(bytes[12:16])
count, _ = idxFile.ReadAt(bytes, readerOffset)
readerOffset += int64(count)
@@ -123,7 +123,7 @@ func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, vis
}
}()
- if n.Size <= n.DataSize {
+ if n.Size <= types.Size(n.DataSize) {
continue
}
visitNeedle(n, offset)
diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go
index 28bcabb9b..bff5becc1 100644
--- a/unmaintained/repeated_vacuum/repeated_vacuum.go
+++ b/unmaintained/repeated_vacuum/repeated_vacuum.go
@@ -1,51 +1,73 @@
package main
import (
- "bytes"
"flag"
"fmt"
"log"
"math/rand"
+ "time"
- "github.com/chrislusf/seaweedfs/weed/security"
- "github.com/spf13/viper"
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
- master = flag.String("master", "127.0.0.1:9333", "the master server")
- repeat = flag.Int("n", 5, "repeat how many times")
+ master = flag.String("master", "127.0.0.1:9333", "the master server")
+ repeat = flag.Int("n", 5, "repeat how many times")
+ garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold")
+ replication = flag.String("replication", "", "replication 000, 001, 002, etc")
)
func main() {
flag.Parse()
util.LoadConfiguration("security", false)
- grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
- for i := 0; i < *repeat; i++ {
- assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1})
- if err != nil {
- log.Fatalf("assign: %v", err)
- }
+ genFile(grpcDialOption, 0)
- data := make([]byte, 1024)
- rand.Read(data)
- reader := bytes.NewReader(data)
+ go func() {
+ for {
+ println("vacuum threshold", *garbageThreshold)
+ _, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
+ if err != nil {
+ log.Fatalf("vacuum: %v", err)
+ }
+ time.Sleep(time.Second)
+ }
+ }()
- targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+ for i := 0; i < *repeat; i++ {
+ // create 2 files, and delete one of them
- _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth)
- if err != nil {
- log.Fatalf("upload: %v", err)
- }
+ assignResult, targetUrl := genFile(grpcDialOption, i)
util.Delete(targetUrl, string(assignResult.Auth))
- util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master))
+ }
+
+}
+func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
+ assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
+ Count: 1,
+ Replication: *replication,
+ })
+ if err != nil {
+ log.Fatalf("assign: %v", err)
}
+ data := make([]byte, 1024)
+ rand.Read(data)
+
+ targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+
+ _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
+ if err != nil {
+ log.Fatalf("upload: %v", err)
+ }
+ return assignResult, targetUrl
}
diff --git a/unmaintained/s3/benchmark/hsbench.sh b/unmaintained/s3/benchmark/hsbench.sh
new file mode 100755
index 000000000..285b51405
--- /dev/null
+++ b/unmaintained/s3/benchmark/hsbench.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
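+# drive a local SeaweedFS S3 gateway (weed s3 on port 8333) with hsbench; the access key and secret here are placeholders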
+hsbench -a accesstoken -s secret -z 4K -d 10 -t 10 -b 10 -u http://localhost:8333 -m "cxipgdx" -bp "hsbench-"
diff --git a/unmaintained/s3/presigned_put/presigned_put.go b/unmaintained/s3/presigned_put/presigned_put.go
new file mode 100644
index 000000000..e8368d124
--- /dev/null
+++ b/unmaintained/s3/presigned_put/presigned_put.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "encoding/base64"
+ "fmt"
+ "crypto/md5"
+ "strings"
+ "time"
+ "net/http"
+)
+
+// Generates a pre-signed URL for an S3 PUT operation with a specific payload,
+// then uploads the content through that URL.
+//
+// Usage:
+// go run presigned_put.go
+// For this example to work, the domainName is needed:
+// weed s3 -domainName=localhost
+func main() {
+ h := md5.New()
+ content := strings.NewReader(stringContent)
+ content.WriteTo(h)
+
+ // Initialize a session in us-east-1 that the SDK will use to load
+ // credentials from the shared credentials file ~/.aws/credentials.
+ sess, err := session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ Endpoint: aws.String("http://localhost:8333"),
+ })
+
+ // Create S3 service client
+ svc := s3.New(sess)
+
+ putRequest, output := svc.PutObjectRequest(&s3.PutObjectInput{
+ Bucket: aws.String("dev"),
+ Key: aws.String("testKey"),
+ })
+ fmt.Printf("output: %+v\n", output)
+
+ md5s := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ putRequest.HTTPRequest.Header.Set("Content-MD5", md5s)
+
+ url, err := putRequest.Presign(15 * time.Minute)
+ if err != nil {
+ fmt.Println("error presigning request", err)
+ return
+ }
+
+ fmt.Println(url)
+
+ req, err := http.NewRequest("PUT", url, strings.NewReader(stringContent))
+ if err != nil {
+ fmt.Println("error creating request", url)
+ return
+ }
+ req.Header.Set("Content-MD5", md5s)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ fmt.Printf("error put request: %v\n", err)
+ return
+ }
+ fmt.Printf("response: %+v\n", resp)
+}
+
+var stringContent = `Generate a Pre-Signed URL for an Amazon S3 PUT Operation with a Specific Payload
+You can generate a pre-signed URL for a PUT operation that checks whether users upload the correct content. When the SDK pre-signs a request, it computes the checksum of the request body and generates an MD5 checksum that is included in the pre-signed URL. Users must upload the same content that produces the same MD5 checksum generated by the SDK; otherwise, the operation fails. This is not the Content-MD5, but the signature. To enforce Content-MD5, simply add the header to the request.
+
+The following example adds a Body field to generate a pre-signed PUT operation that requires a specific payload to be uploaded by users.
+`
\ No newline at end of file
diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go
index efc58e751..17c494841 100644
--- a/unmaintained/see_dat/see_dat.go
+++ b/unmaintained/see_dat/see_dat.go
@@ -2,6 +2,7 @@ package main
import (
"flag"
+ "github.com/chrislusf/seaweedfs/weed/util"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -31,7 +32,8 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
- glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t)
+ glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
+ *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
return nil
}
diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go
index 777af1821..22c659351 100644
--- a/unmaintained/see_idx/see_idx.go
+++ b/unmaintained/see_idx/see_idx.go
@@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"os"
"path"
"strconv"
@@ -35,8 +36,8 @@ func main() {
}
defer indexFile.Close()
- idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
- fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
+ idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error {
+ fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
return nil
})
diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go
new file mode 100644
index 000000000..45480d4dc
--- /dev/null
+++ b/unmaintained/see_log_entry/see_log_entry.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ logdataFile = flag.String("logdata", "", "log data file saved under "+ filer.SystemLogDir)
+)
+
+func main() {
+ flag.Parse()
+
+ dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
+ if err != nil {
+ log.Fatalf("failed to open %s: %v", *logdataFile, err)
+ }
+ defer dst.Close()
+
+ err = walkLogEntryFile(dst)
+ if err != nil {
+ log.Fatalf("failed to visit %s: %v", *logdataFile, err)
+ }
+
+}
+
+func walkLogEntryFile(dst *os.File) error {
+
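+ // each record is a 4-byte size header followed by a marshaled filer_pb.LogEntry, whose Data field holds a marshaled SubscribeMetadataResponse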
+ sizeBuf := make([]byte, 4)
+
+ for {
+ if n, err := dst.Read(sizeBuf); n != 4 {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ size := util.BytesToUint32(sizeBuf)
+
+ data := make([]byte, int(size))
+
+ if n, err := dst.Read(data); n != len(data) {
+ return err
+ }
+
+ logEntry := &filer_pb.LogEntry{}
+ err := proto.Unmarshal(data, logEntry)
+ if err != nil {
+ log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+ return nil
+ }
+
+ event := &filer_pb.SubscribeMetadataResponse{}
+ err = proto.Unmarshal(logEntry.Data, event)
+ if err != nil {
+ log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ return nil
+ }
+
+ fmt.Printf("event: %+v\n", event)
+
+ }
+
+}
diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go
index 0d2ac8de1..452badfd6 100644
--- a/unmaintained/see_meta/see_meta.go
+++ b/unmaintained/see_meta/see_meta.go
@@ -7,10 +7,10 @@ import (
"log"
"os"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/golang/protobuf/proto"
)
var (
@@ -58,7 +58,7 @@ func walkMetaFile(dst *os.File) error {
return err
}
- fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
+ fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
for i, chunk := range fullEntry.Entry.Chunks {
fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String())
}
diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
index b2e4b28c6..2ee8028f2 100644
--- a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
+++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
@@ -45,7 +45,7 @@ func main() {
defer wg.Done()
client := &http.Client{Transport: &http.Transport{
- MaxConnsPerHost: 1024,
+ MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
diff --git a/unmaintained/stress_filer_upload/write_files/write_files.go b/unmaintained/stress_filer_upload/write_files/write_files.go
new file mode 100644
index 000000000..508e37d14
--- /dev/null
+++ b/unmaintained/stress_filer_upload/write_files/write_files.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "time"
+)
+
+var (
+ minSize = flag.Int("minSize", 1024, "min file size")
+ maxSize = flag.Int("maxSize", 3*1024*1024, "max file size")
+ fileCount = flag.Int("n", 1, "number of files to write")
+ blockSize = flag.Int("blockSizeKB", 4, "write block size")
+ toDir = flag.String("dir", ".", "destination directory")
+)
+
+func check(e error) {
+ if e != nil {
+ panic(e)
+ }
+}
+
+func main() {
+
+ flag.Parse()
+
+ block := make([]byte, *blockSize*1024)
+
+ for i := 0; i < *fileCount; i++ {
+
+ f, err := os.Create(fmt.Sprintf("%s/file%05d", *toDir, i))
+ check(err)
+
+ fileSize := *minSize + rand.Intn(*maxSize-*minSize)
+ startTime := time.Now()
+
+ fmt.Printf("write %s %d bytes: ", f.Name(), fileSize)
+
+ for x := 0; x < fileSize; {
+ rand.Read(block)
+ _, err = f.Write(block)
+ check(err)
+ x += len(block)
+ }
+
+ err = f.Close()
+ check(err)
+
+ fmt.Printf("%.02f MB/sec\n", float64(fileSize)*float64(time.Second)/float64(time.Now().Sub(startTime)*1024*1024))
+ }
+
+}
diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go
index f0ef51c09..32da2e6ab 100644
--- a/unmaintained/volume_tailer/volume_tailer.go
+++ b/unmaintained/volume_tailer/volume_tailer.go
@@ -9,7 +9,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
util2 "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
"golang.org/x/tools/godoc/util"
)
@@ -25,7 +24,7 @@ func main() {
flag.Parse()
util2.LoadConfiguration("security", false)
- grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
+ grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client")
vid := needle.VolumeId(*volumeId)
@@ -38,7 +37,7 @@ func main() {
sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
}
- err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
+ err := operation.TailVolume(func()string{return *master}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
if n.Size == 0 {
println("-", n.String())
return nil
@@ -49,8 +48,8 @@ func main() {
if *showTextFile {
data := n.Data
- if n.IsGzipped() {
- if data, err = util2.UnGzipData(data); err != nil {
+ if n.IsCompressed() {
+ if data, err = util2.DecompressData(data); err != nil {
return err
}
}
@@ -58,7 +57,7 @@ func main() {
println(string(data))
}
- println("-", n.String(), "compressed", n.IsGzipped(), "original size", len(data))
+ println("-", n.String(), "compressed", n.IsCompressed(), "original size", len(data))
}
return nil
})
diff --git a/weed/Makefile b/weed/Makefile
new file mode 100644
index 000000000..8f1257d09
--- /dev/null
+++ b/weed/Makefile
@@ -0,0 +1,40 @@
+BINARY = weed
+
+SOURCE_DIR = .
+
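+# each debug_* target rebuilds weed with optimizations and inlining disabled (-N -l), then runs it under the dlv debugger listening on :2345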
+all: debug_mount
+
+.PHONY : clean debug_mount
+
+clean:
+ go clean $(SOURCE_DIR)
+ rm -f $(BINARY)
+
+debug_shell:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- shell
+
+debug_mount:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
+
+debug_server:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- server -dir=/Volumes/mobile_disk/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1
+
+debug_volume:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- volume -dir=/Volumes/mobile_disk/100 -port 8564 -max=30 -preStopSeconds=2
+
+debug_webdav:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 webdav
+
+debug_s3:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 s3
+
+debug_filer_copy:
+ go build -gcflags="all=-N -l"
+ dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h
diff --git a/weed/command/backup.go b/weed/command/backup.go
index 0f6bed225..207df770b 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -3,8 +3,6 @@ package command
import (
"fmt"
- "github.com/spf13/viper"
-
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -66,7 +64,7 @@ var cmdBackup = &Command{
func runBackup(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
if *s.volumeId == -1 {
return false
@@ -74,7 +72,7 @@ func runBackup(cmd *Command, args []string) bool {
vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info
- lookup, err := operation.Lookup(*s.master, vid.String())
+ lookup, err := operation.Lookup(func() string { return *s.master }, vid.String())
if err != nil {
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
return true
@@ -114,14 +112,14 @@ func runBackup(cmd *Command, args []string) bool {
return true
}
}
- v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+ v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
if err != nil {
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
return true
}
if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
- if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil {
+ if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
fmt.Printf("Compact Volume before synchronizing %v\n", err)
return true
}
@@ -139,7 +137,7 @@ func runBackup(cmd *Command, args []string) bool {
// remove the old data
v.Destroy()
// recreate an empty volume
- v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+ v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
if err != nil {
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
return true
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 26be1fe3a..4fedb55f1 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -2,7 +2,6 @@ package command
import (
"bufio"
- "context"
"fmt"
"io"
"math"
@@ -15,7 +14,6 @@ import (
"sync"
"time"
- "github.com/spf13/viper"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -37,10 +35,13 @@ type BenchmarkOptions struct {
sequentialRead *bool
collection *string
replication *string
+ diskType *string
cpuprofile *string
maxCpu *int
grpcDialOption grpc.DialOption
masterClient *wdclient.MasterClient
+ fsync *bool
+ useTcp *bool
}
var (
@@ -63,8 +64,11 @@ func init() {
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
+ b.diskType = cmdBenchmark.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
+ b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
+ b.useTcp = cmdBenchmark.Flag.Bool("useTcp", false, "send data via tcp")
sharedBytes = make([]byte, 1024)
}
@@ -109,9 +113,9 @@ var (
func runBenchmark(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
+ b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
- fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if *b.maxCpu < 1 {
*b.maxCpu = runtime.NumCPU()
}
@@ -125,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
- b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
+ b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, "", strings.Split(*b.masters, ","))
go b.masterClient.KeepConnectedToMaster()
b.masterClient.WaitUntilConnected()
@@ -221,25 +225,37 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ volumeTcpClient := wdclient.NewVolumeTcpClient()
+
for id := range idChan {
start := time.Now()
fileSize := int64(*b.fileSize + random.Intn(64))
fp := &operation.FilePart{
- Reader: &FakeReader{id: uint64(id), size: fileSize},
+ Reader: &FakeReader{id: uint64(id), size: fileSize, random: random},
FileSize: fileSize,
MimeType: "image/bench", // prevent gzip benchmark content
+ Fsync: *b.fsync,
}
ar := &operation.VolumeAssignRequest{
Count: 1,
Collection: *b.collection,
Replication: *b.replication,
+ DiskType: *b.diskType,
}
- if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
+ if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
if !isSecure && assignResult.Auth != "" {
isSecure = true
}
- if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil {
+ if *b.useTcp {
+ if uploadByTcp(volumeTcpClient, fp) {
+ fileIdLineChan <- fp.Fid
+ s.completed++
+ s.transferred += fileSize
+ } else {
+ s.failed++
+ }
+ } else if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -279,19 +295,29 @@ func readFiles(fileIdLineChan chan string, s *stat) {
fmt.Printf("reading file %s\n", fid)
}
start := time.Now()
- url, err := b.masterClient.LookupFileId(fid)
+ var bytesRead int
+ var err error
+ urls, err := b.masterClient.LookupFileId(fid)
if err != nil {
s.failed++
println("!!!! ", fid, " location not found!!!!!")
continue
}
- if bytesRead, err := util.Get(url); err == nil {
+ var bytes []byte
+ for _, url := range urls {
+ bytes, _, err = util.Get(url)
+ if err == nil {
+ break
+ }
+ }
+ bytesRead = len(bytes)
+ if err == nil {
s.completed++
- s.transferred += int64(len(bytesRead))
+ s.transferred += int64(bytesRead)
readStats.addSample(time.Now().Sub(start))
} else {
s.failed++
- fmt.Printf("Failed to read %s error:%v\n", url, err)
+ fmt.Printf("Failed to read %s error:%v\n", fid, err)
}
}
}
@@ -315,6 +341,17 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b
}
}
+func uploadByTcp(volumeTcpClient *wdclient.VolumeTcpClient, fp *operation.FilePart) bool {
+
+ err := volumeTcpClient.PutFileChunk(fp.Server, fp.Fid, uint32(fp.FileSize), fp.Reader)
+ if err != nil {
+ glog.Errorf("upload chunk err: %v", err)
+ return false
+ }
+
+ return true
+}
+
func readFileIds(fileName string, fileIdLineChan chan string) {
file, err := os.Open(fileName) // For read access.
if err != nil {
@@ -353,7 +390,7 @@ func readFileIds(fileName string, fileIdLineChan chan string) {
}
const (
- benchResolution = 10000 //0.1 microsecond
+ benchResolution = 10000 // 0.1 microsecond
benchBucket = 1000000000 / benchResolution
)
@@ -476,7 +513,7 @@ func (s *stats) printStats() {
fmt.Printf("\nConnection Times (ms)\n")
fmt.Printf(" min avg max std\n")
fmt.Printf("Total: %2.1f %3.1f %3.1f %3.1f\n", float32(min)/10, float32(avg)/10, float32(max)/10, std/10)
- //printing percentiles
+ // printing percentiles
fmt.Printf("\nPercentage of the requests served within a certain time (ms)\n")
percentiles := make([]int, len(percentages))
for i := 0; i < len(percentages); i++ {
@@ -510,8 +547,9 @@ func (s *stats) printStats() {
// a fake reader to generate content to upload
type FakeReader struct {
- id uint64 // an id number
- size int64 // max bytes
+ id uint64 // an id number
+ size int64 // max bytes
+ random *rand.Rand
}
func (l *FakeReader) Read(p []byte) (n int, err error) {
@@ -527,6 +565,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
for i := 0; i < 8; i++ {
p[i] = byte(l.id >> uint(i*8))
}
+ l.random.Read(p[8:])
}
l.size -= int64(n)
return
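Two changes in benchmark.go deserve a note. First, operation.Assign and fp.Upload now receive the method value b.masterClient.GetMaster instead of its result, so a helper that retries internally can re-resolve the current master rather than reuse a stale address. Second, FakeReader fills everything after the 8-byte id with bytes from its own *rand.Rand, so benchmark payloads no longer compress away to nothing. A toy illustration of the function-value idea, assuming operation.GetMasterFn is essentially func() string (consistent with its use in download.go below):

    package main

    import "fmt"

    // GetMasterFn mirrors the assumed shape of operation.GetMasterFn:
    // a function, not a snapshot, so every call sees the current master.
    type GetMasterFn func() string

    // assign stands in for a retrying helper such as operation.Assign;
    // it re-asks for the master address on every attempt.
    func assign(masterFn GetMasterFn, attempts int) {
        for i := 0; i < attempts; i++ {
            fmt.Printf("attempt %d -> master %s\n", i+1, masterFn())
        }
    }

    func main() {
        current := "master-a:9333"
        getMaster := GetMasterFn(func() string { return current })

        assign(getMaster, 1)
        current = "master-b:9333" // simulate a leader change
        assign(getMaster, 1)      // the same value now yields the new master
    }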
diff --git a/weed/command/command.go b/weed/command/command.go
index 79c00d4cd..b6efcead2 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -1,8 +1,8 @@
package command
import (
- "flag"
"fmt"
+ flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"os"
"strings"
)
@@ -12,20 +12,28 @@ var Commands = []*Command{
cmdBackup,
cmdCompact,
cmdCopy,
- cmdFix,
+ cmdDownload,
+ cmdExport,
+ cmdFiler,
+ cmdFilerBackup,
+ cmdFilerCat,
+ cmdFilerMetaBackup,
+ cmdFilerMetaTail,
cmdFilerReplicate,
- cmdServer,
+ cmdFilerSynchronize,
+ cmdFix,
+ cmdGateway,
cmdMaster,
- cmdFiler,
+ cmdMount,
cmdS3,
- cmdUpload,
- cmdDownload,
+ cmdIam,
+ cmdMsgBroker,
cmdScaffold,
+ cmdServer,
cmdShell,
+ cmdUpload,
cmdVersion,
cmdVolume,
- cmdExport,
- cmdMount,
cmdWebDav,
}
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 85313b749..92e25f474 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -4,6 +4,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -40,8 +41,7 @@ func runCompact(cmd *Command, args []string) bool {
preallocate := *compactVolumePreallocate * (1 << 20)
vid := needle.VolumeId(*compactVolumeId)
- v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid,
- storage.NeedleMapInMemory, nil, nil, preallocate, 0)
+ v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0)
if err != nil {
glog.Fatalf("Load Volume [ERROR] %s\n", err)
}
@@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {
- if err = v.Compact2(preallocate); err != nil {
+ if err = v.Compact2(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
}
diff --git a/weed/command/download.go b/weed/command/download.go
index b3e33defd..7bbff9448 100644
--- a/weed/command/download.go
+++ b/weed/command/download.go
@@ -4,6 +4,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "net/http"
"os"
"path"
"strings"
@@ -43,15 +44,15 @@ var cmdDownload = &Command{
func runDownload(cmd *Command, args []string) bool {
for _, fid := range args {
- if e := downloadToFile(*d.server, fid, *d.dir); e != nil {
+ if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil {
fmt.Println("Download Error: ", fid, e)
}
}
return true
}
-func downloadToFile(server, fileId, saveDir string) error {
- fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
+ fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return lookupError
}
@@ -59,7 +60,7 @@ func downloadToFile(server, fileId, saveDir string) error {
if err != nil {
return err
}
- defer rc.Close()
+ defer util.CloseResponse(rc)
if filename == "" {
filename = fileId
}
@@ -75,14 +76,14 @@ func downloadToFile(server, fileId, saveDir string) error {
}
defer f.Close()
if isFileList {
- content, err := ioutil.ReadAll(rc)
+ content, err := ioutil.ReadAll(rc.Body)
if err != nil {
return err
}
fids := strings.Split(string(content), "\n")
for _, partId := range fids {
var n int
- _, part, err := fetchContent(*d.server, partId)
+ _, part, err := fetchContent(masterFn, partId)
if err == nil {
n, err = f.Write(part)
}
@@ -94,7 +95,7 @@ func downloadToFile(server, fileId, saveDir string) error {
}
}
} else {
- if _, err = io.Copy(f, rc); err != nil {
+ if _, err = io.Copy(f, rc.Body); err != nil {
return err
}
@@ -102,17 +103,17 @@ func downloadToFile(server, fileId, saveDir string) error {
return nil
}
-func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
- fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) {
+ fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return "", nil, lookupError
}
- var rc io.ReadCloser
+ var rc *http.Response
if filename, _, rc, e = util.DownloadFile(fileUrl); e != nil {
return "", nil, e
}
- content, e = ioutil.ReadAll(rc)
- rc.Close()
+ defer util.CloseResponse(rc)
+ content, e = ioutil.ReadAll(rc.Body)
return
}
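The download path above switches from a bare io.ReadCloser to the full *http.Response, and callers defer util.CloseResponse(rc) instead of rc.Close(). CloseResponse is not defined in this patch; presumably it drains any unread bytes before closing, since an undrained body forces Go's http.Transport to discard the connection instead of returning it to the keep-alive pool. A self-contained sketch under that assumption:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "net/http"
    )

    // closeResponse is a guess at util.CloseResponse: drain, then close,
    // so the underlying TCP connection stays reusable.
    func closeResponse(resp *http.Response) {
        io.Copy(ioutil.Discard, resp.Body)
        resp.Body.Close()
    }

    func main() {
        resp, err := http.Get("http://localhost:8888/path/to/file")
        if err != nil {
            fmt.Println("get:", err)
            return
        }
        defer closeResponse(resp)

        content, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        fmt.Printf("read %d bytes\n", len(content))
    }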
diff --git a/weed/command/export.go b/weed/command/export.go
index 8d664ad3b..1c32e1050 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -19,10 +19,11 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const (
- defaultFnFormat = `{{.Mime}}/{{.Id}}:{{.Name}}`
+ defaultFnFormat = `{{.Id}}_{{.Name}}{{.Ext}}`
timeFormat = "2006-01-02T15:04:05"
)
@@ -55,7 +56,7 @@ func init() {
var (
output = cmdExport.Flag.String("o", "", "output tar file name, must ends with .tar, or just a \"-\" for stdout")
- format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Mime}} {{.Id}} {{.Name}} {{.Ext}}")
+ format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Id}} {{.Name}} {{.Ext}}")
newer = cmdExport.Flag.String("newer", "", "export only files newer than this time, default is all files. Must be specified in RFC3339 without timezone, e.g. 2006-01-02T15:04:05")
showDeleted = cmdExport.Flag.Bool("deleted", false, "export deleted files. only applies if -o is not specified")
limit = cmdExport.Flag.Int("limit", 0, "only show first n entries if specified")
@@ -69,21 +70,23 @@ var (
localLocation, _ = time.LoadLocation("Local")
)
-func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) {
+func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool, offset int64, onDiskSize int64) {
key := needle.NewFileIdFromNeedle(vid, n).String()
- size := n.DataSize
+ size := int32(n.DataSize)
if version == needle.Version1 {
- size = n.Size
+ size = int32(n.Size)
}
- fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n",
+ fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\t%d\t%d\n",
key,
n.Name,
size,
- n.IsGzipped(),
+ n.IsCompressed(),
n.Mime,
n.LastModifiedString(),
n.Ttl.String(),
deleted,
+ offset,
+ offset+onDiskSize,
)
}
@@ -108,9 +111,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
vid := scanner.vid
nv, ok := needleMap.Get(n.Id)
- glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v",
- n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv)
- if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset {
+ glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
+ n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
+ if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)
@@ -123,17 +126,17 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
if tarOutputFile != nil {
return writeFile(vid, n)
} else {
- printNeedle(vid, n, scanner.version, false)
+ printNeedle(vid, n, scanner.version, false, offset, n.DiskSize(scanner.version))
return nil
}
}
if !ok {
if *showDeleted && tarOutputFile == nil {
if n.DataSize > 0 {
- printNeedle(vid, n, scanner.version, true)
+ printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
} else {
n.Name = []byte("*tombstone")
- printNeedle(vid, n, scanner.version, true)
+ printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
}
}
glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
@@ -195,7 +198,9 @@ func runExport(cmd *Command, args []string) bool {
vid := needle.VolumeId(*export.volumeId)
needleMap := needle_map.NewMemDb()
- if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil {
+ defer needleMap.Close()
+
+ if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil {
glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
}
@@ -205,12 +210,12 @@ func runExport(cmd *Command, args []string) bool {
}
if tarOutputFile == nil {
- fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\n")
+ fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\tstart\tstop\n")
}
- err = storage.ScanVolumeFile(*export.dir, *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
+ err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
if err != nil && err != io.EOF {
- glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+ glog.Errorf("Export Volume File [ERROR] %s\n", err)
}
return true
}
@@ -240,8 +245,11 @@ func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) {
fileName := fileNameTemplateBuffer.String()
- if n.IsGzipped() && path.Ext(fileName) != ".gz" {
- fileName = fileName + ".gz"
+ if n.IsCompressed() {
+ if util.IsGzippedContent(n.Data) && path.Ext(fileName) != ".gz" {
+ fileName = fileName + ".gz"
+ }
+ // TODO other compression method
}
tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data))
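Easy to miss above: the default export filename template is now {{.Id}}_{{.Name}}{{.Ext}}, with the MIME type dropped from both the template and the -fileNameFormat help. For readers unfamiliar with Go's text/template, this is how such a format renders; the struct fields follow the help text and the sample values are made up:

    package main

    import (
        "os"
        "text/template"
    )

    // nameParams lists the fields the -fileNameFormat template may use,
    // per the flag's help text; the real exporter fills them per needle.
    type nameParams struct {
        Id   string
        Name string
        Ext  string
    }

    func main() {
        const defaultFnFormat = `{{.Id}}_{{.Name}}{{.Ext}}`
        tmpl := template.Must(template.New("fn").Parse(defaultFnFormat))
        // Prints "3,01637037d6_photo.jpg".
        p := nameParams{Id: "3,01637037d6", Name: "photo", Ext: ".jpg"}
        if err := tmpl.Execute(os.Stdout, p); err != nil {
            panic(err)
        }
        os.Stdout.WriteString("\n")
    }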
diff --git a/weed/command/filer.go b/weed/command/filer.go
index b1ceb46f5..a723b4d8a 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -1,58 +1,102 @@
package command
import (
+ "fmt"
"net/http"
+ "os"
"strconv"
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/security"
- "github.com/spf13/viper"
+ "google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
+ stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc/reflection"
)
var (
- f FilerOptions
+ f FilerOptions
+ filerStartS3 *bool
+ filerS3Options S3Options
+ filerStartWebDav *bool
+ filerWebDavOptions WebDavOption
+ filerStartIam *bool
+ filerIamOptions IamOptions
)
type FilerOptions struct {
masters *string
ip *string
+ bindIp *string
port *int
publicPort *int
collection *string
defaultReplicaPlacement *string
- redirectOnRead *bool
disableDirListing *bool
maxMB *int
dirListingLimit *int
dataCenter *string
+ rack *string
enableNotification *bool
disableHttp *bool
-
- // default leveldb directory, used in "weed server" mode
+ cipher *bool
+ peers *string
+ metricsHttpPort *int
+ saveToFilerLimit *int
defaultLevelDbDirectory *string
+ concurrentUploadLimitMB *int
}
func init() {
cmdFiler.Run = runFiler // break init cycle
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
- f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
- f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
+ f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
+ f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
+ f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
- f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
- f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
+ f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
- f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
+ f.maxMB = cmdFiler.Flag.Int("maxMB", 4, "split files larger than the limit")
f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
- f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
+ f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center")
+ f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack")
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
+ f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
+ f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+ f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
+ f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
+ f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
+
+ // start s3 on filer
+ filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
+ filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port")
+ filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
+ filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
+ filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
+ filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
+ filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+
+ // start webdav on filer
+ filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
+ filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
+ filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+ filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files")
+ filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+ filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
+ filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
+ filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
+ filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
+
+ // start iam on filer
+ filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service")
+ filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port")
}
var cmdFiler = &Command{
@@ -69,7 +113,8 @@ var cmdFiler = &Command{
//return a json format subdirectory and files listing
GET /path/to/
- The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
+ The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order.
+ If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir".
The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
@@ -80,6 +125,37 @@ func runFiler(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
+ go stats_collect.StartMetricsServer(*f.metricsHttpPort)
+
+ filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
+ startDelay := time.Duration(2)
+ if *filerStartS3 {
+ filerS3Options.filer = &filerAddress
+ go func() {
+ time.Sleep(startDelay * time.Second)
+ filerS3Options.startS3Server()
+ }()
+ startDelay++
+ }
+
+ if *filerStartWebDav {
+ filerWebDavOptions.filer = &filerAddress
+ go func() {
+ time.Sleep(startDelay * time.Second)
+ filerWebDavOptions.startWebDav()
+ }()
+ startDelay++
+ }
+
+ if *filerStartIam {
+ filerIamOptions.filer = &filerAddress
+ filerIamOptions.masters = f.masters
+ go func() {
+ time.Sleep(startDelay * time.Second)
+ filerIamOptions.startIamServer()
+ }()
+ }
+
f.startFiler()
return true
@@ -94,31 +170,38 @@ func (fo *FilerOptions) startFiler() {
publicVolumeMux = http.NewServeMux()
}
- defaultLevelDbDirectory := "./filerldb2"
- if fo.defaultLevelDbDirectory != nil {
- defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2"
+ defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2")
+
+ var peers []string
+ if *fo.peers != "" {
+ peers = strings.Split(*fo.peers, ",")
}
fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
- Masters: strings.Split(*fo.masters, ","),
- Collection: *fo.collection,
- DefaultReplication: *fo.defaultReplicaPlacement,
- RedirectOnRead: *fo.redirectOnRead,
- DisableDirListing: *fo.disableDirListing,
- MaxMB: *fo.maxMB,
- DirListingLimit: *fo.dirListingLimit,
- DataCenter: *fo.dataCenter,
- DefaultLevelDbDir: defaultLevelDbDirectory,
- DisableHttp: *fo.disableHttp,
- Port: *fo.port,
+ Masters: strings.Split(*fo.masters, ","),
+ Collection: *fo.collection,
+ DefaultReplication: *fo.defaultReplicaPlacement,
+ DisableDirListing: *fo.disableDirListing,
+ MaxMB: *fo.maxMB,
+ DirListingLimit: *fo.dirListingLimit,
+ DataCenter: *fo.dataCenter,
+ Rack: *fo.rack,
+ DefaultLevelDbDir: defaultLevelDbDirectory,
+ DisableHttp: *fo.disableHttp,
+ Host: *fo.ip,
+ Port: uint32(*fo.port),
+ Cipher: *fo.cipher,
+ SaveToFilerLimit: int64(*fo.saveToFilerLimit),
+ Filers: peers,
+ ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
}
if *fo.publicPort != 0 {
- publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort)
- glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress)
+ publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
+ glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
@@ -130,9 +213,9 @@ func (fo *FilerOptions) startFiler() {
}()
}
- glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port)
+ glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
- *fo.ip+":"+strconv.Itoa(*fo.port),
+ *fo.bindIp+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
@@ -141,11 +224,11 @@ func (fo *FilerOptions) startFiler() {
// starting grpc server
grpcPort := *fo.port + 10000
- grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0)
+ grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
- grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer"))
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
reflection.Register(grpcS)
go grpcS.Serve(grpcL)
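The filer can now embed the S3, WebDAV, and IAM gateways, each launched in its own goroutine with a slightly longer delay than the previous one so the filer's listener is up before any gateway dials it. The startDelay++ idea, reduced to a runnable toy (service names are placeholders):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    func main() {
        var wg sync.WaitGroup
        startDelay := time.Duration(2) // bare multiplier, as in the patch

        for _, name := range []string{"s3", "webdav", "iam"} {
            wg.Add(1)
            go func(name string, delay time.Duration) {
                defer wg.Done()
                // Give the "filer" started below a head start.
                time.Sleep(delay * time.Second)
                fmt.Printf("starting %s gateway after %ds\n", name, delay)
            }(name, startDelay)
            startDelay++ // each optional service waits one second longer
        }

        fmt.Println("starting filer")
        wg.Wait()
    }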
diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go
new file mode 100644
index 000000000..888b46fe7
--- /dev/null
+++ b/weed/command/filer_backup.go
@@ -0,0 +1,157 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/replication/source"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "google.golang.org/grpc"
+ "io"
+ "time"
+)
+
+type FilerBackupOptions struct {
+ isActivePassive *bool
+ filer *string
+ path *string
+ debug *bool
+ proxyByFiler *bool
+ timeAgo *time.Duration
+}
+
+var (
+ filerBackupOptions FilerBackupOptions
+)
+
+func init() {
+ cmdFilerBackup.Run = runFilerBackup // break init cycle
+ filerBackupOptions.filer = cmdFilerBackup.Flag.String("filer", "localhost:8888", "filer of one SeaweedFS cluster")
+ filerBackupOptions.path = cmdFilerBackup.Flag.String("filerPath", "/", "directory to sync on filer")
+ filerBackupOptions.proxyByFiler = cmdFilerBackup.Flag.Bool("filerProxy", false, "read and write file chunks by filer instead of volume servers")
+ filerBackupOptions.debug = cmdFilerBackup.Flag.Bool("debug", false, "debug mode to print out received files")
+ filerBackupOptions.timeAgo = cmdFilerBackup.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+}
+
+var cmdFilerBackup = &Command{
+ UsageLine: "filer.backup -filer=: ",
+ Short: "resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml",
+ Long: `resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml
+
+ filer.backup listens on filer notifications. If any file is updated, it will fetch the updated content,
+ and write to the destination. This is to replace filer.replicate command since additional message queue is not needed.
+
+ If restarted and "-timeAgo" is not set, the synchronization will resume from the previous checkpoints, persisted every minute.
+ A fresh sync will start from the earliest metadata logs. To reset the checkpoints, just set "-timeAgo" to a high value.
+
+`,
+}
+
+func runFilerBackup(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+ util.LoadConfiguration("replication", true)
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ for {
+ err := doFilerBackup(grpcDialOption, &filerBackupOptions)
+ if err != nil {
+ glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
+ time.Sleep(1747 * time.Millisecond)
+ }
+ }
+
+ return true
+}
+
+const (
+ BackupKeyPrefix = "backup."
+)
+
+func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOptions) error {
+
+ // find data sink
+ config := util.GetViper()
+ dataSink := findSink(config)
+ if dataSink == nil {
+ return fmt.Errorf("no data sink configured in replication.toml")
+ }
+
+ sourceFiler := *backupOption.filer
+ sourcePath := *backupOption.path
+ timeAgo := *backupOption.timeAgo
+ targetPath := dataSink.GetSinkToDirectory()
+ debug := *backupOption.debug
+
+ // get start time for the data sink
+ startFrom := time.Unix(0, 0)
+ sinkId := util.HashStringToLong(dataSink.GetName() + dataSink.GetSinkToDirectory())
+ if timeAgo.Milliseconds() == 0 {
+ lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId))
+ if err != nil {
+ glog.V(0).Infof("starting from %v", startFrom)
+ } else {
+ startFrom = time.Unix(0, lastOffsetTsNs)
+ glog.V(0).Infof("resuming from %v", startFrom)
+ }
+ } else {
+ startFrom = time.Now().Add(-timeAgo)
+ glog.V(0).Infof("start time is set to %v", startFrom)
+ }
+
+ // create filer sink
+ filerSource := &source.FilerSource{}
+ filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, *backupOption.proxyByFiler)
+ dataSink.SetSourceFiler(filerSource)
+
+ processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
+
+ return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "backup_" + dataSink.GetName(),
+ PathPrefix: sourcePath,
+ SinceNs: startFrom.UnixNano(),
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ var counter int64
+ var lastWriteTime time.Time
+ for {
+ resp, listenErr := stream.Recv()
+
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return fmt.Errorf("processEventFn: %v", err)
+ }
+
+ counter++
+ if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
+ glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
+ counter = 0
+ lastWriteTime = time.Now()
+ if err := setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), resp.TsNs); err != nil {
+ return fmt.Errorf("setOffset: %v", err)
+ }
+ }
+
+ }
+
+ })
+
+}
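filer.backup checkpoints its progress under a key derived from the sink's name and target directory, persisting the last processed timestamp roughly every three seconds. The stored value is just a nanosecond timestamp; here is a sketch of the round trip, on the assumption that util.Uint64toBytes and util.BytesToUint64 are plain big-endian 8-byte helpers:

    package main

    import (
        "encoding/binary"
        "fmt"
        "time"
    )

    // encodeOffset packs a checkpoint the way setOffset presumably does:
    // eight big-endian bytes of UnixNano.
    func encodeOffset(t time.Time) []byte {
        buf := make([]byte, 8)
        binary.BigEndian.PutUint64(buf, uint64(t.UnixNano()))
        return buf
    }

    // decodeOffset is the inverse, matching getOffset's BytesToUint64 use.
    func decodeOffset(buf []byte) time.Time {
        return time.Unix(0, int64(binary.BigEndian.Uint64(buf)))
    }

    func main() {
        buf := encodeOffset(time.Now())
        fmt.Println("would resume from:", decodeOffset(buf))
    }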
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
new file mode 100644
index 000000000..c4281feba
--- /dev/null
+++ b/weed/command/filer_cat.go
@@ -0,0 +1,118 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "google.golang.org/grpc"
+ "math"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ filerCat FilerCatOptions
+)
+
+type FilerCatOptions struct {
+ grpcDialOption grpc.DialOption
+ filerAddress string
+ filerClient filer_pb.SeaweedFilerClient
+ output *string
+}
+
+func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
+ return func(fileId string) (targetUrls []string, err error) {
+ vid := filer.VolumeId(fileId)
+ resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: []string{vid},
+ })
+ if err != nil {
+ return nil, err
+ }
+ locations := resp.LocationsMap[vid]
+ for _, loc := range locations.Locations {
+ targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
+ }
+ return
+ }
+}
+
+func init() {
+ cmdFilerCat.Run = runFilerCat // break init cycle
+ filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
+}
+
+var cmdFilerCat = &Command{
+ UsageLine: "filer.cat [-o ] http://localhost:8888/path/to/file",
+ Short: "copy one file to local",
+ Long: `read one file to stdout or write to a file
+
+`,
+}
+
+func runFilerCat(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ if len(args) == 0 {
+ return false
+ }
+ filerSource := args[len(args)-1]
+
+ filerUrl, err := url.Parse(filerSource)
+ if err != nil {
+ fmt.Printf("The last argument should be a URL on filer: %v\n", err)
+ return false
+ }
+ urlPath := filerUrl.Path
+ if strings.HasSuffix(urlPath, "/") {
+ fmt.Printf("The last argument should be a file: %v\n", err)
+ return false
+ }
+
+ filerCat.filerAddress = filerUrl.Host
+ filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ dir, name := util.FullPath(urlPath).DirAndName()
+
+ writer := os.Stdout
+ if *filerCat.output != "" {
+
+ fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)
+
+ f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
+ if err != nil {
+ fmt.Printf("open file %s: %v\n", *filerCat.output, err)
+ return false
+ }
+ defer f.Close()
+ writer = f
+ }
+
+ pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Name: name,
+ Directory: dir,
+ }
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
+ if err != nil {
+ return err
+ }
+
+ filerCat.filerClient = client
+
+ return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+
+ })
+
+ return true
+}
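Note that filer.cat never contacts a master: GetLookupFileIdFunction satisfies wdclient.LookupFileIdFunctionType by asking the filer's LookupVolume RPC where each chunk's volume lives and turning the locations into URLs. The shape of that adapter, with an in-memory stub standing in for the gRPC call:

    package main

    import "fmt"

    // lookupFileIdFn mirrors the assumed shape of
    // wdclient.LookupFileIdFunctionType: fileId in, candidate URLs out.
    type lookupFileIdFn func(fileId string) ([]string, error)

    // newStubLookup plays the role of GetLookupFileIdFunction; the real
    // version derives the volume id from the fileId and asks the filer.
    func newStubLookup(locations map[string][]string) lookupFileIdFn {
        return func(fileId string) ([]string, error) {
            hosts, ok := locations[fileId]
            if !ok {
                return nil, fmt.Errorf("%s: no locations", fileId)
            }
            var urls []string
            for _, host := range hosts {
                urls = append(urls, fmt.Sprintf("http://%s/%s", host, fileId))
            }
            return urls, nil
        }
    }

    func main() {
        lookup := newStubLookup(map[string][]string{
            "3,01637037d6": {"volume-1:8080", "volume-2:8080"},
        })
        urls, err := lookup("3,01637037d6")
        fmt.Println(urls, err)
    }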
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index f14d18c52..e7a9b107f 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -14,13 +14,17 @@ import (
"sync"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
- "github.com/spf13/viper"
- "google.golang.org/grpc"
)
var (
@@ -33,13 +37,15 @@ type CopyOptions struct {
replication *string
collection *string
ttl *string
+ diskType *string
maxMB *int
masterClient *wdclient.MasterClient
concurrenctFiles *int
concurrenctChunks *int
- compressionLevel *int
grpcDialOption grpc.DialOption
masters []string
+ cipher bool
+ ttlSec int32
}
func init() {
@@ -49,10 +55,10 @@ func init() {
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
- copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
+ copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+ copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
- copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9")
}
var cmdCopy = &Command{
@@ -68,7 +74,7 @@ var cmdCopy = &Command{
If "maxMB" is set to a positive number, files larger than it would be split into chunks.
- `,
+`,
}
func runCopy(cmd *Command, args []string) bool {
@@ -88,7 +94,7 @@ func runCopy(cmd *Command, args []string) bool {
}
urlPath := filerUrl.Path
if !strings.HasSuffix(urlPath, "/") {
- fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
+ fmt.Printf("The last argument should be a folder and end with \"/\"\n")
return false
}
@@ -105,15 +111,25 @@ func runCopy(cmd *Command, args []string) bool {
filerGrpcPort := filerPort + 10000
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
- copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
-
- ctx := context.Background()
+ copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
- masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress)
+ masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
if err != nil {
fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
return false
}
+ if strings.HasPrefix(urlPath, dirBuckets+"/") {
+ restPath := urlPath[len(dirBuckets)+1:]
+ if strings.Index(restPath, "/") > 0 {
+ expectedBucket := restPath[:strings.Index(restPath, "/")]
+ if *copy.collection == "" {
+ *copy.collection = expectedBucket
+ } else if *copy.collection != expectedBucket {
+ fmt.Printf("destination %s uses collection \"%s\": unexpected collection \"%v\"\n", urlPath, expectedBucket, *copy.collection)
+ return true
+ }
+ }
+ }
if *copy.collection == "" {
*copy.collection = collection
}
@@ -124,13 +140,17 @@ func runCopy(cmd *Command, args []string) bool {
*copy.maxMB = int(maxMB)
}
copy.masters = masters
+ copy.cipher = cipher
- copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters)
- go copy.masterClient.KeepConnectedToMaster()
- copy.masterClient.WaitUntilConnected()
+ ttl, err := needle.ReadTTL(*copy.ttl)
+ if err != nil {
+ fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err)
+ return false
+ }
+ copy.ttlSec = int32(ttl.Minutes()) * 60
if *cmdCopy.IsDebug {
- util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
+ grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
}
fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles)
@@ -139,7 +159,7 @@ func runCopy(cmd *Command, args []string) bool {
defer close(fileCopyTaskChan)
for _, fileOrDir := range fileOrDirs {
if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil {
- fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err)
+ fmt.Fprintf(os.Stderr, "genFileCopyTask : %v\n", err)
break
}
}
@@ -153,7 +173,7 @@ func runCopy(cmd *Command, args []string) bool {
filerHost: filerUrl.Host,
filerGrpcAddress: filerGrpcAddress,
}
- if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil {
+ if err := worker.copyFiles(fileCopyTaskChan); err != nil {
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
return
}
@@ -164,13 +184,15 @@ func runCopy(cmd *Command, args []string) bool {
return true
}
-func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
- err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
- resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
+func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
+ dirBuckets = resp.DirBuckets
+ cipher = resp.Cipher
return nil
})
return
@@ -180,21 +202,11 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
fi, err := os.Stat(fileOrDir)
if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err)
+ fmt.Fprintf(os.Stderr, "Error: read file %s: %v\n", fileOrDir, err)
return nil
}
mode := fi.Mode()
- if mode.IsDir() {
- files, _ := ioutil.ReadDir(fileOrDir)
- for _, subFileOrDir := range files {
- if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
- return err
- }
- }
- return nil
- }
-
uid, gid := util.GetFileUidGid(fi)
fileCopyTaskChan <- FileCopyTask{
@@ -206,6 +218,16 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
gid: gid,
}
+ if mode.IsDir() {
+ files, _ := ioutil.ReadDir(fileOrDir)
+ println("checking directory", fileOrDir)
+ for _, subFileOrDir := range files {
+ if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
+ return err
+ }
+ }
+ }
+
return nil
}
@@ -215,9 +237,9 @@ type FileCopyWorker struct {
filerGrpcAddress string
}
-func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error {
+func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
for task := range fileCopyTaskChan {
- if err := worker.doEachCopy(ctx, task); err != nil {
+ if err := worker.doEachCopy(task); err != nil {
return err
}
}
@@ -233,7 +255,7 @@ type FileCopyTask struct {
gid uint32
}
-func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error {
+func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
f, err := os.Open(task.sourceLocation)
if err != nil {
@@ -261,36 +283,58 @@ func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask)
}
if chunkCount == 1 {
- return worker.uploadFileAsOne(ctx, task, f)
+ return worker.uploadFileAsOne(task, f)
}
- return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize)
+ return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
}
-func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error {
+func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {
// upload the file content
fileName := filepath.Base(f.Name())
- mimeType := detectMimeType(f)
+ var mimeType string
var chunks []*filer_pb.FileChunk
+ var assignResult *filer_pb.AssignVolumeResponse
+ var assignError error
- if task.fileSize > 0 {
+ if task.fileMode & os.ModeDir == 0 && task.fileSize > 0 {
+
+ mimeType = detectMimeType(f)
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
// assign a volume
- assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
- Count: 1,
- Replication: *worker.options.replication,
- Collection: *worker.options.collection,
- Ttl: *worker.options.ttl,
+ err = pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
+ DiskType: *worker.options.diskType,
+ Path: task.destinationUrlPath,
+ }
+
+ assignResult, assignError = client.AssignVolume(context.Background(), request)
+ if assignError != nil {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+ }
+ if assignResult.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+ }
+ return nil
})
if err != nil {
- fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
+ return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
- targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
- uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel)
+ uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
}
@@ -299,18 +343,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
}
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
- chunks = append(chunks, &filer_pb.FileChunk{
- FileId: assignResult.Fid,
- Offset: 0,
- Size: uint64(uploadResult.Size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- })
+ chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
}
- if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@@ -325,13 +363,13 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
Mime: mimeType,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
- TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
+ TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
- if _, err := client.CreateEntry(ctx, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
@@ -342,7 +380,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
return nil
}
-func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
+func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
@@ -352,6 +390,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
var wg sync.WaitGroup
var uploadError error
+ var collection, replication string
fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
@@ -363,22 +402,43 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
<-concurrentChunks
}()
// assign a volume
- assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
- Count: 1,
- Replication: *worker.options.replication,
- Collection: *worker.options.collection,
- Ttl: *worker.options.ttl,
+ var assignResult *filer_pb.AssignVolumeResponse
+ var assignError error
+ err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
+ DiskType: *worker.options.diskType,
+ Path: task.destinationUrlPath + fileName,
+ }
+
+ assignResult, assignError = client.AssignVolume(context.Background(), request)
+ if assignError != nil {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+ }
+ if assignResult.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+ }
+ return nil
})
if err != nil {
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
- targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
+ if collection == "" {
+ collection = assignResult.Collection
+ }
+ if replication == "" {
+ replication = assignResult.Replication
+ }
- uploadResult, err := operation.Upload(targetUrl,
- fileName+"-"+strconv.FormatInt(i+1, 10),
- io.NewSectionReader(f, i*chunkSize, chunkSize),
- false, "", nil, assignResult.Auth)
+ uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return
@@ -387,13 +447,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return
}
- chunksChan <- &filer_pb.FileChunk{
- FileId: assignResult.Fid,
- Offset: i * chunkSize,
- Size: uint64(uploadResult.Size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- }
+ chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)
+
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
}(i)
}
@@ -410,11 +465,13 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
- operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds)
+ operation.DeleteFiles(func() string {
+ return copy.masters[0]
+ }, false, worker.options.grpcDialOption, fileIds)
return uploadError
}
- if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@@ -427,15 +484,15 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
FileSize: uint64(task.fileSize),
FileMode: uint32(task.fileMode),
Mime: mimeType,
- Replication: *worker.options.replication,
- Collection: *worker.options.collection,
- TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
+ Replication: replication,
+ Collection: collection,
+ TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
- if _, err := client.CreateEntry(ctx, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
@@ -457,18 +514,12 @@ func detectMimeType(f *os.File) string {
}
if err != nil {
fmt.Printf("read head of %v: %v\n", f.Name(), err)
- return "application/octet-stream"
+ return ""
}
f.Seek(0, io.SeekStart)
mimeType := http.DetectContentType(head[:n])
+ if mimeType == "application/octet-stream" {
+ return ""
+ }
return mimeType
}
-
-func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
-
- return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(clientConn)
- return fn(client)
- }, filerAddress, grpcDialOption)
-
-}
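uploadFileInChunks above caps parallel chunk uploads with a buffered channel used as a counting semaphore, paired with a WaitGroup so the caller can wait for every chunk. The pattern in isolation, with the upload replaced by a trivial stand-in:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        const chunkCount = 8
        // The channel's capacity bounds how many uploads run at once,
        // mirroring concurrentChunks in uploadFileInChunks.
        concurrent := make(chan struct{}, 3)
        results := make(chan int, chunkCount)

        var wg sync.WaitGroup
        for i := 0; i < chunkCount; i++ {
            wg.Add(1)
            concurrent <- struct{}{} // acquire a slot; blocks at the cap
            go func(i int) {
                defer wg.Done()
                defer func() { <-concurrent }() // release the slot
                results <- i                    // stand-in for uploading chunk i
            }(i)
        }
        wg.Wait()
        close(results)

        for r := range results {
            fmt.Println("uploaded chunk", r)
        }
    }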
diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go
new file mode 100644
index 000000000..ba0b44659
--- /dev/null
+++ b/weed/command/filer_meta_backup.go
@@ -0,0 +1,268 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/spf13/viper"
+ "google.golang.org/grpc"
+ "io"
+ "reflect"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ metaBackup FilerMetaBackupOptions
+)
+
+type FilerMetaBackupOptions struct {
+ grpcDialOption grpc.DialOption
+ filerAddress *string
+ filerDirectory *string
+ restart *bool
+ backupFilerConfig *string
+
+ store filer.FilerStore
+}
+
+func init() {
+ cmdFilerMetaBackup.Run = runFilerMetaBackup // break init cycle
+ metaBackup.filerAddress = cmdFilerMetaBackup.Flag.String("filer", "localhost:8888", "filer hostname:port")
+ metaBackup.filerDirectory = cmdFilerMetaBackup.Flag.String("filerDir", "/", "a folder on the filer")
+ metaBackup.restart = cmdFilerMetaBackup.Flag.Bool("restart", false, "copy the full metadata before async incremental backup")
+ metaBackup.backupFilerConfig = cmdFilerMetaBackup.Flag.String("config", "", "path to filer.toml specifying backup filer store")
+}
+
+var cmdFilerMetaBackup = &Command{
+ UsageLine: "filer.meta.backup [-filer=localhost:8888] [-filerDir=/] [-restart] -config=/path/to/backup_filer.toml",
+ Short: "continuously backup filer meta data changes to anther filer store specified in a backup_filer.toml",
+ Long: `continuously backup filer meta data changes.
+The backup writes to another filer store specified in a backup_filer.toml.
+
+ weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888"
+ weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888" -restart
+
+ `,
+}
+
+func runFilerMetaBackup(cmd *Command, args []string) bool {
+
+ metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ // load backup_filer.toml
+ v := viper.New()
+ v.SetConfigFile(*metaBackup.backupFilerConfig)
+
+ if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file
+ glog.Fatalf("Failed to load %s file.\nPlease use this command to generate the a %s.toml file\n"+
+ " weed scaffold -config=%s -output=.\n\n\n",
+ *metaBackup.backupFilerConfig, "backup_filer", "filer")
+ }
+
+ if err := metaBackup.initStore(v); err != nil {
+ glog.V(0).Infof("init backup filer store: %v", err)
+ return true
+ }
+
+ missingPreviousBackup := false
+ _, err := metaBackup.getOffset()
+ if err != nil {
+ missingPreviousBackup = true
+ }
+
+ if *metaBackup.restart || missingPreviousBackup {
+ glog.V(0).Infof("traversing metadata tree...")
+ startTime := time.Now()
+ if err := metaBackup.traverseMetadata(); err != nil {
+ glog.Errorf("traverse meta data: %v", err)
+ return true
+ }
+ glog.V(0).Infof("metadata copied up to %v", startTime)
+ if err := metaBackup.setOffset(startTime); err != nil {
+ startTime = time.Now()
+ }
+ }
+
+ for {
+ err := metaBackup.streamMetadataBackup()
+ if err != nil {
+ glog.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err)
+ time.Sleep(1747 * time.Millisecond)
+ }
+ }
+
+ return true
+}
+
+func (metaBackup *FilerMetaBackupOptions) initStore(v *viper.Viper) error {
+ // load configuration for default filer store
+ hasDefaultStoreConfigured := false
+ for _, store := range filer.Stores {
+ if v.GetBool(store.GetName() + ".enabled") {
+ store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(filer.FilerStore)
+ if err := store.Initialize(v, store.GetName()+"."); err != nil {
+ glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
+ }
+ glog.V(0).Infof("configured filer store to %s", store.GetName())
+ hasDefaultStoreConfigured = true
+ metaBackup.store = filer.NewFilerStoreWrapper(store)
+ break
+ }
+ }
+ if !hasDefaultStoreConfigured {
+ return fmt.Errorf("no filer store enabled in %s", v.ConfigFileUsed())
+ }
+
+ return nil
+}
+
+func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) {
+ var saveErr error
+
+ traverseErr := filer_pb.TraverseBfs(metaBackup, util.FullPath(*metaBackup.filerDirectory), func(parentPath util.FullPath, entry *filer_pb.Entry) {
+
+ println("+", parentPath.Child(entry.Name))
+ if err := metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil {
+ saveErr = fmt.Errorf("insert entry error: %v\n", err)
+ return
+ }
+
+ })
+
+ if traverseErr != nil {
+ return fmt.Errorf("traverse: %v", traverseErr)
+ }
+ return saveErr
+}
+
+var (
+ MetaBackupKey = []byte("metaBackup")
+)
+
+func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
+
+ startTime, err := metaBackup.getOffset()
+ if err != nil {
+ startTime = time.Now()
+ }
+ glog.V(0).Infof("streaming from %v", startTime)
+
+ store := metaBackup.store
+
+ eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
+
+ ctx := context.Background()
+ message := resp.EventNotification
+
+ if message.OldEntry == nil && message.NewEntry == nil {
+ return nil
+ }
+ if message.OldEntry == nil && message.NewEntry != nil {
+ println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name))
+ entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry)
+ return store.InsertEntry(ctx, entry)
+ }
+ if message.OldEntry != nil && message.NewEntry == nil {
+ println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name))
+ return store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name))
+ }
+ if message.OldEntry != nil && message.NewEntry != nil {
+ if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
+ println("~", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name))
+ entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry)
+ return store.UpdateEntry(ctx, entry)
+ }
+ println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name))
+ if err := store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)); err != nil {
+ return err
+ }
+ println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name))
+ return store.InsertEntry(ctx, filer.FromPbEntry(message.NewParentPath, message.NewEntry))
+ }
+
+ return nil
+ }
+
+ tailErr := pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "meta_backup",
+ PathPrefix: *metaBackup.filerDirectory,
+ SinceNs: startTime.UnixNano(),
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ var counter int64
+ var lastWriteTime time.Time
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+ if err = eachEntryFunc(resp); err != nil {
+ return err
+ }
+
+ counter++
+ if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
+ glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
+ counter = 0
+ lastWriteTime = time.Now()
+ if err2 := metaBackup.setOffset(lastWriteTime); err2 != nil {
+ return err2
+ }
+ }
+
+ }
+
+ })
+ return tailErr
+}
+
+func (metaBackup *FilerMetaBackupOptions) getOffset() (lastWriteTime time.Time, err error) {
+ value, err := metaBackup.store.KvGet(context.Background(), MetaBackupKey)
+ if err != nil {
+ return
+ }
+ tsNs := util.BytesToUint64(value)
+
+ return time.Unix(0, int64(tsNs)), nil
+}
+
+func (metaBackup *FilerMetaBackupOptions) setOffset(lastWriteTime time.Time) error {
+ valueBuf := make([]byte, 8)
+ util.Uint64toBytes(valueBuf, uint64(lastWriteTime.UnixNano()))
+
+ if err := metaBackup.store.KvPut(context.Background(), MetaBackupKey, valueBuf); err != nil {
+ return err
+ }
+ return nil
+}
+
+var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{})
+
+func (metaBackup *FilerMetaBackupOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ return fn(client)
+ })
+
+}
+
+func (metaBackup *FilerMetaBackupOptions) AdjustedUrl(location *filer_pb.Location) string {
+ return location.Url
+}
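
The checkpoint that `getOffset`/`setOffset` maintain above is simply a nanosecond timestamp stored as 8 big-endian bytes under the `metaBackup` key. A minimal sketch of the round trip, using `encoding/binary` directly on the assumption that `util.Uint64toBytes`/`util.BytesToUint64` are big-endian equivalents:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	// what setOffset stores: the last processed UnixNano as 8 big-endian bytes
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(time.Now().UnixNano()))

	// what getOffset recovers before resuming the metadata subscription
	tsNs := int64(binary.BigEndian.Uint64(buf))
	fmt.Println("resume streaming from", time.Unix(0, tsNs))
}
```
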
diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go
new file mode 100644
index 000000000..8451ffd78
--- /dev/null
+++ b/weed/command/filer_meta_tail.go
@@ -0,0 +1,211 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "github.com/golang/protobuf/jsonpb"
+ jsoniter "github.com/json-iterator/go"
+ "github.com/olivere/elastic/v7"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle
+}
+
+var cmdFilerMetaTail = &Command{
+ UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]",
+ Short: "see continuous changes on a filer",
+ Long: `See continuous changes on a filer.
+
+ weed filer.meta.tail -timeAgo=30h | grep truncate
+ weed filer.meta.tail -timeAgo=30h | jq .
+ weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name
+
+ `,
+}
+
+var (
+ tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port")
+ tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or common prefix for the folders or files on filer")
+ tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+ tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
+	esServers   = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://<host:port>")
+ esIndex = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name")
+)
+
+func runFilerMetaTail(cmd *Command, args []string) bool {
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ var filterFunc func(dir, fname string) bool
+ if *tailPattern != "" {
+ if strings.Contains(*tailPattern, "/") {
+ println("watch path pattern", *tailPattern)
+ filterFunc = func(dir, fname string) bool {
+ matched, err := filepath.Match(*tailPattern, dir+"/"+fname)
+ if err != nil {
+ fmt.Printf("error: %v", err)
+ }
+ return matched
+ }
+ } else {
+ println("watch file pattern", *tailPattern)
+ filterFunc = func(dir, fname string) bool {
+ matched, err := filepath.Match(*tailPattern, fname)
+ if err != nil {
+ fmt.Printf("error: %v", err)
+ }
+ return matched
+ }
+ }
+ }
+
+ shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
+ if filterFunc == nil {
+ return true
+ }
+ if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
+ return false
+ }
+ if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
+ return true
+ }
+ if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
+ return true
+ }
+ return false
+ }
+
+ jsonpbMarshaler := jsonpb.Marshaler{
+ EmitDefaults: false,
+ }
+ eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ jsonpbMarshaler.Marshal(os.Stdout, resp)
+ fmt.Fprintln(os.Stdout)
+ return nil
+ }
+ if *esServers != "" {
+ var err error
+ eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex)
+ if err != nil {
+ fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err)
+ return false
+ }
+ }
+
+ tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "tail",
+ PathPrefix: *tailTarget,
+ SinceNs: time.Now().Add(-*tailStart).UnixNano(),
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+ if !shouldPrint(resp) {
+ continue
+ }
+ if err = eachEntryFunc(resp); err != nil {
+ return err
+ }
+ }
+
+ })
+ if tailErr != nil {
+ fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
+ }
+
+ return true
+}
+
+type EsDocument struct {
+ Dir string `json:"dir,omitempty"`
+ Name string `json:"name,omitempty"`
+ IsDirectory bool `json:"isDir,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Uid uint32 `json:"uid,omitempty"`
+ Gid uint32 `json:"gid,omitempty"`
+ UserName string `json:"userName,omitempty"`
+ Collection string `json:"collection,omitempty"`
+ Crtime int64 `json:"crtime,omitempty"`
+ Mtime int64 `json:"mtime,omitempty"`
+ Mime string `json:"mime,omitempty"`
+}
+
+func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
+ entry := event.NewEntry
+ dir, name := event.NewParentPath, entry.Name
+ id := util.Md5String([]byte(util.NewFullPath(dir, name)))
+ esEntry := &EsDocument{
+ Dir: dir,
+ Name: name,
+ IsDirectory: entry.IsDirectory,
+ Size: entry.Attributes.FileSize,
+ Uid: entry.Attributes.Uid,
+ Gid: entry.Attributes.Gid,
+ UserName: entry.Attributes.UserName,
+ Collection: entry.Attributes.Collection,
+ Crtime: entry.Attributes.Crtime,
+ Mtime: entry.Attributes.Mtime,
+ Mime: entry.Attributes.Mime,
+ }
+ return esEntry, id
+}
+
+func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
+ options := []elastic.ClientOptionFunc{}
+ options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
+ options = append(options, elastic.SetSniff(false))
+ options = append(options, elastic.SetHealthcheck(false))
+ client, err := elastic.NewClient(options...)
+ if err != nil {
+ return nil, err
+ }
+ return func(resp *filer_pb.SubscribeMetadataResponse) error {
+ event := resp.EventNotification
+ if event.OldEntry != nil &&
+ (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
+			// the entry was deleted, or renamed/moved away: remove the old document
+ dir, name := resp.Directory, event.OldEntry.Name
+ id := util.Md5String([]byte(util.NewFullPath(dir, name)))
+ println("delete", id)
+ _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
+ return err
+ }
+ if event.NewEntry != nil {
+ // add a new file or update the same file
+ esEntry, id := toEsEntry(event)
+ value, err := jsoniter.Marshal(esEntry)
+ if err != nil {
+ return err
+ }
+ println(string(value))
+ _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
+ return err
+ }
+ return nil
+ }, nil
+}
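
For reference, the `-pattern` flag above relies on the standard library's `filepath.Match`: a pattern containing a slash is matched against `dir + "/" + fname`, otherwise against the file name alone. A small runnable sketch of that behavior (the paths are made up for illustration):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// file-name pattern: no slash, so only the name is matched
	m1, _ := filepath.Match("*.pdf", "report.pdf")

	// path pattern: contains a slash, so the full path is matched
	m2, _ := filepath.Match("/home/?opher", "/home/gopher")

	fmt.Println(m1, m2) // true true
}
```
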
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index c6e7f5dba..885c95540 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -11,10 +11,10 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
+ _ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
)
func init() {
@@ -39,7 +39,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
util.LoadConfiguration("replication", true)
util.LoadConfiguration("notification", true)
- config := viper.GetViper()
+ config := util.GetViper()
var notificationInput sub.NotificationInput
@@ -47,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
- viperSub := config.Sub("notification." + input.GetName())
- if err := input.Initialize(viperSub); err != nil {
+ if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification input for %s: %+v",
input.GetName(), err)
}
@@ -66,29 +65,16 @@ func runFilerReplicate(cmd *Command, args []string) bool {
// avoid recursive replication
if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") {
- sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer")
- if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") {
- fromDir := sourceConfig.GetString("directory")
- toDir := sinkConfig.GetString("directory")
+ if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") {
+ fromDir := config.GetString("source.filer.directory")
+ toDir := config.GetString("sink.filer.directory")
if strings.HasPrefix(toDir, fromDir) {
glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
}
}
}
- var dataSink sink.ReplicationSink
- for _, sk := range sink.Sinks {
- if config.GetBool("sink." + sk.GetName() + ".enabled") {
- viperSub := config.Sub("sink." + sk.GetName())
- if err := sk.Initialize(viperSub); err != nil {
- glog.Fatalf("Failed to initialize sink for %s: %+v",
- sk.GetName(), err)
- }
- glog.V(0).Infof("Configure sink to %s", sk.GetName())
- dataSink = sk
- break
- }
- }
+ dataSink := findSink(config)
if dataSink == nil {
println("no data sink configured in replication.toml:")
@@ -98,16 +84,22 @@ func runFilerReplicate(cmd *Command, args []string) bool {
return true
}
- replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink)
+ replicator := replication.NewReplicator(config, "source.filer.", dataSink)
for {
- key, m, err := notificationInput.ReceiveMessage()
+ key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
if err != nil {
glog.Errorf("receive %s: %+v", key, err)
+ if onFailureFn != nil {
+ onFailureFn()
+ }
continue
}
if key == "" {
// long poll received no messages
+ if onSuccessFn != nil {
+ onSuccessFn()
+ }
continue
}
if m.OldEntry != nil && m.NewEntry == nil {
@@ -119,15 +111,36 @@ func runFilerReplicate(cmd *Command, args []string) bool {
}
if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err)
+ if onFailureFn != nil {
+ onFailureFn()
+ }
} else {
glog.V(1).Infof("replicated %s", key)
+ if onSuccessFn != nil {
+ onSuccessFn()
+ }
}
}
- return true
}
-func validateOneEnabledInput(config *viper.Viper) {
+func findSink(config *util.ViperProxy) sink.ReplicationSink {
+ var dataSink sink.ReplicationSink
+ for _, sk := range sink.Sinks {
+ if config.GetBool("sink." + sk.GetName() + ".enabled") {
+ if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
+ glog.Fatalf("Failed to initialize sink for %s: %+v",
+ sk.GetName(), err)
+ }
+ glog.V(0).Infof("Configure sink to %s", sk.GetName())
+ dataSink = sk
+ break
+ }
+ }
+ return dataSink
+}
+
+func validateOneEnabledInput(config *util.ViperProxy) {
enabledInput := ""
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go
new file mode 100644
index 000000000..0f34e5701
--- /dev/null
+++ b/weed/command/filer_sync.go
@@ -0,0 +1,374 @@
+package command
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/replication"
+ "github.com/chrislusf/seaweedfs/weed/replication/sink"
+ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
+ "github.com/chrislusf/seaweedfs/weed/replication/source"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+ "google.golang.org/grpc"
+ "io"
+ "strings"
+ "time"
+)
+
+type SyncOptions struct {
+ isActivePassive *bool
+ filerA *string
+ filerB *string
+ aPath *string
+ bPath *string
+ aReplication *string
+ bReplication *string
+ aCollection *string
+ bCollection *string
+ aTtlSec *int
+ bTtlSec *int
+ aDiskType *string
+ bDiskType *string
+ aDebug *bool
+ bDebug *bool
+ aProxyByFiler *bool
+ bProxyByFiler *bool
+}
+
+var (
+ syncOptions SyncOptions
+ syncCpuProfile *string
+ syncMemProfile *string
+)
+
+func init() {
+ cmdFilerSynchronize.Run = runFilerSynchronize // break init cycle
+	syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one-directional follow from A to B if true")
+ syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster")
+ syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster")
+ syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A")
+ syncOptions.bPath = cmdFilerSynchronize.Flag.String("b.path", "/", "directory to sync on filer B")
+ syncOptions.aReplication = cmdFilerSynchronize.Flag.String("a.replication", "", "replication on filer A")
+ syncOptions.bReplication = cmdFilerSynchronize.Flag.String("b.replication", "", "replication on filer B")
+ syncOptions.aCollection = cmdFilerSynchronize.Flag.String("a.collection", "", "collection on filer A")
+ syncOptions.bCollection = cmdFilerSynchronize.Flag.String("b.collection", "", "collection on filer B")
+ syncOptions.aTtlSec = cmdFilerSynchronize.Flag.Int("a.ttlSec", 0, "ttl in seconds on filer A")
+ syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
+	syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag on filer A")
+	syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag on filer B")
+ syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
+ syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
+ syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
+ syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files")
+ syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file")
+ syncMemProfile = cmdFilerSynchronize.Flag.String("memprofile", "", "memory profile output file")
+}
+
+var cmdFilerSynchronize = &Command{
+ UsageLine: "filer.sync -a=: -b=:",
+ Short: "resumeable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
+ Long: `resumeable continuous synchronization for file changes between two active-active or active-passive filers
+
+ filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
+ and write to the other destination. Different from filer.replicate:
+
+ * filer.sync only works between two filers.
+ * filer.sync does not need any special message queue setup.
+ * filer.sync supports both active-active and active-passive modes.
+
+	If restarted, the synchronization will resume from the previous checkpoints, which are persisted every few seconds.
+ A fresh sync will start from the earliest metadata logs.
+
+`,
+}
+
+func runFilerSynchronize(cmd *Command, args []string) bool {
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
+
+ go func() {
+ for {
+ err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
+ *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
+ if err != nil {
+ glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
+ time.Sleep(1747 * time.Millisecond)
+ }
+ }
+ }()
+
+ if !*syncOptions.isActivePassive {
+ go func() {
+ for {
+ err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
+ *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
+ if err != nil {
+ glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
+ time.Sleep(2147 * time.Millisecond)
+ }
+ }
+ }()
+ }
+
+	select {}
+}
+
+func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
+ replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
+
+ // read source filer signature
+ sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
+ if sourceErr != nil {
+ return sourceErr
+ }
+ // read target filer signature
+ targetFilerSignature, targetErr := replication.ReadFilerSignature(grpcDialOption, targetFiler)
+ if targetErr != nil {
+ return targetErr
+ }
+
+	// if first time, start from the earliest metadata logs
+	// if previously synced, resume from the saved checkpoint
+ sourceFilerOffsetTsNs, err := getOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature)
+ if err != nil {
+ return err
+ }
+
+ glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
+
+ // create filer sink
+ filerSource := &source.FilerSource{}
+ filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
+ filerSink := &filersink.FilerSink{}
+ filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
+ filerSink.SetSourceFiler(filerSource)
+
+ persistEventFn := genProcessFunction(sourcePath, targetPath, filerSink, debug)
+
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+ for _, sig := range message.Signatures {
+ if sig == targetFilerSignature && targetFilerSignature != 0 {
+ fmt.Printf("%s skipping %s change to %v\n", targetFiler, sourceFiler, message)
+ return nil
+ }
+ }
+ return persistEventFn(resp)
+ }
+
+ return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "syncTo_" + targetFiler,
+ PathPrefix: sourcePath,
+ SinceNs: sourceFilerOffsetTsNs,
+ Signature: targetFilerSignature,
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ var counter int64
+ var lastWriteTime time.Time
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return err
+ }
+
+ counter++
+ if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
+ glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
+ counter = 0
+ lastWriteTime = time.Now()
+ if err := setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, resp.TsNs); err != nil {
+ return err
+ }
+ }
+
+ }
+
+ })
+
+}
+
+const (
+ SyncKeyPrefix = "sync."
+)
+
+func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) {
+
+ readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ syncKey := []byte(signaturePrefix + "____")
+ util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature))
+
+ resp, err := client.KvGet(context.Background(), &filer_pb.KvGetRequest{Key: syncKey})
+ if err != nil {
+ return err
+ }
+
+ if len(resp.Error) != 0 {
+ return errors.New(resp.Error)
+ }
+ if len(resp.Value) < 8 {
+ return nil
+ }
+
+ lastOffsetTsNs = int64(util.BytesToUint64(resp.Value))
+
+ return nil
+ })
+
+ return
+
+}
+
+func setOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32, offsetTsNs int64) error {
+ return pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ syncKey := []byte(signaturePrefix + "____")
+ util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature))
+
+ valueBuf := make([]byte, 8)
+ util.Uint64toBytes(valueBuf, uint64(offsetTsNs))
+
+ resp, err := client.KvPut(context.Background(), &filer_pb.KvPutRequest{
+ Key: syncKey,
+ Value: valueBuf,
+ })
+ if err != nil {
+ return err
+ }
+
+ if len(resp.Error) != 0 {
+ return errors.New(resp.Error)
+ }
+
+ return nil
+
+ })
+
+}
+
+func genProcessFunction(sourcePath string, targetPath string, dataSink sink.ReplicationSink, debug bool) func(resp *filer_pb.SubscribeMetadataResponse) error {
+ // process function
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+
+ var sourceOldKey, sourceNewKey util.FullPath
+ if message.OldEntry != nil {
+ sourceOldKey = util.FullPath(resp.Directory).Child(message.OldEntry.Name)
+ }
+ if message.NewEntry != nil {
+ sourceNewKey = util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)
+ }
+
+ if debug {
+ glog.V(0).Infof("received %v", resp)
+ }
+
+ if !strings.HasPrefix(resp.Directory, sourcePath) {
+ return nil
+ }
+
+ // handle deletions
+ if message.OldEntry != nil && message.NewEntry == nil {
+ if !strings.HasPrefix(string(sourceOldKey), sourcePath) {
+ return nil
+ }
+ key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath)
+ return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
+ }
+
+ // handle new entries
+ if message.OldEntry == nil && message.NewEntry != nil {
+ if !strings.HasPrefix(string(sourceNewKey), sourcePath) {
+ return nil
+ }
+ key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
+ return dataSink.CreateEntry(key, message.NewEntry, message.Signatures)
+ }
+
+		// neither old nor new entry exists: nothing to do
+ if message.OldEntry == nil && message.NewEntry == nil {
+ return nil
+ }
+
+ // handle updates
+ if strings.HasPrefix(string(sourceOldKey), sourcePath) {
+ // old key is in the watched directory
+ if strings.HasPrefix(string(sourceNewKey), sourcePath) {
+ // new key is also in the watched directory
+ if !dataSink.IsIncremental() {
+ oldKey := util.Join(targetPath, string(sourceOldKey)[len(sourcePath):])
+ message.NewParentPath = util.Join(targetPath, message.NewParentPath[len(sourcePath):])
+ foundExisting, err := dataSink.UpdateEntry(string(oldKey), message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures)
+ if foundExisting {
+ return err
+ }
+
+ // not able to find old entry
+ if err = dataSink.DeleteEntry(string(oldKey), message.OldEntry.IsDirectory, false, message.Signatures); err != nil {
+ return fmt.Errorf("delete old entry %v: %v", oldKey, err)
+ }
+ }
+ // create the new entry
+ newKey := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
+ return dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures)
+
+ } else {
+ // new key is outside of the watched directory
+ if !dataSink.IsIncremental() {
+ key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath)
+ return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
+ }
+ }
+ } else {
+ // old key is outside of the watched directory
+ if strings.HasPrefix(string(sourceNewKey), sourcePath) {
+ // new key is in the watched directory
+ key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
+ return dataSink.CreateEntry(key, message.NewEntry, message.Signatures)
+ } else {
+ // new key is also outside of the watched directory
+ // skip
+ }
+ }
+
+ return nil
+ }
+ return processEventFn
+}
+
+func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) string {
+ if !dataSink.IsIncremental() {
+ return util.Join(targetPath, string(sourceKey)[len(sourcePath):])
+ }
+ var mTime int64
+ if message.NewEntry != nil {
+ mTime = message.NewEntry.Attributes.Mtime
+ } else if message.OldEntry != nil {
+ mTime = message.OldEntry.Attributes.Mtime
+ }
+ dateKey := time.Unix(mTime, 0).Format("2006-01-02")
+ return util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):])
+}
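
One consequence of `buildKey` above worth spelling out: an incremental sink receives keys with a date segment derived from the entry's mtime, so changes land under per-day folders instead of overwriting a mirror. A runnable sketch with made-up paths, substituting the standard `path.Join` for `util.Join` on the assumption they agree for this case:

```go
package main

import (
	"fmt"
	"path"
	"time"
)

func main() {
	mTime := int64(1613260800) // 2021-02-14 00:00:00 UTC, for illustration
	dateKey := time.Unix(mTime, 0).UTC().Format("2006-01-02")

	// incremental sink: /archive/<date>/<path relative to the watched dir>
	fmt.Println(path.Join("/archive", dateKey, "/docs/report.txt")) // /archive/2021-02-14/docs/report.txt

	// non-incremental sink: plain /archive/<relative path>
	fmt.Println(path.Join("/archive", "/docs/report.txt")) // /archive/docs/report.txt
}
```
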
diff --git a/weed/command/fix.go b/weed/command/fix.go
index 76bc19f7e..ae9a051b8 100644
--- a/weed/command/fix.go
+++ b/weed/command/fix.go
@@ -11,6 +11,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -46,8 +47,8 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
}
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
- glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped())
- if n.Size > 0 && n.Size != types.TombstoneFileSize {
+ glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
+ if n.Size.IsValid() {
pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe)
} else {
@@ -67,23 +68,23 @@ func runFix(cmd *Command, args []string) bool {
if *fixVolumeCollection != "" {
baseFileName = *fixVolumeCollection + "_" + baseFileName
}
- indexFileName := path.Join(*fixVolumePath, baseFileName+".idx")
- indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
- }
- defer indexFile.Close()
+ indexFileName := path.Join(util.ResolvePath(*fixVolumePath), baseFileName+".idx")
nm := needle_map.NewMemDb()
+ defer nm.Close()
vid := needle.VolumeId(*fixVolumeId)
scanner := &VolumeFileScanner4Fix{
nm: nm,
}
- err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner)
- if err != nil {
- glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+ if err := storage.ScanVolumeFile(util.ResolvePath(*fixVolumePath), *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil {
+ glog.Fatalf("scan .dat File: %v", err)
+ os.Remove(indexFileName)
+ }
+
+	if err := nm.SaveToIdx(indexFileName); err != nil {
		os.Remove(indexFileName)
+		glog.Fatalf("save to .idx File: %v", err)
}
diff --git a/weed/command/gateway.go b/weed/command/gateway.go
new file mode 100644
index 000000000..8a6f852a5
--- /dev/null
+++ b/weed/command/gateway.go
@@ -0,0 +1,93 @@
+package command
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ gatewayOptions GatewayOptions
+)
+
+type GatewayOptions struct {
+ masters *string
+ filers *string
+ bindIp *string
+ port *int
+ maxMB *int
+}
+
+func init() {
+ cmdGateway.Run = runGateway // break init cycle
+ gatewayOptions.masters = cmdGateway.Flag.String("master", "localhost:9333", "comma-separated master servers")
+ gatewayOptions.filers = cmdGateway.Flag.String("filer", "localhost:8888", "comma-separated filer servers")
+ gatewayOptions.bindIp = cmdGateway.Flag.String("ip.bind", "localhost", "ip address to bind to")
+ gatewayOptions.port = cmdGateway.Flag.Int("port", 5647, "gateway http listen port")
+ gatewayOptions.maxMB = cmdGateway.Flag.Int("maxMB", 4, "split files larger than the limit")
+}
+
+var cmdGateway = &Command{
+ UsageLine: "gateway -port=8888 -master=[,]* -filer=[,]*",
+ Short: "start a gateway server that points to a list of master servers or a list of filers",
+ Long: `start a gateway server which accepts REST operation to write any blobs, files, or topic messages.
+
+ POST /blobs/
+ upload the blob and return a chunk id
+	DELETE /blobs/<chunk id>
+ delete a chunk id
+
+ /*
+ POST /files/path/to/a/file
+ save /path/to/a/file on filer
+ DELETE /files/path/to/a/file
+ delete /path/to/a/file on filer
+
+ POST /topics/topicName
+	save on filer to /topics/topicName/<partition>/ts.json
+ */
+`,
+}
+
+func runGateway(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ gatewayOptions.startGateway()
+
+ return true
+}
+
+func (gw *GatewayOptions) startGateway() {
+
+ defaultMux := http.NewServeMux()
+
+ _, gws_err := weed_server.NewGatewayServer(defaultMux, &weed_server.GatewayOption{
+ Masters: strings.Split(*gw.masters, ","),
+ Filers: strings.Split(*gw.filers, ","),
+ MaxMB: *gw.maxMB,
+ })
+ if gws_err != nil {
+ glog.Fatalf("Gateway startup error: %v", gws_err)
+ }
+
+ glog.V(0).Infof("Start Seaweed Gateway %s at %s:%d", util.Version(), *gw.bindIp, *gw.port)
+ gatewayListener, e := util.NewListener(
+ *gw.bindIp+":"+strconv.Itoa(*gw.port),
+ time.Duration(10)*time.Second,
+ )
+ if e != nil {
+ glog.Fatalf("Filer listener error: %v", e)
+ }
+
+ httpS := &http.Server{Handler: defaultMux}
+ if err := httpS.Serve(gatewayListener); err != nil {
+ glog.Fatalf("Gateway Fail to serve: %v", e)
+ }
+
+}
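
Assuming the `/blobs/` endpoint behaves as the command help above describes (upload a blob, get a chunk id back), a client interaction might look like the following sketch; the URL, payload, and response handling are illustrative, not taken from the patch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// POST a blob to a locally running gateway (default port 5647)
	resp, err := http.Post("http://localhost:5647/blobs/", "application/octet-stream",
		bytes.NewReader([]byte("hello blob")))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// per the command help, the response body carries the chunk id
	chunkId, _ := io.ReadAll(resp.Body)
	fmt.Println("stored as chunk id:", string(chunkId))
}
```
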
diff --git a/weed/command/iam.go b/weed/command/iam.go
new file mode 100644
index 000000000..17d0832cb
--- /dev/null
+++ b/weed/command/iam.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/iamapi"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/gorilla/mux"
+ "time"
+)
+
+var (
+ iamStandaloneOptions IamOptions
+)
+
+type IamOptions struct {
+ filer *string
+ masters *string
+ port *int
+}
+
+func init() {
+ cmdIam.Run = runIam // break init cycle
+ iamStandaloneOptions.filer = cmdIam.Flag.String("filer", "localhost:8888", "filer server address")
+ iamStandaloneOptions.masters = cmdIam.Flag.String("master", "localhost:9333", "comma-separated master servers")
+ iamStandaloneOptions.port = cmdIam.Flag.Int("port", 8111, "iam server http listen port")
+}
+
+var cmdIam = &Command{
+ UsageLine: "iam [-port=8111] [-filer=] [-masters=,]",
+ Short: "start a iam API compatible server",
+ Long: "start a iam API compatible server.",
+}
+
+func runIam(cmd *Command, args []string) bool {
+ return iamStandaloneOptions.startIamServer()
+}
+
+func (iamopt *IamOptions) startIamServer() bool {
+ filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ glog.V(0).Infof("IAM read filer configuration: %s", resp)
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ router := mux.NewRouter().SkipClean(true)
+ _, iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{
+ Filer: *iamopt.filer,
+ Port: *iamopt.port,
+ FilerGrpcAddress: filerGrpcAddress,
+ GrpcDialOption: grpcDialOption,
+ })
+ glog.V(0).Info("NewIamApiServer created")
+ if iamApiServer_err != nil {
+ glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err)
+ }
+
+ httpS := &http.Server{Handler: router}
+
+ listenAddress := fmt.Sprintf(":%d", *iamopt.port)
+ iamApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
+ if err != nil {
+ glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err)
+ }
+
+ glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port)
+ if err = httpS.Serve(iamApiListener); err != nil {
+ glog.Fatalf("IAM API Server Fail to serve: %v", err)
+ }
+
+ return true
+}
diff --git a/weed/command/master.go b/weed/command/master.go
index 8d0a3289c..0f5e2156d 100644
--- a/weed/command/master.go
+++ b/weed/command/master.go
@@ -1,22 +1,25 @@
package command
import (
+ "github.com/chrislusf/raft/protobuf"
+ "github.com/gorilla/mux"
+ "google.golang.org/grpc/reflection"
"net/http"
"os"
- "runtime"
+ "sort"
"strconv"
"strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
- "github.com/chrislusf/raft/protobuf"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gorilla/mux"
- "github.com/spf13/viper"
- "google.golang.org/grpc/reflection"
)
var (
@@ -24,38 +27,40 @@ var (
)
type MasterOptions struct {
- port *int
- ip *string
- ipBind *string
- metaFolder *string
- peers *string
- volumeSizeLimitMB *uint
- volumePreallocate *bool
- pulseSeconds *int
+ port *int
+ ip *string
+ ipBind *string
+ metaFolder *string
+ peers *string
+ volumeSizeLimitMB *uint
+ volumePreallocate *bool
+ // pulseSeconds *int
defaultReplication *string
garbageThreshold *float64
whiteList *string
disableHttp *bool
metricsAddress *string
metricsIntervalSec *int
+ raftResumeState *bool
}
func init() {
cmdMaster.Run = runMaster // break init cycle
m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
- m.ip = cmdMaster.Flag.String("ip", "localhost", "master | address")
- m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
+ m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address, also used as identifier")
+ m.ipBind = cmdMaster.Flag.String("ip.bind", "", "ip address to bind to")
m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
- m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
+ m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
- m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
+ // m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.")
m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
- m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address")
+	m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address <host>:<port>")
m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
+	m.raftResumeState = cmdMaster.Flag.Bool("resumeState", false, "resume previous state when starting the master server")
}
var cmdMaster = &Command{
@@ -63,7 +68,7 @@ var cmdMaster = &Command{
Short: "start a master server",
Long: `start a master server to provide volume=>location mapping service and sequence number of file ids
- The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
+ The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order.
The example security.toml configuration file can be generated by "weed scaffold -config=security"
@@ -80,10 +85,13 @@ func runMaster(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
util.LoadConfiguration("master", false)
- runtime.GOMAXPROCS(runtime.NumCPU())
- util.SetupProfiling(*masterCpuProfile, *masterMemProfile)
+ grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
- if err := util.TestFolderWritable(*m.metaFolder); err != nil {
+ parent, _ := util.FullPath(*m.metaFolder).DirAndName()
+ if util.FileExists(string(parent)) && !util.FileExists(*m.metaFolder) {
+ os.MkdirAll(*m.metaFolder, 0755)
+ }
+ if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil {
glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
}
@@ -102,23 +110,23 @@ func runMaster(cmd *Command, args []string) bool {
func startMaster(masterOption MasterOptions, masterWhiteList []string) {
- backend.LoadConfiguration(viper.GetViper())
+ backend.LoadConfiguration(util.GetViper())
myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers)
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
- glog.V(0).Infof("Start Seaweed Master %s at %s", util.VERSION, listeningAddress)
+ glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
}
// start raftServer
- raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"),
- peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, *masterOption.pulseSeconds)
+ raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
+ peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, *masterOption.raftResumeState)
if raftServer == nil {
- glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder)
+ glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
}
ms.SetRaftServer(raftServer)
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
@@ -128,14 +136,22 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
- // Create your protocol servers.
- grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS)
- glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterOption.ipBind, grpcPort)
+ glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
go grpcS.Serve(grpcL)
+ go func() {
+ time.Sleep(1500 * time.Millisecond)
+ if ms.Topo.RaftServer.Leader() == "" && ms.Topo.RaftServer.IsLogEmpty() && isTheFirstOne(myMasterAddress, peers) {
+ if ms.MasterClient.FindLeaderFromOtherPeers(myMasterAddress) == "" {
+ raftServer.DoJoinCommand()
+ }
+ }
+ }()
+
go ms.MasterClient.KeepConnectedToMaster()
// start http server
@@ -146,6 +162,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
+ glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
@@ -168,13 +185,22 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
return
}
+func isTheFirstOne(self string, peers []string) bool {
+ sort.Strings(peers)
+ if len(peers) <= 0 {
+ return true
+ }
+ return self == peers[0]
+}
+
func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
return &weed_server.MasterOption{
- Port: *m.port,
- MetaFolder: *m.metaFolder,
- VolumeSizeLimitMB: *m.volumeSizeLimitMB,
- VolumePreallocate: *m.volumePreallocate,
- PulseSeconds: *m.pulseSeconds,
+ Host: *m.ip,
+ Port: *m.port,
+ MetaFolder: *m.metaFolder,
+ VolumeSizeLimitMB: *m.volumeSizeLimitMB,
+ VolumePreallocate: *m.volumePreallocate,
+ // PulseSeconds: *m.pulseSeconds,
DefaultReplicaPlacement: *m.defaultReplication,
GarbageThreshold: *m.garbageThreshold,
WhiteList: whiteList,
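
The self-join logic added above lets exactly one node bootstrap a raft cluster when the log is empty: `isTheFirstOne` sorts the peer list and picks the lexicographically smallest address. A quick illustration with made-up peers:

```go
package main

import (
	"fmt"
	"sort"
)

// same shape as the isTheFirstOne helper in the patch
func isTheFirstOne(self string, peers []string) bool {
	sort.Strings(peers)
	if len(peers) == 0 {
		return true
	}
	return self == peers[0]
}

func main() {
	peers := []string{"10.0.0.2:9333", "10.0.0.1:9333", "10.0.0.3:9333"}
	fmt.Println(isTheFirstOne("10.0.0.1:9333", peers)) // true: issues the initial join
	fmt.Println(isTheFirstOne("10.0.0.2:9333", peers)) // false: waits for a leader
}
```
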
diff --git a/weed/command/mount.go b/weed/command/mount.go
index f09b285f7..5811f0b99 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -1,29 +1,38 @@
package command
import (
- "fmt"
- "strconv"
- "strings"
+ "os"
+ "time"
)
type MountOptions struct {
filer *string
filerMountRootPath *string
dir *string
- dirListCacheLimit *int64
+ dirAutoCreate *bool
collection *string
replication *string
+ diskType *string
ttlSec *int
chunkSizeLimitMB *int
+ concurrentWriters *int
+ cacheDir *string
+ cacheSizeMB *int64
dataCenter *string
allowOthers *bool
umaskString *string
+ nonempty *bool
+ volumeServerAccess *string
+ uidMap *string
+ gidMap *string
+ readOnly *bool
}
var (
- mountOptions MountOptions
- mountCpuProfile *string
- mountMemProfile *string
+ mountOptions MountOptions
+ mountCpuProfile *string
+ mountMemProfile *string
+ mountReadRetryTime *time.Duration
)
func init() {
@@ -31,16 +40,27 @@ func init() {
mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
- mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing")
+ mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
+	mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
- mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
+ mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
+ mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers if not 0")
+ mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
+ mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
+ mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
+ mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]")
+	mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated <local uid>:<filer uid>")
+	mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated <local gid>:<filer gid>")
+ mountOptions.readOnly = cmdMount.Flag.Bool("readOnly", false, "read only")
+
mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
+ mountReadRetryTime = cmdMount.Flag.Duration("readRetryTime", 6*time.Second, "maximum read retry wait time")
}
var cmdMount = &Command{
@@ -60,19 +80,3 @@ var cmdMount = &Command{
`,
}
-
-func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
- hostnameAndPort := strings.Split(filer, ":")
- if len(hostnameAndPort) != 2 {
- return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort)
- }
-
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- return "", fmt.Errorf("filer port parse error: %v", parseErr)
- }
-
- filerGrpcPort := int(filerPort) + 10000
-
- return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
-}
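
The deleted `parseFilerGrpcAddress` helper encoded the SeaweedFS convention that a server's gRPC port is its HTTP port plus 10000; the patch moves callers to the shared `pb.ParseServerToGrpcAddress`, which as far as I can tell applies the same default. A sketch of the convention the removed helper implemented:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// toGrpcAddress mirrors the removed helper: same host, port + 10000.
func toGrpcAddress(filer string) (string, error) {
	host, portStr, err := net.SplitHostPort(filer)
	if err != nil {
		return "", fmt.Errorf("filer should have hostname:port format: %v", err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", fmt.Errorf("filer port parse error: %v", err)
	}
	return net.JoinHostPort(host, strconv.Itoa(port+10000)), nil
}

func main() {
	addr, _ := toGrpcAddress("localhost:8888")
	fmt.Println(addr) // localhost:18888
}
```
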
diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go
index 80a5f9da4..25c4f72cf 100644
--- a/weed/command/mount_linux.go
+++ b/weed/command/mount_linux.go
@@ -138,9 +138,7 @@ func parseInfoFile(r io.Reader) ([]*Info, error) {
}
func osSpecificMountOptions() []fuse.MountOption {
- return []fuse.MountOption{
- fuse.AllowNonEmptyMount(),
- }
+ return []fuse.MountOption{}
}
func checkMountPointAvailable(dir string) bool {
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 453531d00..2474cf7dd 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -3,7 +3,9 @@
package command
import (
+ "context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"os"
"os/user"
"path"
@@ -12,20 +14,27 @@ import (
"strings"
"time"
- "github.com/jacobsa/daemonize"
- "github.com/spf13/viper"
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
+
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
)
func runMount(cmd *Command, args []string) bool {
- util.SetupProfiling(*mountCpuProfile, *mountMemProfile)
+ grace.SetupProfiling(*mountCpuProfile, *mountMemProfile)
+ if *mountReadRetryTime < time.Second {
+ *mountReadRetryTime = time.Second
+ }
+ util.RetryWaitTime = *mountReadRetryTime
umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64)
if umaskErr != nil {
@@ -33,27 +42,52 @@ func runMount(cmd *Command, args []string) bool {
return false
}
- return RunMount(
- *mountOptions.filer,
- *mountOptions.filerMountRootPath,
- *mountOptions.dir,
- *mountOptions.collection,
- *mountOptions.replication,
- *mountOptions.dataCenter,
- *mountOptions.chunkSizeLimitMB,
- *mountOptions.allowOthers,
- *mountOptions.ttlSec,
- *mountOptions.dirListCacheLimit,
- os.FileMode(umask),
- )
+ if len(args) > 0 {
+ return false
+ }
+
+ return RunMount(&mountOptions, os.FileMode(umask))
}
-func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int,
- allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool {
+func RunMount(option *MountOptions, umask os.FileMode) bool {
+
+ filer := *option.filer
+ // parse filer grpc address
+ filerGrpcAddress, err := pb.ParseServerToGrpcAddress(filer)
+ if err != nil {
+ glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
+ return true
+ }
util.LoadConfiguration("security", false)
+ // try to connect to filer, filerBucketsPath may be useful later
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ var cipher bool
+ for i := 0; i < 10; i++ {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
+ glog.V(0).Infof("wait for %d seconds ...", i+1)
+ time.Sleep(time.Duration(i+1) * time.Second)
+		} else {
+			break // connected successfully; stop retrying
+		}
+ }
+ if err != nil {
+ glog.Errorf("failed to talk to filer %s: %v", filerGrpcAddress, err)
+ return true
+ }
+
+ filerMountRootPath := *option.filerMountRootPath
+ dir := util.ResolvePath(*option.dir)
+ chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
- fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
return false
@@ -65,15 +99,21 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.Unmount(dir)
- uid, gid := uint32(0), uint32(0)
-
// detect mount folder mode
- mountMode := os.ModeDir | 0755
+ if *option.dirAutoCreate {
+ os.MkdirAll(dir, os.FileMode(0777)&^umask)
+ }
fileInfo, err := os.Stat(dir)
+
+ uid, gid := uint32(0), uint32(0)
+ mountMode := os.ModeDir | 0777
if err == nil {
- mountMode = os.ModeDir | fileInfo.Mode()
+ mountMode = os.ModeDir | os.FileMode(0777)&^umask
uid, gid = util.GetFileUidGid(fileInfo)
- fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
+ fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, mountMode)
+ } else {
+ fmt.Printf("can not stat %s\n", dir)
+ return false
}
if uid == 0 {
@@ -88,10 +128,17 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
}
}
+ // mapping uid, gid
+ uidGidMapper, err := meta_cache.NewUidGidMapper(*option.uidMap, *option.gidMap)
+ if err != nil {
+ fmt.Printf("failed to parse %s %s: %v\n", *option.uidMap, *option.gidMap, err)
+ return false
+ }
+
// Ensure target mount point availability
if isValid := checkMountPointAvailable(dir); !isValid {
glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
- return false
+ return true
}
mountName := path.Base(dir)
@@ -100,10 +147,8 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.VolumeName(mountName),
fuse.FSName(filer + ":" + filerMountRootPath),
fuse.Subtype("seaweedfs"),
- fuse.NoAppleDouble(),
+ // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders
fuse.NoAppleXattr(),
- fuse.NoBrowse(),
- fuse.AutoXattr(),
fuse.ExclCreate(),
fuse.DaemonTimeout("3600"),
fuse.AllowSUID(),
@@ -111,68 +156,77 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
- fuse.AllowNonEmptyMount(),
+ fuse.MaxBackground(128),
+ fuse.CongestionThreshold(128),
}
options = append(options, osSpecificMountOptions()...)
-
- if allowOthers {
+ if *option.allowOthers {
options = append(options, fuse.AllowOther())
}
-
- c, err := fuse.Mount(dir, options...)
- if err != nil {
- glog.V(0).Infof("mount: %v", err)
- daemonize.SignalOutcome(err)
- return true
+ if *option.nonempty {
+ options = append(options, fuse.AllowNonEmptyMount())
}
-
- util.OnInterrupt(func() {
- fuse.Unmount(dir)
- c.Close()
- })
-
- filerGrpcAddress, err := parseFilerGrpcAddress(filer)
- if err != nil {
- glog.V(0).Infof("parseFilerGrpcAddress: %v", err)
- daemonize.SignalOutcome(err)
- return true
+ if *option.readOnly {
+ options = append(options, fuse.ReadOnly())
}
+ // find mount point
mountRoot := filerMountRootPath
if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
- daemonize.SignalOutcome(nil)
+ diskType := types.ToDiskType(*option.diskType)
- err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
+ seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
+ MountDirectory: dir,
+ FilerAddress: filer,
FilerGrpcAddress: filerGrpcAddress,
- GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"),
+ GrpcDialOption: grpcDialOption,
FilerMountRootPath: mountRoot,
- Collection: collection,
- Replication: replication,
- TtlSec: int32(ttlSec),
+ Collection: *option.collection,
+ Replication: *option.replication,
+ TtlSec: int32(*option.ttlSec),
+ DiskType: diskType,
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
- DataCenter: dataCenter,
- DirListCacheLimit: dirListCacheLimit,
- EntryCacheTtl: 3 * time.Second,
+ ConcurrentWriters: *option.concurrentWriters,
+ CacheDir: *option.cacheDir,
+ CacheSizeMB: *option.cacheSizeMB,
+ DataCenter: *option.dataCenter,
MountUid: uid,
MountGid: gid,
MountMode: mountMode,
MountCtime: fileInfo.ModTime(),
MountMtime: time.Now(),
Umask: umask,
- }))
+ VolumeServerAccess: *mountOptions.volumeServerAccess,
+ Cipher: cipher,
+ UidGidMapper: uidGidMapper,
+ })
+
+ // mount
+ c, err := fuse.Mount(dir, options...)
if err != nil {
- fuse.Unmount(dir)
+ glog.V(0).Infof("mount: %v", err)
+ return true
}
+ defer fuse.Unmount(dir)
+
+ grace.OnInterrupt(func() {
+ fuse.Unmount(dir)
+ c.Close()
+ })
+
+ glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
+ server := fs.New(c, nil)
+ seaweedFileSystem.Server = server
+ err = server.Serve(seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
glog.V(0).Infof("mount process: %v", err)
- daemonize.SignalOutcome(err)
return true
}
diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go
new file mode 100644
index 000000000..db0b4148d
--- /dev/null
+++ b/weed/command/msg_broker.go
@@ -0,0 +1,114 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "google.golang.org/grpc/reflection"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ messageBrokerStandaloneOptions MessageBrokerOptions
+)
+
+type MessageBrokerOptions struct {
+ filer *string
+ ip *string
+ port *int
+ cpuprofile *string
+ memprofile *string
+}
+
+func init() {
+ cmdMsgBroker.Run = runMsgBroker // break init cycle
+ messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address")
+ messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address")
+ messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port")
+ messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file")
+ messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file")
+}
+
+var cmdMsgBroker = &Command{
+ UsageLine: "msgBroker [-port=17777] [-filer=]",
+ Short: "start a message queue broker",
+ Long: `start a message queue broker
+
+ The broker can accept gRPC calls to write or read messages. The messages are stored via filer.
+ The brokers are stateless. To scale up, just add more brokers.
+
+`,
+}
+
+func runMsgBroker(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ return messageBrokerStandaloneOptions.startQueueServer()
+
+}
+
+func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
+
+ grace.SetupProfiling(*msgBrokerOpt.cpuprofile, *msgBrokerOpt.memprofile)
+
+ filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*msgBrokerOpt.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
+ cipher := false
+
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
+ Filers: []string{*msgBrokerOpt.filer},
+ DefaultReplication: "",
+ MaxMB: 0,
+ Ip: *msgBrokerOpt.ip,
+ Port: *msgBrokerOpt.port,
+ Cipher: cipher,
+ }, grpcDialOption)
+ if err != nil {
+ glog.Fatalf("failed to create message broker: %v", err)
+ }
+
+ // start grpc listener
+ grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
+ if err != nil {
+ glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
+ }
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
+ messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)
+ reflection.Register(grpcS)
+ grpcS.Serve(grpcL)
+
+ return true
+
+}
diff --git a/weed/command/s3.go b/weed/command/s3.go
index e004bb066..c8292a7d5 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -1,18 +1,21 @@
package command
import (
+ "context"
+ "fmt"
"net/http"
"time"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "github.com/spf13/viper"
- "fmt"
+ "github.com/gorilla/mux"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api"
+ stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gorilla/mux"
)
var (
@@ -21,28 +24,104 @@ var (
type S3Options struct {
filer *string
- filerBucketsPath *string
port *int
+ config *string
domainName *string
tlsPrivateKey *string
tlsCertificate *string
+ metricsHttpPort *int
+ allowEmptyFolder *bool
}
func init() {
cmdS3.Run = runS3 // break init cycle
s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
- s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
- s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
+ s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "comma-separated list of host name suffixes, {bucket}.{domainName}")
+ s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
+ s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders")
}
var cmdS3 = &Command{
- UsageLine: "s3 -port=8333 -filer=",
+ UsageLine: "s3 [-port=8333] [-filer=] [-config=]",
Short: "start a s3 API compatible server that is backed by a filer",
Long: `start a s3 API compatible server that is backed by a filer.
+ By default, you can use any access key and secret key to access the S3 APIs.
+ To enable credential-based access, create a config.json file similar to this:
+
+{
+ "identities": [
+ {
+ "name": "anonymous",
+ "actions": [
+ "Read"
+ ]
+ },
+ {
+ "name": "some_admin_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key1",
+ "secretKey": "some_secret_key1"
+ }
+ ],
+ "actions": [
+ "Admin",
+ "Read",
+ "List",
+ "Tagging",
+ "Write"
+ ]
+ },
+ {
+ "name": "some_read_only_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key2",
+ "secretKey": "some_secret_key2"
+ }
+ ],
+ "actions": [
+ "Read"
+ ]
+ },
+ {
+ "name": "some_normal_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key3",
+ "secretKey": "some_secret_key3"
+ }
+ ],
+ "actions": [
+ "Read",
+ "List",
+ "Tagging",
+ "Write"
+ ]
+ },
+ {
+ "name": "user_limited_to_bucket1",
+ "credentials": [
+ {
+ "accessKey": "some_access_key4",
+ "secretKey": "some_secret_key4"
+ }
+ ],
+ "actions": [
+ "Read:bucket1",
+ "List:bucket1",
+ "Tagging:bucket1",
+ "Write:bucket1"
+ ]
+ }
+ ]
+}
+
`,
}
@@ -50,26 +129,61 @@ func runS3(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
+ go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpPort)
+
return s3StandaloneOptions.startS3Server()
}
func (s3opt *S3Options) startS3Server() bool {
- filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer)
+ filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*s3opt.filer)
if err != nil {
glog.Fatal(err)
return false
}
+ filerBucketsPath := "/buckets"
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ // metrics read from the filer
+ var metricsAddress string
+ var metricsIntervalSec int
+
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ filerBucketsPath = resp.DirBuckets
+ metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
+ glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec)
+
router := mux.NewRouter().SkipClean(true)
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
Filer: *s3opt.filer,
+ Port: *s3opt.port,
FilerGrpcAddress: filerGrpcAddress,
+ Config: *s3opt.config,
DomainName: *s3opt.domainName,
- BucketsPath: *s3opt.filerBucketsPath,
- GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"),
+ BucketsPath: filerBucketsPath,
+ GrpcDialOption: grpcDialOption,
+ AllowEmptyFolder: *s3opt.allowEmptyFolder,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
@@ -84,12 +198,12 @@ func (s3opt *S3Options) startS3Server() bool {
}
if *s3opt.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port)
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port)
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index a76466ed6..88dc94df1 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -14,6 +14,14 @@ var cmdScaffold = &Command{
Short: "generate basic configuration files",
Long: `Generate filer.toml with all possible configurations for you to customize.
+ The options can also be overridden by environment variables.
+ For example, the mysql password in filer.toml can be overridden by the environment variable
+ export WEED_MYSQL_PASSWORD=some_password
+ Environment variable rules:
+ * Prefix the variable name with "WEED_"
+ * Uppercase the rest of the variable name.
+ * Replace '.' with '_'
+
`,
}
@@ -36,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool {
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
+ case "shell":
+ content = SHELL_TOML_EXAMPLE
}
if content == "" {
println("need a valid -config option")
@@ -59,21 +69,43 @@ const (
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
+####################################################
+# Customizable filer server options
+####################################################
+[filer.options]
+# with HTTP DELETE, by default the filer checks whether a folder is empty.
+# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
+recursive_delete = false
+# each directory under this folder will automatically become a separate bucket
+buckets_folder = "/buckets"
+
+####################################################
+# The following are filer store options
+####################################################
+
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
-dir = "." # directory to store level db files
+dir = "./filerldb2" # directory to store level db files
-####################################################
-# multiple filers on shared storage, fairly scalable
-####################################################
+[leveldb3]
+# similar to leveldb2.
+# each bucket has its own meta store.
+enabled = false
+dir = "./filerldb3" # directory to store level db files
-[mysql] # or tidb
+[rocksdb]
+# local on disk, similar to leveldb
+# since it uses a C wrapper, you need to install rocksdb and build it yourself
+enabled = false
+dir = "./filerrdb" # directory to store rocksdb files
+
+[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
-# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
-# name VARCHAR(1000) COMMENT 'directory or file name',
-# directory TEXT COMMENT 'full path to parent directory',
+# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
+# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
+# directory TEXT COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
@@ -86,9 +118,37 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
+connection_max_lifetime_seconds = 0
+interpolateParams = false
+# if inserts/upserts are failing, you can disable upsert or adjust the query syntax to match your RDBMS:
+enableUpsert = true
+upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
+
+[mysql2] # or memsql, tidb
+enabled = false
+createTable = """
+ CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
+ dirhash BIGINT,
+ name VARCHAR(1000) BINARY,
+ directory TEXT,
+ meta LONGBLOB,
+ PRIMARY KEY (dirhash, name)
+ ) DEFAULT CHARSET=utf8;
+"""
+hostname = "localhost"
+port = 3306
+username = "root"
+password = ""
+database = "" # create or use an existing database
+connection_max_idle = 2
+connection_max_open = 100
+connection_max_lifetime_seconds = 0
interpolateParams = false
+# if inserts/upserts are failing, you can disable upsert or adjust the query syntax to match your RDBMS:
+enableUpsert = true
+upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
-[postgres] # or cockroachdb
+[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
@@ -101,10 +161,40 @@ hostname = "localhost"
port = 5432
username = "postgres"
password = ""
-database = "" # create or use an existing database
+database = "postgres" # create or use an existing database
+schema = ""
+sslmode = "disable"
+connection_max_idle = 100
+connection_max_open = 100
+connection_max_lifetime_seconds = 0
+# if inserts/upserts are failing, you can disable upsert or adjust the query syntax to match your RDBMS:
+enableUpsert = true
+upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
+
+[postgres2]
+enabled = false
+createTable = """
+ CREATE TABLE IF NOT EXISTS "%s" (
+ dirhash BIGINT,
+ name VARCHAR(65535),
+ directory VARCHAR(65535),
+ meta bytea,
+ PRIMARY KEY (dirhash, name)
+ );
+"""
+hostname = "localhost"
+port = 5432
+username = "postgres"
+password = ""
+database = "postgres" # create or use an existing database
+schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
+connection_max_lifetime_seconds = 0
+# if inserts/upserts are failing, you can disable upsert or adjust the query syntax to match your RDBMS:
+enableUpsert = true
+upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[cassandra]
# CREATE TABLE filemeta (
@@ -118,14 +208,25 @@ keyspace="seaweedfs"
hosts=[
"localhost:9042",
]
+username=""
+password=""
+# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
+superLargeDirectories = []
+
+[hbase]
+enabled = false
+zkquorum = ""
+table = "seaweedfs"
-[redis]
+[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
+# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
+superLargeDirectories = []
-[redis_cluster]
+[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
@@ -136,20 +237,58 @@ addresses = [
"localhost:30006",
]
password = ""
-// allows reads from slave servers or the master, but all writes still go to the master
-readOnly = true
-// automatically use the closest Redis server for reads
-routeByLatency = true
+# allows reads from slave servers or the master, but all writes still go to the master
+readOnly = false
+# automatically use the closest Redis server for reads
+routeByLatency = false
+# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
+superLargeDirectories = []
[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"
-[tikv]
+[mongodb]
enabled = false
-pdAddress = "192.168.199.113:2379"
+uri = "mongodb://localhost:27017"
+option_pool_size = 0
+database = "seaweedfs"
+[elastic7]
+enabled = false
+servers = [
+ "http://localhost1:9200",
+ "http://localhost2:9200",
+ "http://localhost3:9200",
+]
+username = ""
+password = ""
+sniff_enabled = false
+healthcheck_enabled = false
+# increasing this value is recommended; be sure the value in Elasticsearch is greater than or equal to this
+index.max_result_window = 10000
+
+##########################
+##########################
+# To add path-specific filer store:
+#
+# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
+# 2. Add a location configuration. E.g., location = "/tmp/"
+# 3. Copy and customize all other configurations.
+# Make sure they are not the same if using the same store type!
+# 4. Set enabled to true
+#
+# The following is just using redis as an example
+##########################
+[redis2.tmp]
+enabled = false
+location = "/tmp/"
+address = "localhost:6379"
+password = ""
+database = 1
`
@@ -204,7 +343,8 @@ enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
-# the RabbitMQ management plugin.
+# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then
+# create the binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`
@@ -225,6 +365,19 @@ grpcAddress = "localhost:18888"
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"
+[sink.local]
+enabled = false
+directory = "/data"
+# replicated files are organized into yyyy-mm-dd directories by modified time,
+# so each date directory contains all new and updated files.
+is_incremental = false
+
+[sink.local_incremental]
+# replicated files are organized into yyyy-mm-dd directories by modified time,
+# so each date directory contains all new and updated files.
+enabled = false
+directory = "/backup"
+
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
@@ -235,6 +388,7 @@ directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
+is_incremental = false
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
@@ -245,6 +399,8 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
+endpoint = ""
+is_incremental = false
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
@@ -252,6 +408,7 @@ enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/" # destination directory
+is_incremental = false
[sink.azure]
# experimental, let me know if it works
@@ -260,6 +417,7 @@ account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/" # destination directory
+is_incremental = false
[sink.backblaze]
enabled = false
@@ -267,6 +425,7 @@ b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
+is_incremental = false
`
@@ -293,18 +452,28 @@ expires_after_seconds = 10 # seconds
# the host name is not checked, so the PERM files can be shared.
[grpc]
ca = ""
+# Set a wildcard domain to enable TLS authentication by common names
+allowed_wildcard_domain = "" # .mycompany.com
[grpc.volume]
cert = ""
key = ""
+allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.master]
cert = ""
key = ""
+allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.filer]
cert = ""
key = ""
+allowed_commonNames = "" # comma-separated SSL certificate common names
+
+[grpc.msg_broker]
+cert = ""
+key = ""
+allowed_commonNames = "" # comma-separated SSL certificate common names
# use this for any place needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
@@ -312,7 +481,6 @@ key = ""
cert = ""
key = ""
-
# volume server https options
# Note: work in progress!
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
@@ -335,23 +503,29 @@ key = ""
[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
+ lock
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
+ volume.fix.replication
+ unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
-default_filer_url = "http://localhost:8888/"
+default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
+
[master.sequencer]
-type = "memory" # Choose [memory|etcd] type for storing the file id sequence
+type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
+# configurations for tiered cloud storage
+# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
[storage.backend.s3.default]
enabled = false
@@ -359,6 +533,41 @@ sequencer_etcd_urls = "http://127.0.0.1:2379"
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
+ endpoint = ""
+
+# create this number of logical volumes when no more volumes are writable
+# count_x means how many copies of data.
+# e.g.:
+# 000 has only one copy, copy_1
+# 010 and 001 have two copies, copy_2
+# 011 has three copies, copy_3
+[master.volume_growth]
+copy_1 = 7 # create 1 x 7 = 7 actual volumes
+copy_2 = 6 # create 2 x 6 = 12 actual volumes
+copy_3 = 3 # create 3 x 3 = 9 actual volumes
+copy_other = 1 # create n x 1 = n actual volumes
+
+# configuration flags for replication
+[master.replication]
+# any replication counts should be considered minimums. If you specify 010 and
+# have 3 different racks, that's still considered writable. Writes will still
+# try to replicate to all available volumes. You should only use this option
+# if you are doing your own replication or periodic sync of volumes.
+treat_replication_as_minimums = false
+
+`
+ SHELL_TOML_EXAMPLE = `
+
+[cluster]
+default = "c1"
+
+[cluster.c1]
+master = "localhost:9333" # comma-separated master servers
+filer = "localhost:8888" # filer host and port
+
+[cluster.c2]
+master = ""
+filer = ""
`
)
diff --git a/weed/command/server.go b/weed/command/server.go
index 87f404ed3..6eb3bf97c 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -2,26 +2,30 @@ package command
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"os"
- "runtime"
- "runtime/pprof"
"strings"
"time"
+ stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
)
type ServerOptions struct {
cpuprofile *string
+ memprofile *string
v VolumeServerOptions
}
var (
- serverOptions ServerOptions
- masterOptions MasterOptions
- filerOptions FilerOptions
- s3Options S3Options
+ serverOptions ServerOptions
+ masterOptions MasterOptions
+ filerOptions FilerOptions
+ s3Options S3Options
+ webdavOptions WebDavOption
+ msgBrokerOptions MessageBrokerOptions
)
func init() {
@@ -29,7 +33,7 @@ func init() {
}
var cmdServer = &Command{
- UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name",
+ UsageLine: "server -dir=/tmp -volume.max=5 -ip=server_name",
Short: "start a master server, a volume server, and optionally a filer and a S3 gateway",
Long: `start both a volume server to provide storage spaces
and a master server to provide volume=>location mapping service and sequence number of file ids
@@ -45,24 +49,34 @@ var cmdServer = &Command{
}
var (
- serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name")
- serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
+ serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier")
+ serverBindIp = cmdServer.Flag.String("ip.bind", "", "ip address to bind to")
serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds")
serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name")
serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name")
serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
- volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...")
- pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
- isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
- isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
+ volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum number of volumes, count[,count]... If set to zero, the limit will be auto-configured.")
+ volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (defaults to 1%). Low disk space will mark all volumes as ReadOnly.")
+ serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+
+ // pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
+ isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server")
+ isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server")
+ isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
+ isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
+ isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway")
+ isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")
serverWhiteList []string
+
+ False = false
)
func init() {
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
+ serverOptions.memprofile = cmdServer.Flag.String("memprofile", "", "memory profile output file")
masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
@@ -73,29 +87,52 @@ func init() {
masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address")
masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
+ masterOptions.raftResumeState = cmdServer.Flag.Bool("resumeState", false, "resume previous state on start master server")
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
- filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
- filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
+ filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
- filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
+ filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit")
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
+ filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
+ filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+ filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Files smaller than this limit can be cached in the filer store.")
+ filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
+ serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
+ serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+ serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
+ serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stopping heartbeats and stopping the volume server")
+ serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
+ serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
+ serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, "enable tcp port")
- s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
- s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
+ s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "comma-separated list of host name suffixes, {bucket}.{domainName}")
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
+ s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
+ s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+
+ webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
+ webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
+ webdavOptions.replication = cmdServer.Flag.String("webdav.replication", "", "replication to create the files")
+ webdavOptions.disk = cmdServer.Flag.String("webdav.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
+ webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
+ webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
+ webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
+ webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
+
+ msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
}
@@ -104,55 +141,54 @@ func runServer(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
util.LoadConfiguration("master", false)
- if *serverOptions.cpuprofile != "" {
- f, err := os.Create(*serverOptions.cpuprofile)
- if err != nil {
- glog.Fatal(err)
- }
- pprof.StartCPUProfile(f)
- defer pprof.StopCPUProfile()
- }
+ grace.SetupProfiling(*serverOptions.cpuprofile, *serverOptions.memprofile)
- if *filerOptions.redirectOnRead {
+ if *isStartingS3 {
*isStartingFiler = true
}
-
- if *isStartingS3 {
+ if *isStartingWebDav {
+ *isStartingFiler = true
+ }
+ if *isStartingMsgBroker {
*isStartingFiler = true
}
- _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)
- peers := strings.Join(peerList, ",")
- masterOptions.peers = &peers
+ if *isStartingMasterServer {
+ _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)
+ peers := strings.Join(peerList, ",")
+ masterOptions.peers = &peers
+ }
+ // ip address
masterOptions.ip = serverIp
masterOptions.ipBind = serverBindIp
- filerOptions.masters = &peers
- filerOptions.ip = serverBindIp
+ filerOptions.masters = masterOptions.peers
+ filerOptions.ip = serverIp
+ filerOptions.bindIp = serverBindIp
serverOptions.v.ip = serverIp
serverOptions.v.bindIp = serverBindIp
- serverOptions.v.masters = &peers
+ serverOptions.v.masters = masterOptions.peers
serverOptions.v.idleConnectionTimeout = serverTimeout
serverOptions.v.dataCenter = serverDataCenter
serverOptions.v.rack = serverRack
+ msgBrokerOptions.ip = serverIp
- serverOptions.v.pulseSeconds = pulseSeconds
- masterOptions.pulseSeconds = pulseSeconds
+ // serverOptions.v.pulseSeconds = pulseSeconds
+ // masterOptions.pulseSeconds = pulseSeconds
masterOptions.whiteList = serverWhiteListOption
filerOptions.dataCenter = serverDataCenter
+ filerOptions.rack = serverRack
filerOptions.disableHttp = serverDisableHttp
masterOptions.disableHttp = serverDisableHttp
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
+ webdavOptions.filer = &filerAddress
+ msgBrokerOptions.filer = &filerAddress
- if *filerOptions.defaultReplicaPlacement == "" {
- *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication
- }
-
- runtime.GOMAXPROCS(runtime.NumCPU())
+ go stats_collect.StartMetricsServer(*serverMetricsHttpPort)
folders := strings.Split(*volumeDataFolders, ",")
@@ -163,7 +199,7 @@ func runServer(cmd *Command, args []string) bool {
if *masterOptions.metaFolder == "" {
*masterOptions.metaFolder = folders[0]
}
- if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil {
+ if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {
glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
}
filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder
@@ -190,12 +226,33 @@ func runServer(cmd *Command, args []string) bool {
}()
}
+ if *isStartingWebDav {
+ go func() {
+ time.Sleep(2 * time.Second)
+
+ webdavOptions.startWebDav()
+
+ }()
+ }
+
+ if *isStartingMsgBroker {
+ go func() {
+ time.Sleep(2 * time.Second)
+ msgBrokerOptions.startQueueServer()
+ }()
+ }
+
// start volume server
- {
- go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption)
+ if *isStartingVolumeServer {
+ go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent)
+
+ }
+
+ if *isStartingMasterServer {
+ go startMaster(masterOptions, serverWhiteList)
}
- startMaster(masterOptions, serverWhiteList)
+ select {}
return true
}
diff --git a/weed/command/shell.go b/weed/command/shell.go
index 34b5aef31..c9976e809 100644
--- a/weed/command/shell.go
+++ b/weed/command/shell.go
@@ -6,18 +6,19 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/shell"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
)
var (
- shellOptions shell.ShellOptions
- shellInitialFilerUrl *string
+ shellOptions shell.ShellOptions
+ shellInitialFiler *string
+ shellCluster *string
)
func init() {
cmdShell.Run = runShell // break init cycle
- shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
- shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url")
+ shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333")
+ shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888")
+ shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml")
}
var cmdShell = &Command{
@@ -25,20 +26,40 @@ var cmdShell = &Command{
Short: "run interactive administrative commands",
Long: `run interactive administrative commands.
+ Generate shell.toml via "weed scaffold -config=shell"
+
`,
}
func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false)
- shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
+ shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ if *shellOptions.Masters == "" && *shellInitialFiler == "" {
+ util.LoadConfiguration("shell", false)
+ v := util.GetViper()
+ cluster := v.GetString("cluster.default")
+ if *shellCluster != "" {
+ cluster = *shellCluster
+ }
+ if cluster == "" {
+ *shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
+ } else {
+ *shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
+ *shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
+ }
+ }
+
+ fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
- var filerPwdErr error
- shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl)
- if filerPwdErr != nil {
- fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr)
+ var err error
+ shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
+ if err != nil {
+ fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
return false
}
+ shellOptions.Directory = "/"
shell.RunShell(shellOptions)
diff --git a/weed/command/upload.go b/weed/command/upload.go
index 25e938d9b..0f9361b40 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -1,16 +1,18 @@
package command
import (
+ "context"
"encoding/json"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "google.golang.org/grpc"
"os"
"path/filepath"
+ "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
-
- "github.com/chrislusf/seaweedfs/weed/operation"
)
var (
@@ -18,14 +20,16 @@ var (
)
type UploadOptions struct {
- master *string
- dir *string
- include *string
- replication *string
- collection *string
- dataCenter *string
- ttl *string
- maxMB *int
+ master *string
+ dir *string
+ include *string
+ replication *string
+ collection *string
+ dataCenter *string
+ ttl *string
+ diskType *string
+ maxMB *int
+ usePublicUrl *bool
}
func init() {
@@ -37,8 +41,10 @@ func init() {
upload.replication = cmdUpload.Flag.String("replication", "", "replication type")
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
+ upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
- upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
+ upload.maxMB = cmdUpload.Flag.Int("maxMB", 4, "split files larger than the limit")
+ upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
}
var cmdUpload = &Command{
@@ -63,13 +69,22 @@ var cmdUpload = &Command{
func runUpload(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
+ if err != nil {
+ fmt.Printf("upload: %v\n", err)
+ return false
+ }
+ if *upload.replication == "" {
+ *upload.replication = defaultReplication
+ }
if len(args) == 0 {
if *upload.dir == "" {
return false
}
- filepath.Walk(*upload.dir, func(path string, info os.FileInfo, err error) error {
+ filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error {
if err == nil {
if !info.IsDir() {
if *upload.include != "" {
@@ -81,9 +96,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
- results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
- *upload.replication, *upload.collection, *upload.dataCenter,
- *upload.ttl, *upload.maxMB)
+ results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@@ -100,11 +113,21 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
- results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
- *upload.replication, *upload.collection, *upload.dataCenter,
- *upload.ttl, *upload.maxMB)
+ results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}
return true
}
+
+func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
+ err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
+ resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
+ }
+ replication = resp.DefaultReplication
+ return nil
+ })
+ return
+}
diff --git a/weed/command/version.go b/weed/command/version.go
index 8fdd68ec8..9caf7dc4e 100644
--- a/weed/command/version.go
+++ b/weed/command/version.go
@@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool {
cmd.Usage()
}
- fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
return true
}
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 3e8341ef8..9df500178 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -2,25 +2,32 @@ package command
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"net/http"
+ httppprof "net/http/pprof"
"os"
- "runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/security"
- "github.com/chrislusf/seaweedfs/weed/util/httpdown"
"github.com/spf13/viper"
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util/httpdown"
+
+ "google.golang.org/grpc/reflection"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/server"
+ stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc/reflection"
)
var (
@@ -28,45 +35,62 @@ var (
)
type VolumeServerOptions struct {
- port *int
- publicPort *int
- folders []string
- folderMaxLimits []int
- ip *string
- publicUrl *string
- bindIp *string
- masters *string
- pulseSeconds *int
- idleConnectionTimeout *int
- dataCenter *string
- rack *string
- whiteList []string
- indexType *string
- fixJpgOrientation *bool
- readRedirect *bool
- cpuProfile *string
- memProfile *string
- compactionMBPerSecond *int
+ port *int
+ publicPort *int
+ folders []string
+ folderMaxLimits []int
+ idxFolder *string
+ ip *string
+ publicUrl *string
+ bindIp *string
+ masters *string
+ idleConnectionTimeout *int
+ dataCenter *string
+ rack *string
+ whiteList []string
+ indexType *string
+ diskType *string
+ fixJpgOrientation *bool
+ readRedirect *bool
+ cpuProfile *string
+ memProfile *string
+ compactionMBPerSecond *int
+ fileSizeLimitMB *int
+ concurrentUploadLimitMB *int
+ minFreeSpacePercents []float32
+ pprof *bool
+ preStopSeconds *int
+ metricsHttpPort *int
+ // pulseSeconds *int
+ enableTcp *bool
}
func init() {
cmdVolume.Run = runVolume // break init cycle
v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
- v.ip = cmdVolume.Flag.String("ip", "", "ip or server name")
+ v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier")
v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
- v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
+ v.bindIp = cmdVolume.Flag.String("ip.bind", "", "ip address to bind to")
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
- v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
+ v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stopping heartbeats and stopping the volume server")
+ // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
+ v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
+ v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+ v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
+ v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
+ v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
+ v.enableTcp = cmdVolume.Flag.Bool("tcp", false, "enable tcp port")
}
var cmdVolume = &Command{
@@ -79,26 +103,39 @@ var cmdVolume = &Command{
var (
volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
- maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...")
+ maxVolumeCounts = cmdVolume.Flag.String("max", "8", "maximum number of volumes, count[,count]... If set to zero, the limit will be auto-configured.")
volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
+ minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (defaults to 1%). Low disk space will mark all volumes as ReadOnly.")
)
func runVolume(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- runtime.GOMAXPROCS(runtime.NumCPU())
- util.SetupProfiling(*v.cpuProfile, *v.memProfile)
+ // If --pprof is set we assume the caller wants to be able to collect
+ // cpu and memory profiles via go tool pprof
+ if !*v.pprof {
+ grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
+ }
+
+ go stats_collect.StartMetricsServer(*v.metricsHttpPort)
- v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption)
+ v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent)
return true
}
-func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) {
+func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) {
// Set multiple folders and each folder's max volume count limit
v.folders = strings.Split(volumeFolders, ",")
+ for _, folder := range v.folders {
+ if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil {
+ glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
+ }
+ }
+
+ // set max
maxCountStrings := strings.Split(maxVolumeCounts, ",")
for _, maxString := range maxCountStrings {
if max, e := strconv.Atoi(maxString); e == nil {
@@ -107,14 +144,47 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
glog.Fatalf("The max specified in -max not a valid number %s", maxString)
}
}
+ if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
+ for i := 0; i < len(v.folders)-1; i++ {
+ v.folderMaxLimits = append(v.folderMaxLimits, v.folderMaxLimits[0])
+ }
+ }
if len(v.folders) != len(v.folderMaxLimits) {
glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
}
- for _, folder := range v.folders {
- if err := util.TestFolderWritable(folder); err != nil {
- glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
+
+ // set minFreeSpacePercent
+ minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",")
+ for _, freeString := range minFreeSpacePercentStrings {
+ if value, e := strconv.ParseFloat(freeString, 32); e == nil {
+ v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
+ } else {
+ glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
+ }
+ }
+ if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
+ for i := 0; i < len(v.folders)-1; i++ {
+ v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0])
+ }
+ }
+ if len(v.folders) != len(v.minFreeSpacePercents) {
+ glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
+ }
+
+ // set disk types
+ var diskTypes []types.DiskType
+ diskTypeStrings := strings.Split(*v.diskType, ",")
+ for _, diskTypeString := range diskTypeStrings {
+ diskTypes = append(diskTypes, types.ToDiskType(diskTypeString))
+ }
+ if len(diskTypes) == 1 && len(v.folders) > 1 {
+ for i := 0; i < len(v.folders)-1; i++ {
+ diskTypes = append(diskTypes, diskTypes[0])
}
}
+ if len(v.folders) != len(diskTypes) {
+ glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes))
+ }
// security related white list configuration
if volumeWhiteListOption != "" {
@@ -122,7 +192,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
if *v.ip == "" {
- *v.ip = "127.0.0.1"
+ *v.ip = util.DetectedHostAddress()
+ glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
}
if *v.publicPort == 0 {
@@ -138,6 +209,14 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
publicVolumeMux = http.NewServeMux()
}
+ if *v.pprof {
+ volumeMux.HandleFunc("/debug/pprof/", httppprof.Index)
+ volumeMux.HandleFunc("/debug/pprof/cmdline", httppprof.Cmdline)
+ volumeMux.HandleFunc("/debug/pprof/profile", httppprof.Profile)
+ volumeMux.HandleFunc("/debug/pprof/symbol", httppprof.Symbol)
+ volumeMux.HandleFunc("/debug/pprof/trace", httppprof.Trace)
+ }
+
volumeNeedleMapKind := storage.NeedleMapInMemory
switch *v.indexType {
case "leveldb":
@@ -152,14 +231,16 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
- v.folders, v.folderMaxLimits,
+ v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
+ *v.idxFolder,
volumeNeedleMapKind,
- strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
+ strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readRedirect,
*v.compactionMBPerSecond,
+ *v.fileSizeLimitMB,
+ int64(*v.concurrentUploadLimitMB)*1024*1024,
)
-
// starting grpc server
grpcS := v.startGrpcService(volumeServer)
@@ -172,50 +253,56 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
}
+ // starting tcp server
+ if *v.enableTcp {
+ go v.startTcpService(volumeServer)
+ }
+
// starting the cluster http server
clusterHttpServer := v.startClusterHttpService(volumeMux)
- stopChain := make(chan struct{})
- util.OnInterrupt(func() {
+ stopChan := make(chan bool)
+ grace.OnInterrupt(func() {
fmt.Println("volume server has be killed")
- var startTime time.Time
-
- // firstly, stop the public http service to prevent from receiving new user request
- if nil != publicHttpDown {
- startTime = time.Now()
- if err := publicHttpDown.Stop(); err != nil {
- glog.Warningf("stop the public http server failed, %v", err)
- }
- delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
- glog.V(0).Infof("stop public http server, elapsed %dms", delta)
- }
- startTime = time.Now()
- if err := clusterHttpServer.Stop(); err != nil {
- glog.Warningf("stop the cluster http server failed, %v", err)
+ // Stop heartbeats
+ if !volumeServer.StopHeartbeat() {
+ glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
+ time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
}
- delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
- glog.V(0).Infof("graceful stop cluster http server, elapsed [%d]", delta)
- startTime = time.Now()
- grpcS.GracefulStop()
- delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
- glog.V(0).Infof("graceful stop gRPC, elapsed [%d]", delta)
+ shutdown(publicHttpDown, clusterHttpServer, grpcS, volumeServer)
+ stopChan <- true
+ })
- startTime = time.Now()
- volumeServer.Shutdown()
- delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
- glog.V(0).Infof("stop volume server, elapsed [%d]", delta)
+ select {
+ case <-stopChan:
+ }
- pprof.StopCPUProfile()
+}
- close(stopChain) // notify exit
- })
+func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server, grpcS *grpc.Server, volumeServer *weed_server.VolumeServer) {
- select {
- case <-stopChain:
+ // first, stop the public http service so no new user requests are accepted
+ if nil != publicHttpDown {
+ glog.V(0).Infof("stop public http server ... ")
+ if err := publicHttpDown.Stop(); err != nil {
+ glog.Warningf("stop the public http server failed, %v", err)
+ }
+ }
+
+ glog.V(0).Infof("graceful stop cluster http server ... ")
+ if err := clusterHttpServer.Stop(); err != nil {
+ glog.Warningf("stop the cluster http server failed, %v", err)
}
- glog.Warningf("the volume server exit.")
+
+ glog.V(0).Infof("graceful stop gRPC ...")
+ grpcS.GracefulStop()
+
+ volumeServer.Shutdown()
+
+ pprof.StopCPUProfile()
+
}
// check whether configure the public port
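The refactor above extracts the teardown sequence into shutdown() so the interrupt handler stays small, and the order matters: the public http server stops first so no new user traffic arrives, then the cluster http server, then gRPC via GracefulStop, and finally the volume server itself. A minimal sketch of the same signal-then-ordered-teardown shape, with the concrete stop calls stubbed out:

package main

import (
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

func main() {
    stopChan := make(chan bool)
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

    go func() {
        <-sigs
        // mirror the teardown order above: public traffic first,
        // then internal endpoints, then the storage layer
        fmt.Println("stop public http server")
        fmt.Println("stop cluster http server")
        fmt.Println("graceful stop gRPC")
        fmt.Println("shut down volume server")
        stopChan <- true
    }()

    <-stopChan
}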
@@ -229,7 +316,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
- grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume"))
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS)
go func() {
@@ -242,7 +329,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
- glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
+ glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
@@ -269,7 +356,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
- glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
+ glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
@@ -288,3 +375,22 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}()
return clusterHttpServer
}
+
+func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) {
+ listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port+20000)
+ glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress)
+ listener, e := util.NewListener(listeningAddress, 0)
+ if e != nil {
+ glog.Fatalf("Volume server listener error on %s:%v", listeningAddress, e)
+ }
+ defer listener.Close()
+
+ for {
+ c, err := listener.Accept()
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ go volumeServer.HandleTcpConnection(c)
+ }
+}
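startTcpService above is the canonical Go accept loop: block on Accept, hand each connection to its own goroutine, and leave the loop on a listener error. The same shape in isolation (the address and handler below are placeholders):

package main

import (
    "log"
    "net"
)

// serve accepts connections until the listener fails, handling each
// connection concurrently, in the same shape as startTcpService.
func serve(addr string, handle func(net.Conn)) error {
    ln, err := net.Listen("tcp", addr)
    if err != nil {
        return err
    }
    defer ln.Close()
    for {
        c, err := ln.Accept()
        if err != nil {
            return err // e.g. the listener was closed
        }
        go handle(c)
    }
}

func main() {
    log.Fatal(serve("127.0.0.1:9999", func(c net.Conn) { c.Close() }))
}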
diff --git a/weed/command/webdav.go b/weed/command/webdav.go
index 371c4a9ad..781ea1e36 100644
--- a/weed/command/webdav.go
+++ b/weed/command/webdav.go
@@ -1,17 +1,20 @@
package command
import (
+ "context"
"fmt"
"net/http"
+ "os"
"os/user"
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
)
var (
@@ -22,8 +25,12 @@ type WebDavOption struct {
filer *string
port *int
collection *string
+ replication *string
+ disk *string
tlsPrivateKey *string
tlsCertificate *string
+ cacheDir *string
+ cacheSizeMB *int64
}
func init() {
@@ -31,13 +38,17 @@ func init() {
webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address")
webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port")
webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
+ webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files")
+ webDavStandaloneOptions.disk = cmdWebDav.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
+ webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
+ webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB")
}
var cmdWebDav = &Command{
UsageLine: "webdav -port=7333 -filer=",
- Short: " start a webdav server that is backed by a filer",
+ Short: "start a webdav server that is backed by a filer",
Long: `start a webdav server that is backed by a filer.
`,
@@ -47,7 +58,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port)
+ glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
return webDavStandaloneOptions.startWebDav()
@@ -55,12 +66,6 @@ func runWebDav(cmd *Command, args []string) bool {
func (wo *WebDavOption) startWebDav() bool {
- filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer)
- if err != nil {
- glog.Fatal(err)
- return false
- }
-
// detect current user
uid, gid := uint32(0), uint32(0)
if u, err := user.Current(); err == nil {
@@ -72,13 +77,47 @@ func (wo *WebDavOption) startWebDav() bool {
}
}
+ // parse filer grpc address
+ filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*wo.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ var cipher bool
+ // connect to filer
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ break
+ }
+ }
+
ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
Filer: *wo.filer,
FilerGrpcAddress: filerGrpcAddress,
- GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"),
+ GrpcDialOption: grpcDialOption,
Collection: *wo.collection,
+ Replication: *wo.replication,
+ DiskType: *wo.disk,
Uid: uid,
Gid: gid,
+ Cipher: cipher,
+ CacheDir: util.ResolvePath(*wo.cacheDir),
+ CacheSizeMB: *wo.cacheSizeMB,
})
if webdavServer_err != nil {
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
@@ -93,12 +132,12 @@ func (wo *WebDavOption) startWebDav() bool {
}
if *wo.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port)
+ glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port)
+ glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
if err = httpS.Serve(webDavListener); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
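The webdav startup now blocks until the filer answers GetFilerConfiguration, retrying once per second, so the cipher setting is known before the server comes up. The retry shape in isolation (probe stands in for the gRPC call):

package main

import (
    "errors"
    "fmt"
    "time"
)

// retryUntilReady polls once per second until the probe succeeds,
// mirroring the connect loop in startWebDav above.
func retryUntilReady(probe func() error) {
    for {
        if err := probe(); err != nil {
            fmt.Println("waiting to connect:", err)
            time.Sleep(time.Second)
            continue
        }
        fmt.Println("connected")
        return
    }
}

func main() {
    attempts := 0
    retryUntilReady(func() error {
        attempts++
        if attempts < 3 {
            return errors.New("filer not up yet")
        }
        return nil
    })
}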
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
new file mode 100644
index 000000000..ab8f6bcbd
--- /dev/null
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -0,0 +1,364 @@
+package abstract_sql
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "strings"
+ "sync"
+)
+
+type SqlGenerator interface {
+ GetSqlInsert(tableName string) string
+ GetSqlUpdate(tableName string) string
+ GetSqlFind(tableName string) string
+ GetSqlDelete(tableName string) string
+ GetSqlDeleteFolderChildren(tableName string) string
+ GetSqlListExclusive(tableName string) string
+ GetSqlListInclusive(tableName string) string
+ GetSqlCreateTable(tableName string) string
+ GetSqlDropTable(tableName string) string
+}
+
+type AbstractSqlStore struct {
+ SqlGenerator
+ DB *sql.DB
+ SupportBucketTable bool
+ dbs map[string]bool
+ dbsLock sync.Mutex
+}
+
+func (store *AbstractSqlStore) OnBucketCreation(bucket string) {
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ store.CreateTable(context.Background(), bucket)
+
+ if store.dbs == nil {
+ return
+ }
+ store.dbs[bucket] = true
+}
+func (store *AbstractSqlStore) OnBucketDeletion(bucket string) {
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ store.deleteTable(context.Background(), bucket)
+
+ if store.dbs == nil {
+ return
+ }
+ delete(store.dbs, bucket)
+}
+
+const (
+ DEFAULT_TABLE = "filemeta"
+)
+
+type TxOrDB interface {
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+ QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+}
+
+func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
+ Isolation: sql.LevelReadCommitted,
+ ReadOnly: false,
+ })
+ if err != nil {
+ return ctx, err
+ }
+
+ return context.WithValue(ctx, "tx", tx), nil
+}
+func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx.Commit()
+ }
+ return nil
+}
+func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ return tx.Rollback()
+ }
+ return nil
+}
+
+func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) {
+
+ shortPath = fullpath
+ bucket = DEFAULT_TABLE
+
+ if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
+ txOrDB = tx
+ } else {
+ txOrDB = store.DB
+ }
+
+ if !store.SupportBucketTable {
+ return
+ }
+
+ if !strings.HasPrefix(string(fullpath), "/buckets/") {
+ return
+ }
+
+ // detect bucket
+ bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 && !isForChildren {
+ return
+ }
+ bucket = bucketAndObjectKey
+ shortPath = "/"
+ if t > 0 {
+ bucket = bucketAndObjectKey[:t]
+ shortPath = util.FullPath(bucketAndObjectKey[t:])
+ }
+
+ if isValidBucket(bucket) {
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ if store.dbs == nil {
+ store.dbs = make(map[string]bool)
+ }
+
+ if _, found := store.dbs[bucket]; !found {
+ if err = store.CreateTable(ctx, bucket); err == nil {
+ store.dbs[bucket] = true
+ }
+ }
+
+ }
+
+ return
+}
+
+func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ meta = util.MaybeGzipData(meta)
+ }
+
+ res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta)
+ if err == nil {
+ return
+ }
+
+ if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
+ // return fmt.Errorf("insert: %s", err)
+ // skip this since the error can be in a different language
+ }
+
+ // now the insert failed possibly due to duplication constraints
+ glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
+
+ res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
+ if err != nil {
+ return fmt.Errorf("upsert %s: %s", entry.FullPath, err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("upsert %s but no rows affected: %s", entry.FullPath, err)
+ }
+ return nil
+
+}
+
+func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
+ if err != nil {
+ return fmt.Errorf("update %s: %s", entry.FullPath, err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err)
+ }
+ return nil
+}
+
+func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
+ if err != nil {
+ return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir)
+
+ var data []byte
+ if err := row.Scan(&data); err != nil {
+ if err == sql.ErrNoRows {
+ return nil, filer_pb.ErrNotFound
+ }
+ return nil, fmt.Errorf("find %s: %v", fullpath, err)
+ }
+
+ entry := &filer.Entry{
+ FullPath: fullpath,
+ }
+ if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+
+ res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir)
+ if err != nil {
+ return fmt.Errorf("delete %s: %s", fullpath, err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ if isValidBucket(bucket) && shortPath == "/" {
+ if err = store.deleteTable(ctx, bucket); err == nil {
+ store.dbsLock.Lock()
+ delete(store.dbs, bucket)
+ store.dbsLock.Unlock()
+ return nil
+ } else {
+ return err
+ }
+ }
+
+ glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
+
+ res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
+ if err != nil {
+ return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
+ if err != nil {
+ return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
+ }
+
+ sqlText := store.GetSqlListExclusive(bucket)
+ if includeStartFile {
+ sqlText = store.GetSqlListInclusive(bucket)
+ }
+
+ rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
+ if err != nil {
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var name string
+ var data []byte
+ if err = rows.Scan(&name, &data); err != nil {
+ glog.V(0).Infof("scan %s : %v", dirPath, err)
+ return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
+ }
+ lastFileName = name
+
+ entry := &filer.Entry{
+ FullPath: util.NewFullPath(string(dirPath), name),
+ }
+ if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
+ glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+ return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
+ }
+
+ if !eachEntryFunc(entry) {
+ break
+ }
+
+ }
+
+ return lastFileName, nil
+}
+
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *AbstractSqlStore) Shutdown() {
+ store.DB.Close()
+}
+
+func isValidBucket(bucket string) bool {
+ return bucket != DEFAULT_TABLE && bucket != ""
+}
+
+func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error {
+ if !store.SupportBucketTable {
+ return nil
+ }
+ _, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
+ return err
+}
+
+func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
+ if !store.SupportBucketTable {
+ return nil
+ }
+ _, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
+ return err
+}
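getTxOrDB above routes paths under /buckets/ to a per-bucket table and rewrites the path relative to the bucket root; everything else stays in the shared filemeta table. A simplified standalone version of that split (it ignores the isForChildren special case and the lazy table creation, and a bucket root here always maps to its own table):

package main

import (
    "fmt"
    "strings"
)

// splitBucketPath maps a full path to (table, shortPath) the way
// getTxOrDB does: the first segment after /buckets/ picks the table,
// and the remainder becomes the path inside that table.
func splitBucketPath(fullpath string) (table, shortPath string) {
    table, shortPath = "filemeta", fullpath // DEFAULT_TABLE
    if !strings.HasPrefix(fullpath, "/buckets/") {
        return
    }
    rest := fullpath[len("/buckets/"):]
    if t := strings.Index(rest, "/"); t > 0 {
        return rest[:t], rest[t:]
    }
    return rest, "/"
}

func main() {
    fmt.Println(splitBucketPath("/buckets/photos/2021/a.jpg")) // photos /2021/a.jpg
    fmt.Println(splitBucketPath("/topics/x"))                  // filemeta /topics/x
}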
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
new file mode 100644
index 000000000..03b016c76
--- /dev/null
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -0,0 +1,105 @@
+package abstract_sql
+
+import (
+ "context"
+ "database/sql"
+ "encoding/base64"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "strings"
+)
+
+func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ db, _, _, err := store.getTxOrDB(ctx, "", false)
+ if err != nil {
+ return fmt.Errorf("findDB: %v", err)
+ }
+
+ dirStr, dirHash, name := genDirAndName(key)
+
+ res, err := db.ExecContext(ctx, store.GetSqlInsert(DEFAULT_TABLE), dirHash, name, dirStr, value)
+ if err == nil {
+ return
+ }
+
+ if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
+ // return fmt.Errorf("kv insert: %s", err)
+ // skip this since the error can be in a different language
+ }
+
+ // now the insert failed possibly due to duplication constraints
+ glog.V(1).Infof("kv insert falls back to update: %s", err)
+
+ res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
+ if err != nil {
+ return fmt.Errorf("kv upsert: %s", err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("kv upsert no rows affected: %s", err)
+ }
+ return nil
+
+}
+
+func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ db, _, _, err := store.getTxOrDB(ctx, "", false)
+ if err != nil {
+ return nil, fmt.Errorf("findDB: %v", err)
+ }
+
+ dirStr, dirHash, name := genDirAndName(key)
+ row := db.QueryRowContext(ctx, store.GetSqlFind(DEFAULT_TABLE), dirHash, name, dirStr)
+
+ err = row.Scan(&value)
+
+ if err == sql.ErrNoRows {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ return
+}
+
+func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ db, _, _, err := store.getTxOrDB(ctx, "", false)
+ if err != nil {
+ return fmt.Errorf("findDB: %v", err)
+ }
+
+ dirStr, dirHash, name := genDirAndName(key)
+
+ res, err := db.ExecContext(ctx, store.GetSqlDelete(DEFAULT_TABLE), dirHash, name, dirStr)
+ if err != nil {
+ return fmt.Errorf("kv delete: %s", err)
+ }
+
+ _, err = res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("kv delete no rows affected: %s", err)
+ }
+
+ return nil
+
+}
+
+func genDirAndName(key []byte) (dirStr string, dirHash int64, name string) {
+ for len(key) < 8 {
+ key = append(key, 0)
+ }
+
+ dirHash = int64(util.BytesToUint64(key[:8]))
+ dirStr = base64.StdEncoding.EncodeToString(key[:8])
+ name = base64.StdEncoding.EncodeToString(key[8:])
+
+ return
+}
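genDirAndName above lets arbitrary KV pairs reuse the entry table: the key is zero-padded to at least 8 bytes, the first 8 bytes double as the directory hash and (base64) directory column, and the remainder becomes the name column. An equivalent standalone sketch, assuming util.BytesToUint64 is big-endian:

package main

import (
    "encoding/base64"
    "encoding/binary"
    "fmt"
)

// kvColumns derives the (dirStr, dirHash, name) columns for a KV key
// the same way genDirAndName does above.
func kvColumns(key []byte) (dirStr string, dirHash int64, name string) {
    for len(key) < 8 {
        key = append(key, 0)
    }
    dirHash = int64(binary.BigEndian.Uint64(key[:8])) // assumed big-endian
    dirStr = base64.StdEncoding.EncodeToString(key[:8])
    name = base64.StdEncoding.EncodeToString(key[8:])
    return
}

func main() {
    d, h, n := kvColumns([]byte("some-kv-key"))
    fmt.Println(d, h, n)
}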
diff --git a/weed/filer2/cassandra/README.txt b/weed/filer/cassandra/README.txt
similarity index 100%
rename from weed/filer2/cassandra/README.txt
rename to weed/filer/cassandra/README.txt
diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go
new file mode 100644
index 000000000..fd2ce91a6
--- /dev/null
+++ b/weed/filer/cassandra/cassandra_store.go
@@ -0,0 +1,212 @@
+package cassandra
+
+import (
+ "context"
+ "fmt"
+ "github.com/gocql/gocql"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &CassandraStore{})
+}
+
+type CassandraStore struct {
+ cluster *gocql.ClusterConfig
+ session *gocql.Session
+ superLargeDirectoryHash map[string]string
+}
+
+func (store *CassandraStore) GetName() string {
+ return "cassandra"
+}
+
+func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"keyspace"),
+ configuration.GetStringSlice(prefix+"hosts"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetStringSlice(prefix+"superLargeDirectories"),
+ )
+}
+
+func (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string, isSuperLargeDirectory bool) {
+ dirHash, isSuperLargeDirectory = store.superLargeDirectoryHash[dir]
+ return
+}
+
+func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string) (err error) {
+ store.cluster = gocql.NewCluster(hosts...)
+ if username != "" && password != "" {
+ store.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password}
+ }
+ store.cluster.Keyspace = keyspace
+ store.cluster.Consistency = gocql.LocalQuorum
+ store.session, err = store.cluster.CreateSession()
+ if err != nil {
+ glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
+ }
+
+ // set directory hash
+ store.superLargeDirectoryHash = make(map[string]string)
+ existingHash := make(map[string]string)
+ for _, dir := range superLargeDirectories {
+ // adding dir hash to avoid duplicated names
+ dirHash := util.Md5String([]byte(dir))[:4]
+ store.superLargeDirectoryHash[dir] = dirHash
+ if existingDir, found := existingHash[dirHash]; found {
+ glog.Fatalf("directory %s has the same hash as %s", dir, existingDir)
+ }
+ existingHash[dirHash] = dir
+ }
+ return
+}
+
+func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *CassandraStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ dir, name := entry.FullPath.DirAndName()
+ if dirHash, ok := store.isSuperLargeDirectory(dir); ok {
+ dir, name = dirHash+name, ""
+ }
+
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ meta = util.MaybeGzipData(meta)
+ }
+
+ if err := store.session.Query(
+ "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ",
+ dir, name, meta, entry.TtlSec).Exec(); err != nil {
+ return fmt.Errorf("insert %s: %s", entry.FullPath, err)
+ }
+
+ return nil
+}
+
+func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
+
+ dir, name := fullpath.DirAndName()
+ if dirHash, ok := store.isSuperLargeDirectory(dir); ok {
+ dir, name = dirHash+name, ""
+ }
+
+ var data []byte
+ if err := store.session.Query(
+ "SELECT meta FROM filemeta WHERE directory=? AND name=?",
+ dir, name).Consistency(gocql.One).Scan(&data); err != nil {
+ if err != gocql.ErrNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ }
+
+ if len(data) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
+
+ dir, name := fullpath.DirAndName()
+ if dirHash, ok := store.isSuperLargeDirectory(dir); ok {
+ dir, name = dirHash+name, ""
+ }
+
+ if err := store.session.Query(
+ "DELETE FROM filemeta WHERE directory=? AND name=?",
+ dir, name).Exec(); err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+ if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok {
+ return nil // filer.ErrUnsupportedSuperLargeDirectoryListing
+ }
+
+ if err := store.session.Query(
+ "DELETE FROM filemeta WHERE directory=?",
+ fullpath).Exec(); err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok {
+ return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing
+ }
+
+ cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
+ if includeStartFile {
+ cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?"
+ }
+
+ var data []byte
+ var name string
+ iter := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter()
+ for iter.Scan(&name, &data) {
+ entry := &filer.Entry{
+ FullPath: util.NewFullPath(string(dirPath), name),
+ }
+ lastFileName = name
+ if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
+ }
+ if err := iter.Close(); err != nil {
+ glog.V(0).Infof("list iterator close: %v", err)
+ }
+
+ return lastFileName, err
+}
+
+func (store *CassandraStore) Shutdown() {
+ store.session.Close()
+}
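Each configured superLargeDirectories entry is replaced by a 4-character md5 prefix, so children of a huge directory land under short fixed partition keys; two directories sharing a prefix is treated as a fatal misconfiguration. The same derivation in isolation (util.Md5String is assumed to hex-encode an md5 sum):

package main

import (
    "crypto/md5"
    "fmt"
)

// buildDirHashes maps each directory to a 4-char md5 prefix and
// rejects prefix collisions, mirroring initialize above.
func buildDirHashes(dirs []string) (map[string]string, error) {
    byDir := make(map[string]string)
    byHash := make(map[string]string)
    for _, dir := range dirs {
        sum := md5.Sum([]byte(dir))
        h := fmt.Sprintf("%x", sum)[:4]
        if prev, found := byHash[h]; found {
            return nil, fmt.Errorf("directory %s has the same hash as %s", dir, prev)
        }
        byDir[dir] = h
        byHash[h] = dir
    }
    return byDir, nil
}

func main() {
    m, err := buildDirHashes([]string{"/home/users", "/data/logs"})
    fmt.Println(m, err)
}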
diff --git a/weed/filer/cassandra/cassandra_store_kv.go b/weed/filer/cassandra/cassandra_store_kv.go
new file mode 100644
index 000000000..dafa9bb15
--- /dev/null
+++ b/weed/filer/cassandra/cassandra_store_kv.go
@@ -0,0 +1,62 @@
+package cassandra
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/gocql/gocql"
+)
+
+func (store *CassandraStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ dir, name := genDirAndName(key)
+
+ if err := store.session.Query(
+ "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ",
+ dir, name, value, 0).Exec(); err != nil {
+ return fmt.Errorf("kv insert: %s", err)
+ }
+
+ return nil
+}
+
+func (store *CassandraStore) KvGet(ctx context.Context, key []byte) (data []byte, err error) {
+ dir, name := genDirAndName(key)
+
+ if err := store.session.Query(
+ "SELECT meta FROM filemeta WHERE directory=? AND name=?",
+ dir, name).Consistency(gocql.One).Scan(&data); err != nil {
+ if err != gocql.ErrNotFound {
+ return nil, filer.ErrKvNotFound
+ }
+ }
+
+ if len(data) == 0 {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return data, nil
+}
+
+func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err error) {
+ dir, name := genDirAndName(key)
+
+ if err := store.session.Query(
+ "DELETE FROM filemeta WHERE directory=? AND name=?",
+ dir, name).Exec(); err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
+
+func genDirAndName(key []byte) (dir string, name string) {
+ for len(key) < 8 {
+ key = append(key, 0)
+ }
+
+ dir = base64.StdEncoding.EncodeToString(key[:8])
+ name = base64.StdEncoding.EncodeToString(key[8:])
+
+ return
+}
diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go
new file mode 100644
index 000000000..9ef2f3e0f
--- /dev/null
+++ b/weed/filer/configuration.go
@@ -0,0 +1,93 @@
+package filer
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "os"
+ "reflect"
+ "strings"
+)
+
+var (
+ Stores []FilerStore
+)
+
+func (f *Filer) LoadConfiguration(config *util.ViperProxy) {
+
+ validateOneEnabledStore(config)
+
+ // load configuration for default filer store
+ hasDefaultStoreConfigured := false
+ for _, store := range Stores {
+ if config.GetBool(store.GetName() + ".enabled") {
+ store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore)
+ if err := store.Initialize(config, store.GetName()+"."); err != nil {
+ glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
+ }
+ f.SetStore(store)
+ glog.V(0).Infof("configured filer store to %s", store.GetName())
+ hasDefaultStoreConfigured = true
+ break
+ }
+ }
+
+ if !hasDefaultStoreConfigured {
+ println()
+ println("Supported filer stores are:")
+ for _, store := range Stores {
+ println(" " + store.GetName())
+ }
+ os.Exit(-1)
+ }
+
+ // load path-specific filer store here
+ // f.Store.AddPathSpecificStore(path, store)
+ storeNames := make(map[string]FilerStore)
+ for _, store := range Stores {
+ storeNames[store.GetName()] = store
+ }
+ allKeys := config.AllKeys()
+ for _, key := range allKeys {
+ if !strings.HasSuffix(key, ".enabled") {
+ continue
+ }
+ key = key[:len(key)-len(".enabled")]
+ if !strings.Contains(key, ".") {
+ continue
+ }
+
+ parts := strings.Split(key, ".")
+ storeName, storeId := parts[0], parts[1]
+
+ store, found := storeNames[storeName]
+ if !found {
+ continue
+ }
+ store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore)
+ if err := store.Initialize(config, key+"."); err != nil {
+ glog.Fatalf("Failed to initialize store for %s: %+v", key, err)
+ }
+ location := config.GetString(key + ".location")
+ if location == "" {
+ glog.Errorf("path-specific filer store needs %s", key+".location")
+ os.Exit(-1)
+ }
+ f.Store.AddPathSpecificStore(location, storeId, store)
+
+ glog.V(0).Infof("configure filer %s for %s", store.GetName(), location)
+ }
+
+}
+
+func validateOneEnabledStore(config *util.ViperProxy) {
+ enabledStore := ""
+ for _, store := range Stores {
+ if config.GetBool(store.GetName() + ".enabled") {
+ if enabledStore == "" {
+ enabledStore = store.GetName()
+ } else {
+ glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
+ }
+ }
+ }
+}
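LoadConfiguration above recognizes path-specific stores by the key shape <store>.<id>.enabled, with a required <store>.<id>.location sibling. A standalone sketch of that key parsing (the example keys are hypothetical):

package main

import (
    "fmt"
    "strings"
)

// parseStoreKey extracts the store name and instance id from a
// "<store>.<id>.enabled" configuration key; keys without an id
// (the default store) are rejected, as in LoadConfiguration above.
func parseStoreKey(key string) (storeName, storeId string, ok bool) {
    if !strings.HasSuffix(key, ".enabled") {
        return
    }
    key = strings.TrimSuffix(key, ".enabled")
    parts := strings.Split(key, ".")
    if len(parts) < 2 {
        return
    }
    return parts[0], parts[1], true
}

func main() {
    fmt.Println(parseStoreKey("mysql.archive.enabled")) // mysql archive true
    fmt.Println(parseStoreKey("leveldb2.enabled"))      // default store: no id
}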
diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go
new file mode 100644
index 000000000..a16e5ebca
--- /dev/null
+++ b/weed/filer/elastic/v7/elastic_store.go
@@ -0,0 +1,307 @@
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+ jsoniter "github.com/json-iterator/go"
+ elastic "github.com/olivere/elastic/v7"
+)
+
+var (
+ indexType = "_doc"
+ indexPrefix = ".seaweedfs_"
+ indexKV = ".seaweedfs_kv_entries"
+ kvMappings = ` {
+ "mappings": {
+ "enabled": false,
+ "properties": {
+ "Value":{
+ "type": "binary"
+ }
+ }
+ }
+ }`
+)
+
+type ESEntry struct {
+ ParentId string `json:"ParentId"`
+ Entry *filer.Entry
+}
+
+type ESKVEntry struct {
+ Value []byte `json:"Value"`
+}
+
+func init() {
+ filer.Stores = append(filer.Stores, &ElasticStore{})
+}
+
+type ElasticStore struct {
+ client *elastic.Client
+ maxPageSize int
+}
+
+func (store *ElasticStore) GetName() string {
+ return "elastic7"
+}
+
+func (store *ElasticStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ options := []elastic.ClientOptionFunc{}
+ servers := configuration.GetStringSlice(prefix + "servers")
+ options = append(options, elastic.SetURL(servers...))
+ username := configuration.GetString(prefix + "username")
+ password := configuration.GetString(prefix + "password")
+ if username != "" && password != "" {
+ options = append(options, elastic.SetBasicAuth(username, password))
+ }
+ options = append(options, elastic.SetSniff(configuration.GetBool(prefix+"sniff_enabled")))
+ options = append(options, elastic.SetHealthcheck(configuration.GetBool(prefix+"healthcheck_enabled")))
+ store.maxPageSize = configuration.GetInt(prefix + "index.max_result_window")
+ if store.maxPageSize <= 0 {
+ store.maxPageSize = 10000
+ }
+ glog.Infof("filer store elastic endpoints: %v.", servers)
+ return store.initialize(options)
+}
+
+func (store *ElasticStore) initialize(options []elastic.ClientOptionFunc) (err error) {
+ ctx := context.Background()
+ store.client, err = elastic.NewClient(options...)
+ if err != nil {
+ return fmt.Errorf("init elastic %v.", err)
+ }
+ if ok, err := store.client.IndexExists(indexKV).Do(ctx); err == nil && !ok {
+ _, err = store.client.CreateIndex(indexKV).Body(kvMappings).Do(ctx)
+ if err != nil {
+ return fmt.Errorf("create index(%s) %v.", indexKV, err)
+ }
+ }
+ return nil
+}
+
+func (store *ElasticStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *ElasticStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *ElasticStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ index := getIndex(entry.FullPath, false)
+ dir, _ := entry.FullPath.DirAndName()
+ id := weed_util.Md5String([]byte(entry.FullPath))
+ esEntry := &ESEntry{
+ ParentId: weed_util.Md5String([]byte(dir)),
+ Entry: entry,
+ }
+ value, err := jsoniter.Marshal(esEntry)
+ if err != nil {
+ glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+ return fmt.Errorf("insert entry %v.", err)
+ }
+ _, err = store.client.Index().
+ Index(index).
+ Type(indexType).
+ Id(id).
+ BodyJson(string(value)).
+ Do(ctx)
+ if err != nil {
+ glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+ return fmt.Errorf("insert entry %v.", err)
+ }
+ return nil
+}
+
+func (store *ElasticStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
+ index := getIndex(fullpath, false)
+ id := weed_util.Md5String([]byte(fullpath))
+ searchResult, err := store.client.Get().
+ Index(index).
+ Type(indexType).
+ Id(id).
+ Do(ctx)
+ if elastic.IsNotFound(err) {
+ return nil, filer_pb.ErrNotFound
+ }
+ if searchResult != nil && searchResult.Found {
+ esEntry := &ESEntry{
+ ParentId: "",
+ Entry: &filer.Entry{},
+ }
+ err := jsoniter.Unmarshal(searchResult.Source, esEntry)
+ return esEntry.Entry, err
+ }
+ glog.Errorf("find entry(%s),%v.", string(fullpath), err)
+ return nil, filer_pb.ErrNotFound
+}
+
+func (store *ElasticStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ index := getIndex(fullpath, false)
+ id := weed_util.Md5String([]byte(fullpath))
+ if strings.Count(string(fullpath), "/") == 1 {
+ return store.deleteIndex(ctx, index)
+ }
+ return store.deleteEntry(ctx, index, id)
+}
+
+func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err error) {
+ deleteResult, err := store.client.DeleteIndex(index).Do(ctx)
+ if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
+ return nil
+ }
+ glog.Errorf("delete index(%s) %v.", index, err)
+ return err
+}
+
+func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (err error) {
+ deleteResult, err := store.client.Delete().
+ Index(index).
+ Type(indexType).
+ Id(id).
+ Do(ctx)
+ if err == nil {
+ if deleteResult.Result == "deleted" || deleteResult.Result == "not_found" {
+ return nil
+ }
+ }
+ glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
+ return fmt.Errorf("delete entry %v.", err)
+}
+
+func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
+ if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
+ glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
+ return false
+ }
+ return true
+ })
+ return
+}
+
+func (store *ElasticStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.listDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
+}
+
+func (store *ElasticStore) listDirectoryEntries(
+ ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ first := true
+ index := getIndex(fullpath, true)
+ nextStart := ""
+ parentId := weed_util.Md5String([]byte(fullpath))
+ if _, err = store.client.Refresh(index).Do(ctx); err != nil {
+ if elastic.IsNotFound(err) {
+ store.client.CreateIndex(index).Do(ctx)
+ return
+ }
+ }
+ for {
+ result := &elastic.SearchResult{}
+ if (startFileName == "" && first) || inclusive {
+ if result, err = store.search(ctx, index, parentId); err != nil {
+ glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+ return
+ }
+ } else {
+ fullPath := string(fullpath) + "/" + startFileName
+ if !first {
+ fullPath = nextStart
+ }
+ after := weed_util.Md5String([]byte(fullPath))
+ if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
+ glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+ return
+ }
+ }
+ first = false
+ for _, hit := range result.Hits.Hits {
+ esEntry := &ESEntry{
+ ParentId: "",
+ Entry: &filer.Entry{},
+ }
+ if err := jsoniter.Unmarshal(hit.Source, esEntry); err == nil {
+ limit--
+ if limit < 0 {
+ return lastFileName, nil
+ }
+ nextStart = string(esEntry.Entry.FullPath)
+ fileName := esEntry.Entry.FullPath.Name()
+ if fileName == startFileName && !inclusive {
+ continue
+ }
+ if !eachEntryFunc(esEntry.Entry) {
+ break
+ }
+ lastFileName = fileName
+ }
+ }
+ if len(result.Hits.Hits) < store.maxPageSize {
+ break
+ }
+ }
+ return
+}
+
+func (store *ElasticStore) search(ctx context.Context, index, parentId string) (result *elastic.SearchResult, err error) {
+ if count, err := store.client.Count(index).Do(ctx); err == nil && count == 0 {
+ return &elastic.SearchResult{
+ Hits: &elastic.SearchHits{
+ Hits: make([]*elastic.SearchHit, 0)},
+ }, nil
+ }
+ queryResult, err := store.client.Search().
+ Index(index).
+ Query(elastic.NewMatchQuery("ParentId", parentId)).
+ Size(store.maxPageSize).
+ Sort("_id", false).
+ Do(ctx)
+ return queryResult, err
+}
+
+func (store *ElasticStore) searchAfter(ctx context.Context, index, parentId, after string) (result *elastic.SearchResult, err error) {
+ queryResult, err := store.client.Search().
+ Index(index).
+ Query(elastic.NewMatchQuery("ParentId", parentId)).
+ SearchAfter(after).
+ Size(store.maxPageSize).
+ Sort("_id", false).
+ Do(ctx)
+ return queryResult, err
+
+}
+
+func (store *ElasticStore) Shutdown() {
+ store.client.Stop()
+}
+
+func getIndex(fullpath weed_util.FullPath, isDirectory bool) string {
+ path := strings.Split(string(fullpath), "/")
+ if isDirectory && len(path) >= 2 {
+ return indexPrefix + strings.ToLower(path[1])
+ }
+ if len(path) > 2 {
+ return indexPrefix + strings.ToLower(path[1])
+ }
+ if len(path) == 2 {
+ return indexPrefix
+ }
+ return ""
+}
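getIndex above derives the Elasticsearch index from the first path segment, lowercased and prefixed with .seaweedfs_; a bare one-segment path maps to the prefix alone. The same mapping in isolation:

package main

import (
    "fmt"
    "strings"
)

// indexFor reproduces getIndex: /<top>/... maps to ".seaweedfs_<top>",
// a single-segment path maps to the bare prefix, and the root to "".
func indexFor(fullpath string, isDirectory bool) string {
    const prefix = ".seaweedfs_"
    parts := strings.Split(fullpath, "/")
    if isDirectory && len(parts) >= 2 {
        return prefix + strings.ToLower(parts[1])
    }
    if len(parts) > 2 {
        return prefix + strings.ToLower(parts[1])
    }
    if len(parts) == 2 {
        return prefix
    }
    return ""
}

func main() {
    fmt.Println(indexFor("/buckets/photos/a.jpg", false)) // .seaweedfs_buckets
    fmt.Println(indexFor("/buckets", false))              // .seaweedfs_
}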
diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go
new file mode 100644
index 000000000..99c03314e
--- /dev/null
+++ b/weed/filer/elastic/v7/elastic_store_kv.go
@@ -0,0 +1,65 @@
+package elastic
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ jsoniter "github.com/json-iterator/go"
+ elastic "github.com/olivere/elastic/v7"
+)
+
+func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) {
+ deleteResult, err := store.client.Delete().
+ Index(indexKV).
+ Type(indexType).
+ Id(string(key)).
+ Do(ctx)
+ if err == nil {
+ if deleteResult.Result == "deleted" || deleteResult.Result == "not_found" {
+ return nil
+ }
+ }
+ glog.Errorf("delete key(id:%s) %v.", string(key), err)
+ return fmt.Errorf("delete key %v.", err)
+}
+
+func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ searchResult, err := store.client.Get().
+ Index(indexKV).
+ Type(indexType).
+ Id(string(key)).
+ Do(ctx)
+ if elastic.IsNotFound(err) {
+ return value, filer.ErrKvNotFound
+ }
+ if searchResult != nil && searchResult.Found {
+ esEntry := &ESKVEntry{}
+ if err := jsoniter.Unmarshal(searchResult.Source, esEntry); err == nil {
+ return esEntry.Value, nil
+ }
+ }
+ glog.Errorf("find key(%s),%v.", string(key), err)
+ return value, filer.ErrKvNotFound
+}
+
+func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ esEntry := &ESKVEntry{value}
+ val, err := jsoniter.Marshal(esEntry)
+ if err != nil {
+ glog.Errorf("insert key(%s) %v.", string(key), err)
+ return fmt.Errorf("insert key %v.", err)
+ }
+ _, err = store.client.Index().
+ Index(indexKV).
+ Type(indexType).
+ Id(string(key)).
+ BodyJson(string(val)).
+ Do(ctx)
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+ return nil
+}
diff --git a/weed/filer/entry.go b/weed/filer/entry.go
new file mode 100644
index 000000000..b7c8370e6
--- /dev/null
+++ b/weed/filer/entry.go
@@ -0,0 +1,113 @@
+package filer
+
+import (
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type Attr struct {
+ Mtime time.Time // time of last modification
+ Crtime time.Time // time of creation (OS X only)
+ Mode os.FileMode // file mode
+ Uid uint32 // owner uid
+ Gid uint32 // group gid
+ Mime string // mime type
+ Replication string // replication
+ Collection string // collection name
+ TtlSec int32 // ttl in seconds
+ DiskType string
+ UserName string
+ GroupNames []string
+ SymlinkTarget string
+ Md5 []byte
+ FileSize uint64
+}
+
+func (attr Attr) IsDirectory() bool {
+ return attr.Mode&os.ModeDir > 0
+}
+
+type Entry struct {
+ util.FullPath
+
+ Attr
+ Extended map[string][]byte
+
+ // the following is for files
+ Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
+
+ HardLinkId HardLinkId
+ HardLinkCounter int32
+ Content []byte
+}
+
+func (entry *Entry) Size() uint64 {
+ return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content)))
+}
+
+func (entry *Entry) Timestamp() time.Time {
+ if entry.IsDirectory() {
+ return entry.Crtime
+ } else {
+ return entry.Mtime
+ }
+}
+
+func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
+ if entry == nil {
+ return nil
+ }
+ return &filer_pb.Entry{
+ Name: entry.FullPath.Name(),
+ IsDirectory: entry.IsDirectory(),
+ Attributes: EntryAttributeToPb(entry),
+ Chunks: entry.Chunks,
+ Extended: entry.Extended,
+ HardLinkId: entry.HardLinkId,
+ HardLinkCounter: entry.HardLinkCounter,
+ Content: entry.Content,
+ }
+}
+
+func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
+ if entry == nil {
+ return nil
+ }
+ dir, _ := entry.FullPath.DirAndName()
+ return &filer_pb.FullEntry{
+ Dir: dir,
+ Entry: entry.ToProtoEntry(),
+ }
+}
+
+func (entry *Entry) Clone() *Entry {
+ return &Entry{
+ FullPath: entry.FullPath,
+ Attr: entry.Attr,
+ Chunks: entry.Chunks,
+ Extended: entry.Extended,
+ HardLinkId: entry.HardLinkId,
+ HardLinkCounter: entry.HardLinkCounter,
+ }
+}
+
+func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
+ return &Entry{
+ FullPath: util.NewFullPath(dir, entry.Name),
+ Attr: PbToEntryAttribute(entry.Attributes),
+ Chunks: entry.Chunks,
+ HardLinkId: HardLinkId(entry.HardLinkId),
+ HardLinkCounter: entry.HardLinkCounter,
+ Content: entry.Content,
+ }
+}
+
+func maxUint64(x, y uint64) uint64 {
+ if x > y {
+ return x
+ }
+ return y
+}
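Size above reconciles three possible size sources: the total of the chunk sizes, the FileSize attribute, and any inlined Content; the maximum of the three wins, so an entry whose chunks lag behind its recorded attribute still reports its full logical size. A standalone restatement with a worked example:

package main

import "fmt"

// entrySize mirrors Entry.Size above: max of chunk total, recorded
// FileSize, and the length of inlined content.
func entrySize(chunkTotal, fileSize uint64, content []byte) uint64 {
    greater := func(x, y uint64) uint64 {
        if x > y {
            return x
        }
        return y
    }
    return greater(greater(chunkTotal, fileSize), uint64(len(content)))
}

func main() {
    fmt.Println(entrySize(4096, 5000, nil))             // 5000: the attribute wins
    fmt.Println(entrySize(0, 0, []byte("inline data"))) // 11: content wins
}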
diff --git a/weed/filer2/entry_codec.go b/weed/filer/entry_codec.go
similarity index 73%
rename from weed/filer2/entry_codec.go
rename to weed/filer/entry_codec.go
index 3a2dc6134..4c613f068 100644
--- a/weed/filer2/entry_codec.go
+++ b/weed/filer/entry_codec.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
import (
"bytes"
@@ -13,9 +13,12 @@ import (
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
message := &filer_pb.Entry{
- Attributes: EntryAttributeToPb(entry),
- Chunks: entry.Chunks,
- Extended: entry.Extended,
+ Attributes: EntryAttributeToPb(entry),
+ Chunks: entry.Chunks,
+ Extended: entry.Extended,
+ HardLinkId: entry.HardLinkId,
+ HardLinkCounter: entry.HardLinkCounter,
+ Content: entry.Content,
}
return proto.Marshal(message)
}
@@ -34,6 +37,10 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error {
entry.Chunks = message.Chunks
+ entry.HardLinkId = message.HardLinkId
+ entry.HardLinkCounter = message.HardLinkCounter
+ entry.Content = message.Content
+
return nil
}
@@ -49,9 +56,12 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
Collection: entry.Attr.Collection,
Replication: entry.Attr.Replication,
TtlSec: entry.Attr.TtlSec,
+ DiskType: entry.Attr.DiskType,
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
+ Md5: entry.Attr.Md5,
+ FileSize: entry.Attr.FileSize,
}
}
@@ -59,6 +69,10 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t := Attr{}
+ if attr == nil {
+ return t
+ }
+
t.Crtime = time.Unix(attr.Crtime, 0)
t.Mtime = time.Unix(attr.Mtime, 0)
t.Mode = os.FileMode(attr.FileMode)
@@ -68,9 +82,12 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.Collection = attr.Collection
t.Replication = attr.Replication
t.TtlSec = attr.TtlSec
+ t.DiskType = attr.DiskType
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget
+ t.Md5 = attr.Md5
+ t.FileSize = attr.FileSize
return t
}
@@ -93,11 +110,25 @@ func EqualEntry(a, b *Entry) bool {
return false
}
+ if !bytes.Equal(a.Md5, b.Md5) {
+ return false
+ }
+
for i := 0; i < len(a.Chunks); i++ {
if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
return false
}
}
+
+ if !bytes.Equal(a.HardLinkId, b.HardLinkId) {
+ return false
+ }
+ if a.HardLinkCounter != b.HardLinkCounter {
+ return false
+ }
+ if !bytes.Equal(a.Content, b.Content) {
+ return false
+ }
return true
}
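Several stores in this patch gzip the encoded meta blob once an entry has more than 50 chunks and always pass reads through MaybeDecompressData. A sketch of that pairing under stated assumptions; the real helpers live in weed/util and may detect gzip differently (for example via magic bytes):

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

// maybeGzip compresses data, keeping the original if compression
// does not shrink it, in the spirit of util.MaybeGzipData.
func maybeGzip(data []byte) []byte {
    var buf bytes.Buffer
    w := gzip.NewWriter(&buf)
    w.Write(data)
    w.Close()
    if buf.Len() >= len(data) {
        return data
    }
    return buf.Bytes()
}

// maybeDecompress returns data as-is when it is not gzipped,
// in the spirit of util.MaybeDecompressData.
func maybeDecompress(data []byte) []byte {
    r, err := gzip.NewReader(bytes.NewReader(data))
    if err != nil {
        return data // not gzipped
    }
    defer r.Close()
    out, err := io.ReadAll(r)
    if err != nil {
        return data
    }
    return out
}

func main() {
    meta := bytes.Repeat([]byte("chunk"), 200)
    fmt.Println(bytes.Equal(meta, maybeDecompress(maybeGzip(meta)))) // true
}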
diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
similarity index 60%
rename from weed/filer2/etcd/etcd_store.go
rename to weed/filer/etcd/etcd_store.go
index 2eb9e3e86..71ed738f9 100644
--- a/weed/filer2/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -1,15 +1,18 @@
package etcd
import (
+ "bytes"
"context"
"fmt"
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "go.etcd.io/etcd/clientv3"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
- "go.etcd.io/etcd/clientv3"
)
const (
@@ -17,7 +20,7 @@ const (
)
func init() {
- filer2.Stores = append(filer2.Stores, &EtcdStore{})
+ filer.Stores = append(filer.Stores, &EtcdStore{})
}
type EtcdStore struct {
@@ -28,13 +31,13 @@ func (store *EtcdStore) GetName() string {
return "etcd"
}
-func (store *EtcdStore) Initialize(configuration weed_util.Configuration) (err error) {
- servers := configuration.GetString("servers")
+func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ servers := configuration.GetString(prefix + "servers")
if servers == "" {
servers = "localhost:2379"
}
- timeout := configuration.GetString("timeout")
+ timeout := configuration.GetString(prefix + "timeout")
if timeout == "" {
timeout = "3s"
}
@@ -71,41 +74,45 @@ func (store *EtcdStore) RollbackTransaction(ctx context.Context) error {
return nil
}
-func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
key := genKey(entry.DirAndName())
- value, err := entry.EncodeAttributesAndChunks()
+ meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
- if _, err := store.client.Put(ctx, string(key), string(value)); err != nil {
+ if len(entry.Chunks) > 50 {
+ meta = weed_util.MaybeGzipData(meta)
+ }
+
+ if _, err := store.client.Put(ctx, string(key), string(meta)); err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
return nil
}
-func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
-func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
key := genKey(fullpath.DirAndName())
resp, err := store.client.Get(ctx, string(key))
if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
if len(resp.Kvs) == 0 {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
- entry = &filer2.Entry{
+ entry = &filer.Entry{
FullPath: fullpath,
}
- err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value)
+ err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(resp.Kvs[0].Value))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
@@ -113,7 +120,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath)
return entry, nil
}
-func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
if _, err := store.client.Delete(ctx, string(key)); err != nil {
@@ -123,7 +130,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat
return nil
}
-func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil {
@@ -133,41 +140,53 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer
return nil
}
-func (store *EtcdStore) ListDirectoryEntries(
- ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer2.Entry, err error) {
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
+}
- resp, err := store.client.Get(ctx, string(directoryPrefix),
+func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ directoryPrefix := genDirectoryKeyPrefix(dirPath, "")
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+ }
+
+ resp, err := store.client.Get(ctx, string(lastFileStart),
clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
for _, kv := range resp.Kvs {
+ if !bytes.HasPrefix(kv.Key, directoryPrefix) {
+ break
+ }
fileName := getNameFromKey(kv.Key)
if fileName == "" {
continue
}
- if fileName == startFileName && !inclusive {
+ if fileName == startFileName && !includeStartFile {
continue
}
limit--
if limit < 0 {
break
}
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
- if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {
+ if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
+ lastFileName = fileName
}
- return entries, err
+ return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {
@@ -177,7 +196,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@@ -194,3 +213,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
+
+func (store *EtcdStore) Shutdown() {
+ store.client.Close()
+}
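For orientation, the pagination contract the rewritten etcd listing implements, reduced to a self-contained sketch: keys are laid out as dir + separator + name, the scan seeks to the start key, skips the start file unless it is included, stops at the limit, and reports the last file name so the next page can resume from it. The separator byte and the in-memory key slice below are stand-ins for the real etcd keyspace, and the sketch scans in ascending key order for readability.

package main

import (
	"fmt"
	"sort"
	"strings"
)

const dirFileSeparator = "\x00" // assumption: a separator byte that sorts low

func genKey(dir, name string) string { return dir + dirFileSeparator + name }

// listPrefixed mimics the new ListDirectoryEntries contract: seek to the
// start file, honor includeStartFile, stop at limit, return the last name.
func listPrefixed(keys []string, dir, startFileName string, includeStartFile bool, limit int64, each func(name string) bool) (lastFileName string) {
	sort.Strings(keys)
	start := genKey(dir, startFileName)
	dirPrefix := dir + dirFileSeparator
	for _, k := range keys {
		if k < start || !strings.HasPrefix(k, dirPrefix) {
			continue
		}
		name := strings.TrimPrefix(k, dirPrefix)
		if name == startFileName && !includeStartFile {
			continue
		}
		limit--
		if limit < 0 {
			break
		}
		if !each(name) {
			break
		}
		lastFileName = name
	}
	return lastFileName
}

func main() {
	keys := []string{genKey("/d", "a"), genKey("/d", "b"), genKey("/d", "c")}
	last := listPrefixed(keys, "/d", "a", false, 2, func(name string) bool {
		fmt.Println("entry:", name) // b, then c
		return true
	})
	fmt.Println("resume from:", last) // "c"
}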
diff --git a/weed/filer/etcd/etcd_store_kv.go b/weed/filer/etcd/etcd_store_kv.go
new file mode 100644
index 000000000..df252f46c
--- /dev/null
+++ b/weed/filer/etcd/etcd_store_kv.go
@@ -0,0 +1,44 @@
+package etcd
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ _, err = store.client.Put(ctx, string(key), string(value))
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ resp, err := store.client.Get(ctx, string(key))
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ if len(resp.Kvs) == 0 {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return resp.Kvs[0].Value, nil
+}
+
+func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ _, err = store.client.Delete(ctx, string(key))
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
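A minimal stand-in illustrating the contract these three Kv* methods are expected to satisfy, in particular that a missing key surfaces as filer.ErrKvNotFound rather than a nil value with a nil error. The map-backed store and local error value here are illustrative only, not the real etcd-backed implementation.

package main

import (
	"context"
	"errors"
	"fmt"
)

var ErrKvNotFound = errors.New("kv: not found") // stands in for filer.ErrKvNotFound

type memKv struct{ m map[string][]byte }

func (s *memKv) KvPut(_ context.Context, key, value []byte) error {
	s.m[string(key)] = value
	return nil
}

func (s *memKv) KvGet(_ context.Context, key []byte) ([]byte, error) {
	v, ok := s.m[string(key)]
	if !ok {
		return nil, ErrKvNotFound // never (nil, nil) for a missing key
	}
	return v, nil
}

func (s *memKv) KvDelete(_ context.Context, key []byte) error {
	delete(s.m, string(key))
	return nil
}

func main() {
	s := &memKv{m: map[string][]byte{}}
	ctx := context.Background()
	_ = s.KvPut(ctx, []byte("filer.store.id"), []byte{0, 0, 0, 1})
	v, err := s.KvGet(ctx, []byte("filer.store.id"))
	fmt.Println(v, err) // [0 0 0 1] <nil>
	_ = s.KvDelete(ctx, []byte("filer.store.id"))
	_, err = s.KvGet(ctx, []byte("filer.store.id"))
	fmt.Println(errors.Is(err, ErrKvNotFound)) // true
}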
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
new file mode 100644
index 000000000..c709dc819
--- /dev/null
+++ b/weed/filer/filechunk_manifest.go
@@ -0,0 +1,194 @@
+package filer
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "io"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ ManifestBatch = 1000
+)
+
+func HasChunkManifest(chunks []*filer_pb.FileChunk) bool {
+ for _, chunk := range chunks {
+ if chunk.IsChunkManifest {
+ return true
+ }
+ }
+ return false
+}
+
+func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonManifestChunks []*filer_pb.FileChunk) {
+ for _, c := range chunks {
+ if c.IsChunkManifest {
+ manifestChunks = append(manifestChunks, c)
+ } else {
+ nonManifestChunks = append(nonManifestChunks, c)
+ }
+ }
+ return
+}
+
+func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
+ // TODO maybe parallelize this
+ for _, chunk := range chunks {
+ if !chunk.IsChunkManifest {
+ dataChunks = append(dataChunks, chunk)
+ continue
+ }
+
+ resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk)
+ if err != nil {
+ return chunks, nil, err
+ }
+
+ manifestChunks = append(manifestChunks, chunk)
+ // recursive
+ dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks)
+ if subErr != nil {
+ return chunks, nil, subErr
+ }
+ dataChunks = append(dataChunks, dchunks...)
+ manifestChunks = append(manifestChunks, mchunks...)
+ }
+ return
+}
+
+func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
+ if !chunk.IsChunkManifest {
+ return
+ }
+
+ // IsChunkManifest
+ data, err := fetchChunk(lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed)
+ if err != nil {
+ return nil, fmt.Errorf("fail to read manifest %s: %v", chunk.GetFileIdString(), err)
+ }
+ m := &filer_pb.FileChunkManifest{}
+ if err := proto.Unmarshal(data, m); err != nil {
+ return nil, fmt.Errorf("fail to unmarshal manifest %s: %v", chunk.GetFileIdString(), err)
+ }
+
+ // recursive
+ filer_pb.AfterEntryDeserialization(m.Chunks)
+ return m.Chunks, nil
+}
+
+// TODO fetch from cache for weed mount?
+func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
+ urlStrings, err := lookupFileIdFn(fileId)
+ if err != nil {
+ glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+ return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
+}
+
+func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) ([]byte, error) {
+
+ var err error
+ var shouldRetry bool
+ receivedData := make([]byte, 0, size)
+
+ for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
+ for _, urlString := range urlStrings {
+ receivedData = receivedData[:0]
+ shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+ receivedData = append(receivedData, data...)
+ })
+ if !shouldRetry {
+ break
+ }
+ if err != nil {
+ glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+ } else {
+ break
+ }
+ }
+ if err != nil && shouldRetry {
+ glog.V(0).Infof("retry reading in %v", waitTime)
+ time.Sleep(waitTime)
+ } else {
+ break
+ }
+ }
+
+ return receivedData, err
+
+}
+
+func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) {
+ return doMaybeManifestize(saveFunc, inputChunks, ManifestBatch, mergeIntoManifest)
+}
+
+func doMaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk, mergeFactor int, mergefn func(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error)) (chunks []*filer_pb.FileChunk, err error) {
+
+ var dataChunks []*filer_pb.FileChunk
+ for _, chunk := range inputChunks {
+ if !chunk.IsChunkManifest {
+ dataChunks = append(dataChunks, chunk)
+ } else {
+ chunks = append(chunks, chunk)
+ }
+ }
+
+ remaining := len(dataChunks)
+ for i := 0; i+mergeFactor <= len(dataChunks); i += mergeFactor {
+ chunk, err := mergefn(saveFunc, dataChunks[i:i+mergeFactor])
+ if err != nil {
+ return dataChunks, err
+ }
+ chunks = append(chunks, chunk)
+ remaining -= mergeFactor
+ }
+ // remaining
+ for i := len(dataChunks) - remaining; i < len(dataChunks); i++ {
+ chunks = append(chunks, dataChunks[i])
+ }
+ return
+}
+
+func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) {
+
+ filer_pb.BeforeEntrySerialization(dataChunks)
+
+ // create and serialize the manifest
+ data, serErr := proto.Marshal(&filer_pb.FileChunkManifest{
+ Chunks: dataChunks,
+ })
+ if serErr != nil {
+ return nil, fmt.Errorf("serializing manifest: %v", serErr)
+ }
+
+ minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64)
+ for _, chunk := range dataChunks {
+ if minOffset > int64(chunk.Offset) {
+ minOffset = chunk.Offset
+ }
+ if maxOffset < int64(chunk.Size)+chunk.Offset {
+ maxOffset = int64(chunk.Size) + chunk.Offset
+ }
+ }
+
+ manifestChunk, _, _, err = saveFunc(bytes.NewReader(data), "", 0)
+ if err != nil {
+ return nil, err
+ }
+ manifestChunk.IsChunkManifest = true
+ manifestChunk.Offset = minOffset
+ manifestChunk.Size = uint64(maxOffset - minOffset)
+
+ return
+}
+
+type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error)
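The retry loop in retriedFetchChunkData grows the wait by half of itself each round, i.e. a 1.5x backoff starting at one second and capped by util.RetryWaitTime. A dry run of that schedule; the cap value here is an assumption for illustration, since the real constant is defined in weed/util rather than in this file.

package main

import (
	"fmt"
	"time"
)

func main() {
	retryWaitTime := 30 * time.Second // assumed cap, standing in for util.RetryWaitTime
	total := time.Duration(0)
	// same growth rule as the loop above: waitTime += waitTime / 2
	for waitTime := time.Second; waitTime < retryWaitTime; waitTime += waitTime / 2 {
		fmt.Println("would sleep", waitTime) // 1s, 1.5s, 2.25s, ...
		total += waitTime
	}
	fmt.Println("worst-case total wait:", total)
}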
diff --git a/weed/filer/filechunk_manifest_test.go b/weed/filer/filechunk_manifest_test.go
new file mode 100644
index 000000000..ce12c5da6
--- /dev/null
+++ b/weed/filer/filechunk_manifest_test.go
@@ -0,0 +1,113 @@
+package filer
+
+import (
+ "bytes"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func TestDoMaybeManifestize(t *testing.T) {
+ var manifestTests = []struct {
+ inputs []*filer_pb.FileChunk
+ expected []*filer_pb.FileChunk
+ }{
+ {
+ inputs: []*filer_pb.FileChunk{
+ {FileId: "1", IsChunkManifest: false},
+ {FileId: "2", IsChunkManifest: false},
+ {FileId: "3", IsChunkManifest: false},
+ {FileId: "4", IsChunkManifest: false},
+ },
+ expected: []*filer_pb.FileChunk{
+ {FileId: "12", IsChunkManifest: true},
+ {FileId: "34", IsChunkManifest: true},
+ },
+ },
+ {
+ inputs: []*filer_pb.FileChunk{
+ {FileId: "1", IsChunkManifest: true},
+ {FileId: "2", IsChunkManifest: false},
+ {FileId: "3", IsChunkManifest: false},
+ {FileId: "4", IsChunkManifest: false},
+ },
+ expected: []*filer_pb.FileChunk{
+ {FileId: "1", IsChunkManifest: true},
+ {FileId: "23", IsChunkManifest: true},
+ {FileId: "4", IsChunkManifest: false},
+ },
+ },
+ {
+ inputs: []*filer_pb.FileChunk{
+ {FileId: "1", IsChunkManifest: false},
+ {FileId: "2", IsChunkManifest: true},
+ {FileId: "3", IsChunkManifest: false},
+ {FileId: "4", IsChunkManifest: false},
+ },
+ expected: []*filer_pb.FileChunk{
+ {FileId: "2", IsChunkManifest: true},
+ {FileId: "13", IsChunkManifest: true},
+ {FileId: "4", IsChunkManifest: false},
+ },
+ },
+ {
+ inputs: []*filer_pb.FileChunk{
+ {FileId: "1", IsChunkManifest: true},
+ {FileId: "2", IsChunkManifest: true},
+ {FileId: "3", IsChunkManifest: false},
+ {FileId: "4", IsChunkManifest: false},
+ },
+ expected: []*filer_pb.FileChunk{
+ {FileId: "1", IsChunkManifest: true},
+ {FileId: "2", IsChunkManifest: true},
+ {FileId: "34", IsChunkManifest: true},
+ },
+ },
+ }
+
+ for i, mtest := range manifestTests {
+ println("test", i)
+ actual, _ := doMaybeManifestize(nil, mtest.inputs, 2, mockMerge)
+ assertEqualChunks(t, mtest.expected, actual)
+ }
+
+}
+
+func assertEqualChunks(t *testing.T, expected, actual []*filer_pb.FileChunk) {
+ assert.Equal(t, len(expected), len(actual))
+ for i := 0; i < len(actual); i++ {
+ assertEqualChunk(t, expected[i], actual[i])
+ }
+}
+func assertEqualChunk(t *testing.T, expected, actual *filer_pb.FileChunk) {
+ assert.Equal(t, expected.FileId, actual.FileId)
+ assert.Equal(t, expected.IsChunkManifest, actual.IsChunkManifest)
+}
+
+func mockMerge(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) {
+
+ var buf bytes.Buffer
+ minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64)
+ for k := 0; k < len(dataChunks); k++ {
+ chunk := dataChunks[k]
+ buf.WriteString(chunk.FileId)
+ if minOffset > int64(chunk.Offset) {
+ minOffset = chunk.Offset
+ }
+ if maxOffset < int64(chunk.Size)+chunk.Offset {
+ maxOffset = int64(chunk.Size) + chunk.Offset
+ }
+ }
+
+ manifestChunk = &filer_pb.FileChunk{
+ FileId: buf.String(),
+ }
+ manifestChunk.IsChunkManifest = true
+ manifestChunk.Offset = minOffset
+ manifestChunk.Size = uint64(maxOffset - minOffset)
+
+ return
+}
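The grouping arithmetic these tests exercise, seen in isolation: every full group of mergeFactor data chunks is folded into one manifest chunk, and the tail (fewer than mergeFactor chunks) passes through unchanged. Plain strings stand in for *filer_pb.FileChunk and the merge closure is a toy stand-in for mergeIntoManifest.

package main

import "fmt"

func batch(ids []string, mergeFactor int, merge func([]string) string) (out []string) {
	remaining := len(ids)
	for i := 0; i+mergeFactor <= len(ids); i += mergeFactor {
		out = append(out, merge(ids[i:i+mergeFactor])) // one manifest per full group
		remaining -= mergeFactor
	}
	out = append(out, ids[len(ids)-remaining:]...) // the tail keeps its original chunks
	return out
}

func main() {
	merge := func(group []string) string { return fmt.Sprintf("M%v", group) }
	fmt.Println(batch([]string{"1", "2", "3", "4", "5"}, 2, merge))
	// [M[1 2] M[3 4] 5]
}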
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
new file mode 100644
index 000000000..68f308a51
--- /dev/null
+++ b/weed/filer/filechunks.go
@@ -0,0 +1,292 @@
+package filer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "math"
+ "sort"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
+ for _, c := range chunks {
+ t := uint64(c.Offset + int64(c.Size))
+ if size < t {
+ size = t
+ }
+ }
+ return
+}
+
+func FileSize(entry *filer_pb.Entry) (size uint64) {
+ return maxUint64(TotalSize(entry.Chunks), entry.Attributes.FileSize)
+}
+
+func ETag(entry *filer_pb.Entry) (etag string) {
+ if entry.Attributes == nil || entry.Attributes.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attributes.Md5)
+}
+
+func ETagEntry(entry *Entry) (etag string) {
+ if entry.Attr.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attr.Md5)
+}
+
+func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
+ if len(chunks) == 1 {
+ return chunks[0].ETag
+ }
+ md5_digests := [][]byte{}
+ for _, c := range chunks {
+ md5_decoded, _ := hex.DecodeString(c.ETag)
+ md5_digests = append(md5_digests, md5_decoded)
+ }
+ return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
+}
+
+func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
+
+ visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
+
+ fileIds := make(map[string]bool)
+ for _, interval := range visibles {
+ fileIds[interval.fileId] = true
+ }
+ for _, chunk := range chunks {
+ if _, found := fileIds[chunk.GetFileIdString()]; found {
+ compacted = append(compacted, chunk)
+ } else {
+ garbage = append(garbage, chunk)
+ }
+ }
+
+ return
+}
+
+func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
+
+ aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
+ if aErr != nil {
+ return nil, aErr
+ }
+ bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
+ if bErr != nil {
+ return nil, bErr
+ }
+
+ delta = append(delta, DoMinusChunks(aData, bData)...)
+ delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
+ return
+}
+
+func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
+
+ fileIds := make(map[string]bool)
+ for _, interval := range bs {
+ fileIds[interval.GetFileIdString()] = true
+ }
+ for _, chunk := range as {
+ if _, found := fileIds[chunk.GetFileIdString()]; !found {
+ delta = append(delta, chunk)
+ }
+ }
+
+ return
+}
+
+type ChunkView struct {
+ FileId string
+ Offset int64
+ Size uint64
+ LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
+ ChunkSize uint64
+ CipherKey []byte
+ IsGzipped bool
+}
+
+func (cv *ChunkView) IsFullChunk() bool {
+ return cv.Size == cv.ChunkSize
+}
+
+func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
+
+ visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
+
+ return ViewFromVisibleIntervals(visibles, offset, size)
+
+}
+
+func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
+
+ stop := offset + size
+ if size == math.MaxInt64 {
+ stop = math.MaxInt64
+ }
+ if stop < offset {
+ stop = math.MaxInt64
+ }
+
+ for _, chunk := range visibles {
+
+ chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
+
+ if chunkStart < chunkStop {
+ views = append(views, &ChunkView{
+ FileId: chunk.fileId,
+ Offset: chunkStart - chunk.start + chunk.chunkOffset,
+ Size: uint64(chunkStop - chunkStart),
+ LogicOffset: chunkStart,
+ ChunkSize: chunk.chunkSize,
+ CipherKey: chunk.cipherKey,
+ IsGzipped: chunk.isGzipped,
+ })
+ }
+ }
+
+ return views
+
+}
+
+func logPrintf(name string, visibles []VisibleInterval) {
+
+ /*
+ glog.V(0).Infof("%s len %d", name, len(visibles))
+ for _, v := range visibles {
+ glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
+ }
+ */
+}
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ return new(VisibleInterval)
+ },
+}
+
+func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
+
+ newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
+
+ length := len(visibles)
+ if length == 0 {
+ return append(visibles, newV)
+ }
+ last := visibles[length-1]
+ if last.stop <= chunk.Offset {
+ return append(visibles, newV)
+ }
+
+ logPrintf(" before", visibles)
+ // glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
+ chunkStop := chunk.Offset + int64(chunk.Size)
+ for _, v := range visibles {
+ if v.start < chunk.Offset && chunk.Offset < v.stop {
+ t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
+ newVisibles = append(newVisibles, t)
+ // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+ }
+ if v.start < chunkStop && chunkStop < v.stop {
+ t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
+ newVisibles = append(newVisibles, t)
+ // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+ }
+ if chunkStop <= v.start || v.stop <= chunk.Offset {
+ newVisibles = append(newVisibles, v)
+ // glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
+ }
+ }
+ newVisibles = append(newVisibles, newV)
+
+ logPrintf(" append", newVisibles)
+
+ for i := len(newVisibles) - 1; i >= 0; i-- {
+ if i > 0 && newV.start < newVisibles[i-1].start {
+ newVisibles[i] = newVisibles[i-1]
+ } else {
+ newVisibles[i] = newV
+ break
+ }
+ }
+ logPrintf(" sorted", newVisibles)
+
+ return newVisibles
+}
+
+// NonOverlappingVisibleIntervals translates the file chunks into VisibleIntervals in memory.
+// If a chunk is a chunk manifest, it is first resolved recursively into its data chunks.
+func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
+
+ chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
+
+ sort.Slice(chunks, func(i, j int) bool {
+ if chunks[i].Mtime == chunks[j].Mtime {
+ filer_pb.EnsureFid(chunks[i])
+ filer_pb.EnsureFid(chunks[j])
+ if chunks[i].Fid == nil || chunks[j].Fid == nil {
+ return true
+ }
+ return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
+ }
+ return chunks[i].Mtime < chunks[j].Mtime // keep this to make tests run
+ })
+
+ for _, chunk := range chunks {
+
+ // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
+ visibles = MergeIntoVisibles(visibles, chunk)
+
+ logPrintf("add", visibles)
+
+ }
+
+ return
+}
+
+// find non-overlapping visible intervals
+// each visible interval maps to one file chunk
+
+type VisibleInterval struct {
+ start int64
+ stop int64
+ modifiedTime int64
+ fileId string
+ chunkOffset int64
+ chunkSize uint64
+ cipherKey []byte
+ isGzipped bool
+}
+
+func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
+ return VisibleInterval{
+ start: start,
+ stop: stop,
+ fileId: fileId,
+ modifiedTime: modifiedTime,
+ chunkOffset: chunkOffset, // the starting position in the chunk
+ chunkSize: chunkSize,
+ cipherKey: cipherKey,
+ isGzipped: isGzipped,
+ }
+}
+
+func min(x, y int64) int64 {
+ if x <= y {
+ return x
+ }
+ return y
+}
+func max(x, y int64) int64 {
+ if x <= y {
+ return y
+ }
+ return x
+}
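To make the interval logic concrete, here is how a reader is expected to plan a ranged read with these helpers. Since the chunks in this sketch carry no manifests, the lookup function may be nil, which is the same shortcut the unit tests below take.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	chunks := []*filer_pb.FileChunk{
		{Offset: 0, Size: 100, FileId: "a", Mtime: 1},
		{Offset: 50, Size: 100, FileId: "b", Mtime: 2}, // newer, overwrites [50,150)
	}
	// plan a read of file bytes [40, 120)
	views := filer.ViewFromChunks(nil, chunks, 40, 80)
	for _, v := range views {
		fmt.Printf("read %s [%d,%d) at file offset %d\n",
			v.FileId, v.Offset, v.Offset+int64(v.Size), v.LogicOffset)
	}
	// expected: a [40,50) at file offset 40, then b [0,70) at file offset 50
}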
diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go
new file mode 100644
index 000000000..9f9566d9b
--- /dev/null
+++ b/weed/filer/filechunks2_test.go
@@ -0,0 +1,46 @@
+package filer
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func TestCompactFileChunksRealCase(t *testing.T) {
+
+ chunks := []*filer_pb.FileChunk{
+ {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497},
+ {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492},
+ {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928},
+ {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894},
+ {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900},
+ {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904},
+ {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910},
+ {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903},
+ {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911},
+ {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909},
+ {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922},
+ }
+
+ printChunks("before", chunks)
+
+ compacted, garbage := CompactFileChunks(nil, chunks)
+
+ printChunks("compacted", compacted)
+ printChunks("garbage", garbage)
+
+}
+
+func printChunks(name string, chunks []*filer_pb.FileChunk) {
+ sort.Slice(chunks, func(i, j int) bool {
+ if chunks[i].Offset == chunks[j].Offset {
+ return chunks[i].Mtime < chunks[j].Mtime
+ }
+ return chunks[i].Offset < chunks[j].Offset
+ })
+ for _, chunk := range chunks {
+ glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+ }
+}
diff --git a/weed/filer2/filechunks_test.go b/weed/filer/filechunks_test.go
similarity index 62%
rename from weed/filer2/filechunks_test.go
rename to weed/filer/filechunks_test.go
index e75e60753..699e7e298 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer/filechunks_test.go
@@ -1,10 +1,15 @@
-package filer2
+package filer
import (
+ "fmt"
"log"
+ "math"
+ "math/rand"
+ "strconv"
"testing"
- "fmt"
+ "github.com/stretchr/testify/assert"
+
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@@ -16,7 +21,7 @@ func TestCompactFileChunks(t *testing.T) {
{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
}
- compacted, garbage := CompactFileChunks(chunks)
+ compacted, garbage := CompactFileChunks(nil, chunks)
if len(compacted) != 3 {
t.Fatalf("unexpected compacted: %d", len(compacted))
@@ -49,7 +54,7 @@ func TestCompactFileChunks2(t *testing.T) {
})
}
- compacted, garbage := CompactFileChunks(chunks)
+ compacted, garbage := CompactFileChunks(nil, chunks)
if len(compacted) != 4 {
t.Fatalf("unexpected compacted: %d", len(compacted))
@@ -59,6 +64,42 @@ func TestCompactFileChunks2(t *testing.T) {
}
}
+func TestRandomFileChunksCompact(t *testing.T) {
+
+ data := make([]byte, 1024)
+
+ var chunks []*filer_pb.FileChunk
+ for i := 0; i < 15; i++ {
+ start, stop := rand.Intn(len(data)), rand.Intn(len(data))
+ if start > stop {
+ start, stop = stop, start
+ }
+ if start+16 < stop {
+ stop = start + 16
+ }
+ chunk := &filer_pb.FileChunk{
+ FileId: strconv.Itoa(i),
+ Offset: int64(start),
+ Size: uint64(stop - start),
+ Mtime: int64(i),
+ Fid: &filer_pb.FileId{FileKey: uint64(i)},
+ }
+ chunks = append(chunks, chunk)
+ for x := start; x < stop; x++ {
+ data[x] = byte(i)
+ }
+ }
+
+ visibles, _ := NonOverlappingVisibleIntervals(nil, chunks)
+
+ for _, v := range visibles {
+ for x := v.start; x < v.stop; x++ {
+ assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId)
+ }
+ }
+
+}
+
func TestIntervalMerging(t *testing.T) {
testcases := []struct {
@@ -91,12 +132,12 @@ func TestIntervalMerging(t *testing.T) {
// case 2: updates overwrite part of previous chunks
{
Chunks: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
+ {Offset: 0, Size: 100, FileId: "a", Mtime: 123},
+ {Offset: 0, Size: 70, FileId: "b", Mtime: 134},
},
Expected: []*VisibleInterval{
- {start: 0, stop: 50, fileId: "asdf"},
- {start: 50, stop: 100, fileId: "abc"},
+ {start: 0, stop: 70, fileId: "b"},
+ {start: 70, stop: 100, fileId: "a", chunkOffset: 70},
},
},
// case 3: updates overwrite full chunks
@@ -126,25 +167,25 @@ func TestIntervalMerging(t *testing.T) {
// case 5: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
- {Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
- {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
+ {Offset: 0, Size: 100, FileId: "a", Mtime: 123},
+ {Offset: 0, Size: 200, FileId: "d", Mtime: 184},
+ {Offset: 70, Size: 150, FileId: "c", Mtime: 143},
+ {Offset: 80, Size: 100, FileId: "b", Mtime: 134},
},
Expected: []*VisibleInterval{
- {start: 0, stop: 200, fileId: "asdf"},
- {start: 200, stop: 220, fileId: "abc"},
+ {start: 0, stop: 200, fileId: "d"},
+ {start: 200, stop: 220, fileId: "c", chunkOffset: 130},
},
},
// case 6: same updates
{
Chunks: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+ {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
+ {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
+ {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
},
Expected: []*VisibleInterval{
- {start: 0, stop: 100, fileId: "abc"},
+ {start: 0, stop: 100, fileId: "xyz"},
},
},
// case 7: real updates
@@ -186,7 +227,7 @@ func TestIntervalMerging(t *testing.T) {
for i, testcase := range testcases {
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
- intervals := NonOverlappingVisibleIntervals(testcase.Chunks)
+ intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks)
for x, interval := range intervals {
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
i, x, interval.start, interval.stop, interval.fileId)
@@ -204,6 +245,10 @@ func TestIntervalMerging(t *testing.T) {
t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
i, x, interval.fileId, testcase.Expected[x].fileId)
}
+ if interval.chunkOffset != testcase.Expected[x].chunkOffset {
+ t.Fatalf("failed on test case %d, interval %d, chunkOffset %d, expect %d",
+ i, x, interval.chunkOffset, testcase.Expected[x].chunkOffset)
+ }
}
if len(intervals) != len(testcase.Expected) {
t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
@@ -218,7 +263,7 @@ func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
- Size int
+ Size int64
Expected []*ChunkView
}{
// case 0: normal
@@ -251,14 +296,14 @@ func TestChunksReading(t *testing.T) {
// case 2: updates overwrite part of previous chunks
{
Chunks: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
+ {Offset: 3, Size: 100, FileId: "a", Mtime: 123},
+ {Offset: 10, Size: 50, FileId: "b", Mtime: 134},
},
- Offset: 25,
- Size: 50,
+ Offset: 30,
+ Size: 40,
Expected: []*ChunkView{
- {Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
- {Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
+ {Offset: 20, Size: 30, FileId: "b", LogicOffset: 30},
+ {Offset: 57, Size: 10, FileId: "a", LogicOffset: 60},
},
},
// case 3: updates overwrite full chunks
@@ -286,35 +331,35 @@ func TestChunksReading(t *testing.T) {
Size: 400,
Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
- // {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
+ {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250},
},
},
// case 5: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
- {Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
+ {Offset: 0, Size: 100, FileId: "a", Mtime: 123},
+ {Offset: 0, Size: 200, FileId: "c", Mtime: 184},
+ {Offset: 70, Size: 150, FileId: "b", Mtime: 143},
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
},
Offset: 0,
Size: 220,
Expected: []*ChunkView{
- {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
- {Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
+ {Offset: 0, Size: 200, FileId: "c", LogicOffset: 0},
+ {Offset: 130, Size: 20, FileId: "b", LogicOffset: 200},
},
},
// case 6: same updates
{
Chunks: []*filer_pb.FileChunk{
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
- {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+ {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
+ {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
+ {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
},
Offset: 0,
Size: 100,
Expected: []*ChunkView{
- {Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
+ {Offset: 0, Size: 100, FileId: "xyz", LogicOffset: 0},
},
},
// case 7: edge cases
@@ -331,21 +376,60 @@ func TestChunksReading(t *testing.T) {
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
},
},
+ // case 8: edge cases
+ {
+ Chunks: []*filer_pb.FileChunk{
+ {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+ {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134},
+ {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353},
+ },
+ Offset: 0,
+ Size: 300,
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0},
+ {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90},
+ {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190},
+ },
+ },
+ // case 9: edge cases
+ {
+ Chunks: []*filer_pb.FileChunk{
+ {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1},
+ {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2},
+ {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3},
+ {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4},
+ {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5},
+ {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6},
+ },
+ Offset: 0,
+ Size: 153578836,
+ Expected: []*ChunkView{
+ {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0},
+ {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936},
+ {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760},
+ {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736},
+ {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168},
+ {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248},
+ },
+ },
}
for i, testcase := range testcases {
log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
- chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
+ chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size)
for x, chunk := range chunks {
log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
i, x, chunk.Offset, chunk.Size, chunk.FileId)
if chunk.Offset != testcase.Expected[x].Offset {
- t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
- i, x, chunk.Offset, testcase.Expected[x].Offset)
+ t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d",
+ i, chunk.FileId, chunk.Offset, testcase.Expected[x].Offset)
}
if chunk.Size != testcase.Expected[x].Size {
- t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
- i, x, chunk.Size, testcase.Expected[x].Size)
+ t.Fatalf("failed on read case %d, chunk %s, Size %d, expect %d",
+ i, chunk.FileId, chunk.Size, testcase.Expected[x].Size)
}
if chunk.FileId != testcase.Expected[x].FileId {
t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
@@ -379,6 +463,77 @@ func BenchmarkCompactFileChunks(b *testing.B) {
}
for n := 0; n < b.N; n++ {
- CompactFileChunks(chunks)
+ CompactFileChunks(nil, chunks)
+ }
+}
+
+func TestViewFromVisibleIntervals(t *testing.T) {
+ visibles := []VisibleInterval{
+ {
+ start: 0,
+ stop: 25,
+ fileId: "fid1",
+ },
+ {
+ start: 4096,
+ stop: 8192,
+ fileId: "fid2",
+ },
+ {
+ start: 16384,
+ stop: 18551,
+ fileId: "fid3",
+ },
}
+
+ views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
+
+ if len(views) != len(visibles) {
+ assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+ }
+
+}
+
+func TestViewFromVisibleIntervals2(t *testing.T) {
+ visibles := []VisibleInterval{
+ {
+ start: 344064,
+ stop: 348160,
+ fileId: "fid1",
+ },
+ {
+ start: 348160,
+ stop: 356352,
+ fileId: "fid2",
+ },
+ }
+
+ views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
+
+ if len(views) != len(visibles) {
+ assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+ }
+
+}
+
+func TestViewFromVisibleIntervals3(t *testing.T) {
+ visibles := []VisibleInterval{
+ {
+ start: 1000,
+ stop: 2000,
+ fileId: "fid1",
+ },
+ {
+ start: 3000,
+ stop: 4000,
+ fileId: "fid2",
+ },
+ }
+
+ views := ViewFromVisibleIntervals(visibles, 1700, 1500)
+
+ if len(views) != len(visibles) {
+ assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+ }
+
}
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
new file mode 100644
index 000000000..effdc0e4e
--- /dev/null
+++ b/weed/filer/filer.go
@@ -0,0 +1,304 @@
+package filer
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+const (
+ LogFlushInterval = time.Minute
+ PaginationSize = 1024
+ FilerStoreId = "filer.store.id"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
+)
+
+type Filer struct {
+ Store VirtualFilerStore
+ MasterClient *wdclient.MasterClient
+ fileIdDeletionQueue *util.UnboundedQueue
+ GrpcDialOption grpc.DialOption
+ DirBucketsPath string
+ FsyncBuckets []string
+ buckets *FilerBuckets
+ Cipher bool
+ LocalMetaLogBuffer *log_buffer.LogBuffer
+ metaLogCollection string
+ metaLogReplication string
+ MetaAggregator *MetaAggregator
+ Signature int32
+ FilerConf *FilerConf
+}
+
+func NewFiler(masters []string, grpcDialOption grpc.DialOption,
+ filerHost string, filerGrpcPort uint32, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
+ f := &Filer{
+ MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, dataCenter, masters),
+ fileIdDeletionQueue: util.NewUnboundedQueue(),
+ GrpcDialOption: grpcDialOption,
+ FilerConf: NewFilerConf(),
+ }
+ f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn)
+ f.metaLogCollection = collection
+ f.metaLogReplication = replication
+
+ go f.loopProcessingDeletion()
+
+ return f
+}
+
+func (f *Filer) AggregateFromPeers(self string, filers []string) {
+
+ // set peers
+ found := false
+ for _, peer := range filers {
+ if peer == self {
+ found = true
+ }
+ }
+ if !found {
+ filers = append(filers, self)
+ }
+
+ f.MetaAggregator = NewMetaAggregator(filers, f.GrpcDialOption)
+ f.MetaAggregator.StartLoopSubscribe(f, self)
+
+}
+
+func (f *Filer) SetStore(store FilerStore) {
+ f.Store = NewFilerStoreWrapper(store)
+
+ f.setOrLoadFilerStoreSignature(store)
+
+}
+
+func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
+ storeIdBytes, err := store.KvGet(context.Background(), []byte(FilerStoreId))
+ if err == ErrKvNotFound || (err == nil && len(storeIdBytes) == 0) {
+ f.Signature = util.RandomInt32()
+ storeIdBytes = make([]byte, 4)
+ util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
+ if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
+ glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
+ }
+ glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
+ } else if err == nil && len(storeIdBytes) == 4 {
+ f.Signature = int32(util.BytesToUint32(storeIdBytes))
+ glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
+ } else {
+ glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
+ }
+}
+
+func (f *Filer) GetStore() (store FilerStore) {
+ return f.Store
+}
+
+func (f *Filer) GetMaster() string {
+ return f.MasterClient.GetMaster()
+}
+
+func (f *Filer) KeepConnectedToMaster() {
+ f.MasterClient.KeepConnectedToMaster()
+}
+
+func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return f.Store.BeginTransaction(ctx)
+}
+
+func (f *Filer) CommitTransaction(ctx context.Context) error {
+ return f.Store.CommitTransaction(ctx)
+}
+
+func (f *Filer) RollbackTransaction(ctx context.Context) error {
+ return f.Store.RollbackTransaction(ctx)
+}
+
+func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32) error {
+
+ if string(entry.FullPath) == "/" {
+ return nil
+ }
+
+ oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
+
+ /*
+ if !hasWritePermission(lastDirectoryEntry, entry) {
+ glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
+ lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
+ return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
+ }
+ */
+
+ if oldEntry == nil {
+
+ dirParts := strings.Split(string(entry.FullPath), "/")
+ if err := f.ensureParentDirectoryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil {
+ return err
+ }
+
+ glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+ if err := f.Store.InsertEntry(ctx, entry); err != nil {
+ glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+ return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
+ }
+ } else {
+ if o_excl {
+ glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+ return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
+ }
+ glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+ if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
+ glog.Errorf("update entry %s: %v", entry.FullPath, err)
+ return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
+ }
+ }
+
+ f.maybeAddBucket(entry)
+ f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)
+
+ f.deleteChunksIfNotNew(oldEntry, entry)
+
+ glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+
+ return nil
+}
+
+func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) {
+
+ if level == 0 {
+ return nil
+ }
+
+ dirPath := "/" + util.Join(dirParts[:level]...)
+ // fmt.Printf("%d directory: %+v\n", i, dirPath)
+
+ // check the store directly
+ glog.V(4).Infof("find uncached directory: %s", dirPath)
+ dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
+
+ // no such existing directory
+ if dirEntry == nil {
+
+ // ensure parent directory
+ if err = f.ensureParentDirectoryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil {
+ return err
+ }
+
+ // create the directory
+ now := time.Now()
+
+ dirEntry = &Entry{
+ FullPath: util.FullPath(dirPath),
+ Attr: Attr{
+ Mtime: now,
+ Crtime: now,
+ Mode: os.ModeDir | entry.Mode | 0110,
+ Uid: entry.Uid,
+ Gid: entry.Gid,
+ Collection: entry.Collection,
+ Replication: entry.Replication,
+ UserName: entry.UserName,
+ GroupNames: entry.GroupNames,
+ },
+ }
+
+ glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+ mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
+ if mkdirErr != nil {
+ if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
+ glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+ return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
+ }
+ } else {
+ f.maybeAddBucket(dirEntry)
+ f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
+ }
+
+ } else if !dirEntry.IsDirectory() {
+ glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+ return fmt.Errorf("%s is a file", dirPath)
+ }
+
+ return nil
+}
+
+func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
+ if oldEntry != nil {
+ entry.Attr.Crtime = oldEntry.Attr.Crtime
+ if oldEntry.IsDirectory() && !entry.IsDirectory() {
+ glog.Errorf("existing %s is a directory", oldEntry.FullPath)
+ return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
+ }
+ if !oldEntry.IsDirectory() && entry.IsDirectory() {
+ glog.Errorf("existing %s is a file", oldEntry.FullPath)
+ return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
+ }
+ }
+ return f.Store.UpdateEntry(ctx, entry)
+}
+
+var (
+ Root = &Entry{
+ FullPath: "/",
+ Attr: Attr{
+ Mtime: time.Now(),
+ Crtime: time.Now(),
+ Mode: os.ModeDir | 0755,
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+)
+
+func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {
+
+ if string(p) == "/" {
+ return Root, nil
+ }
+ entry, err = f.Store.FindEntry(ctx, p)
+ if entry != nil && entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.Store.DeleteOneEntry(ctx, entry)
+ return nil, filer_pb.ErrNotFound
+ }
+ }
+ return
+
+}
+
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
+ lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
+ if entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.Store.DeleteOneEntry(ctx, entry)
+ expiredCount++
+ return true
+ }
+ }
+ return eachEntryFunc(entry)
+ })
+ if err != nil {
+ return expiredCount, lastFileName, err
+ }
+ return
+}
+
+func (f *Filer) Shutdown() {
+ f.LocalMetaLogBuffer.Shutdown()
+ f.Store.Shutdown()
+}
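The TTL expiry rule that FindEntry and doListDirectoryEntries both apply, pulled out as a standalone predicate. This is a sketch; the real code inlines the check and deletes the expired entry on the spot.

package main

import (
	"fmt"
	"time"
)

// expired reports whether an entry created at crtime with ttlSec seconds
// of time-to-live has lapsed by now. A non-positive TTL means no expiry.
func expired(crtime time.Time, ttlSec int32, now time.Time) bool {
	if ttlSec <= 0 {
		return false
	}
	return crtime.Add(time.Duration(ttlSec) * time.Second).Before(now)
}

func main() {
	created := time.Now().Add(-2 * time.Hour)
	fmt.Println(expired(created, 3600, time.Now()))  // true: the 1h TTL elapsed
	fmt.Println(expired(created, 86400, time.Now())) // false: the 24h TTL remains
}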
diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go
new file mode 100644
index 000000000..43fb000c9
--- /dev/null
+++ b/weed/filer/filer_buckets.go
@@ -0,0 +1,121 @@
+package filer
+
+import (
+ "context"
+ "math"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type BucketName string
+type BucketOption struct {
+ Name BucketName
+ Replication string
+ fsync bool
+}
+type FilerBuckets struct {
+ dirBucketsPath string
+ buckets map[BucketName]*BucketOption
+ sync.RWMutex
+}
+
+func (f *Filer) LoadBuckets() {
+
+ f.buckets = &FilerBuckets{
+ buckets: make(map[BucketName]*BucketOption),
+ }
+
+ limit := int64(math.MaxInt32)
+
+ entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "")
+
+ if err != nil {
+ glog.V(1).Infof("no buckets found: %v", err)
+ return
+ }
+
+ shouldFsyncMap := make(map[string]bool)
+ for _, bucket := range f.FsyncBuckets {
+ shouldFsyncMap[bucket] = true
+ }
+
+ glog.V(1).Infof("buckets found: %d", len(entries))
+
+ f.buckets.Lock()
+ for _, entry := range entries {
+ _, shouldFsync := shouldFsyncMap[entry.Name()]
+ f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
+ Name: BucketName(entry.Name()),
+ Replication: entry.Replication,
+ fsync: shouldFsync,
+ }
+ }
+ f.buckets.Unlock()
+
+}
+
+func (f *Filer) ReadBucketOption(bucketName string) (replication string, fsync bool) {
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ option, found := f.buckets.buckets[BucketName(bucketName)]
+
+ if !found {
+ return "", false
+ }
+ return option.Replication, option.fsync
+
+}
+
+func (f *Filer) isBucket(entry *Entry) bool {
+ if !entry.IsDirectory() {
+ return false
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return false
+ }
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ _, found := f.buckets.buckets[BucketName(dirName)]
+
+ return found
+
+}
+
+func (f *Filer) maybeAddBucket(entry *Entry) {
+ if !entry.IsDirectory() {
+ return
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return
+ }
+ f.addBucket(dirName, &BucketOption{
+ Name: BucketName(dirName),
+ Replication: entry.Replication,
+ })
+}
+
+func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ f.buckets.buckets[BucketName(bucketName)] = bucketOption
+
+}
+
+func (f *Filer) deleteBucket(bucketName string) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ delete(f.buckets.buckets, BucketName(bucketName))
+
+}
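The bucket checks above reduce to "a directory whose parent is DirBucketsPath" plus a lookup in the cached map. The path half of that test, sketched with the standard library standing in for FullPath.DirAndName:

package main

import (
	"fmt"
	"path/filepath"
)

// isUnderBucketsPath reports whether fullPath is a direct child of
// dirBucketsPath, returning the would-be bucket name when it is.
func isUnderBucketsPath(fullPath, dirBucketsPath string) (bucket string, ok bool) {
	parent, name := filepath.Split(fullPath)
	if filepath.Clean(parent) != dirBucketsPath {
		return "", false
	}
	return name, true
}

func main() {
	fmt.Println(isUnderBucketsPath("/buckets/photos", "/buckets"))      // photos true
	fmt.Println(isUnderBucketsPath("/buckets/photos/2020", "/buckets")) // "" false: grandchild, not a bucket
}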
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
new file mode 100644
index 000000000..ab5afc5cc
--- /dev/null
+++ b/weed/filer/filer_conf.go
@@ -0,0 +1,149 @@
+package filer
+
+import (
+ "bytes"
+ "context"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/viant/ptrie"
+)
+
+const (
+ DirectoryEtcRoot = "/etc"
+ DirectoryEtcSeaweedFS = "/etc/seaweedfs"
+ FilerConfName = "filer.conf"
+ IamConfigDirecotry = "/etc/iam"
+ IamIdentityFile = "identity.json"
+ IamPoliciesFile = "policies.json"
+)
+
+type FilerConf struct {
+ rules ptrie.Trie
+}
+
+func NewFilerConf() (fc *FilerConf) {
+ fc = &FilerConf{
+ rules: ptrie.New(),
+ }
+ return fc
+}
+
+func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
+ filerConfPath := util.NewFullPath(DirectoryEtcSeaweedFS, FilerConfName)
+ entry, err := filer.FindEntry(context.Background(), filerConfPath)
+ if err != nil {
+ if err == filer_pb.ErrNotFound {
+ return nil
+ }
+ glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
+ return
+ }
+
+ if len(entry.Content) > 0 {
+ return fc.LoadFromBytes(entry.Content)
+ }
+
+ return fc.loadFromChunks(filer, entry.Content, entry.Chunks)
+}
+
+func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk) (err error) {
+ if len(content) == 0 {
+ content, err = filer.readEntry(chunks)
+ if err != nil {
+ glog.Errorf("read filer conf content: %v", err)
+ return
+ }
+ }
+
+ return fc.LoadFromBytes(content)
+}
+
+func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
+ conf := &filer_pb.FilerConf{}
+
+ if err := jsonpb.Unmarshal(bytes.NewReader(data), conf); err != nil {
+ return err
+ }
+
+ return fc.doLoadConf(conf)
+}
+
+func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
+ for _, location := range conf.Locations {
+ err = fc.AddLocationConf(location)
+ if err != nil {
+ // the error is not recoverable, so swallow it and keep the rules loaded so far
+ return nil
+ }
+ }
+ return nil
+}
+
+func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
+ err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
+ if err != nil {
+ glog.Errorf("put location prefix: %v", err)
+ }
+ return
+}
+
+func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
+ rules := ptrie.New()
+ fc.rules.Walk(func(key []byte, value interface{}) bool {
+ if string(key) == locationPrefix {
+ return true
+ }
+ rules.Put(key, value)
+ return true
+ })
+ fc.rules = rules
+ return
+}
+
+func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) {
+ pathConf = &filer_pb.FilerConf_PathConf{}
+ fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
+ t := value.(*filer_pb.FilerConf_PathConf)
+ mergePathConf(pathConf, t)
+ return true
+ })
+ return pathConf
+}
+
+// merge: if a value in b is not empty, it overrides the corresponding value in a
+func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
+ a.Collection = util.Nvl(b.Collection, a.Collection)
+ a.Replication = util.Nvl(b.Replication, a.Replication)
+ a.Ttl = util.Nvl(b.Ttl, a.Ttl)
+ if b.DiskType != "" {
+ a.DiskType = b.DiskType
+ }
+ a.Fsync = b.Fsync || a.Fsync
+ if b.VolumeGrowthCount > 0 {
+ a.VolumeGrowthCount = b.VolumeGrowthCount
+ }
+}
+
+func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
+ m := &filer_pb.FilerConf{}
+ fc.rules.Walk(func(key []byte, value interface{}) bool {
+ pathConf := value.(*filer_pb.FilerConf_PathConf)
+ m.Locations = append(m.Locations, pathConf)
+ return true
+ })
+ return m
+}
+
+func (fc *FilerConf) ToText(writer io.Writer) error {
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: false,
+ Indent: " ",
+ }
+
+ return m.Marshal(writer, fc.ToProto())
+}
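How mergePathConf composes a broad rule with a more specific one, sketched with plain structs instead of filer_pb.FilerConf_PathConf, and assuming util.Nvl returns its first non-empty argument, which is how the code above uses it.

package main

import "fmt"

type pathConf struct {
	Collection  string
	Replication string
	Fsync       bool
}

// nvl mirrors the assumed util.Nvl behavior: first non-empty string wins.
func nvl(a, b string) string {
	if a != "" {
		return a
	}
	return b
}

// merge applies the more specific rule on top of the broader one:
// non-empty fields override, unset fields inherit, booleans OR together.
func merge(base, override pathConf) pathConf {
	return pathConf{
		Collection:  nvl(override.Collection, base.Collection),
		Replication: nvl(override.Replication, base.Replication),
		Fsync:       override.Fsync || base.Fsync,
	}
}

func main() {
	broad := pathConf{Replication: "001"}    // e.g. a rule for /buckets/
	specific := pathConf{Collection: "abc"}  // e.g. a rule for /buckets/abc
	fmt.Printf("%+v\n", merge(broad, specific))
	// {Collection:abc Replication:001 Fsync:false}
}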
diff --git a/weed/filer/filer_conf_test.go b/weed/filer/filer_conf_test.go
new file mode 100644
index 000000000..ff868a3ec
--- /dev/null
+++ b/weed/filer/filer_conf_test.go
@@ -0,0 +1,34 @@
+package filer
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFilerConf(t *testing.T) {
+
+ fc := NewFilerConf()
+
+ conf := &filer_pb.FilerConf{Locations: []*filer_pb.FilerConf_PathConf{
+ {
+ LocationPrefix: "/buckets/abc",
+ Collection: "abc",
+ },
+ {
+ LocationPrefix: "/buckets/abcd",
+ Collection: "abcd",
+ },
+ {
+ LocationPrefix: "/buckets/",
+ Replication: "001",
+ },
+ }}
+ fc.doLoadConf(conf)
+
+ assert.Equal(t, "abc", fc.MatchStorageRule("/buckets/abc/jasdf").Collection)
+ assert.Equal(t, "abcd", fc.MatchStorageRule("/buckets/abcd/jasdf").Collection)
+ assert.Equal(t, "001", fc.MatchStorageRule("/buckets/abc/jasdf").Replication)
+
+}
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
new file mode 100644
index 000000000..3ef3cfff9
--- /dev/null
+++ b/weed/filer/filer_delete_entry.go
@@ -0,0 +1,161 @@
+package filer
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type HardLinkId []byte
+
+const (
+ MsgFailDelNonEmptyFolder = "fail to delete non-empty folder"
+)
+
+func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) {
+ if p == "/" {
+ return nil
+ }
+
+ entry, findErr := f.FindEntry(ctx, p)
+ if findErr != nil {
+ return findErr
+ }
+
+ isDeleteCollection := f.isBucket(entry)
+
+ var chunks []*filer_pb.FileChunk
+ var hardLinkIds []HardLinkId
+ chunks = append(chunks, entry.Chunks...)
+ if entry.IsDirectory() {
+ // delete the folder children, not including the folder itself
+ var dirChunks []*filer_pb.FileChunk
+ var dirHardLinkIds []HardLinkId
+ dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
+ if err != nil {
+ glog.V(0).Infof("delete directory %s: %v", p, err)
+ return fmt.Errorf("delete directory %s: %v", p, err)
+ }
+ chunks = append(chunks, dirChunks...)
+ hardLinkIds = append(hardLinkIds, dirHardLinkIds...)
+ }
+
+ // delete the file or folder
+ err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster, signatures)
+ if err != nil {
+ return fmt.Errorf("delete file %s: %v", p, err)
+ }
+
+ if shouldDeleteChunks && !isDeleteCollection {
+ f.DirectDeleteChunks(chunks)
+ }
+ // A case not handled:
+ // what if the chunk is in a different collection?
+ if shouldDeleteChunks {
+ f.maybeDeleteHardLinks(hardLinkIds)
+ }
+
+ if isDeleteCollection {
+ collectionName := entry.Name()
+ f.doDeleteCollection(collectionName)
+ f.deleteBucket(collectionName)
+ }
+
+ return nil
+}
+
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
+
+ lastFileName := ""
+ includeLastFile := false
+ if !isDeletingBucket {
+ for {
+ entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
+ if err != nil {
+ glog.Errorf("list folder %s: %v", entry.FullPath, err)
+ return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+ }
+ if lastFileName == "" && !isRecursive && len(entries) > 0 {
+ // only for first iteration in the loop
+ glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+ return nil, nil, fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
+ }
+
+ for _, sub := range entries {
+ lastFileName = sub.Name()
+ var dirChunks []*filer_pb.FileChunk
+ var dirHardLinkIds []HardLinkId
+ if sub.IsDirectory() {
+ subIsDeletingBucket := f.isBucket(sub)
+ dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
+ chunks = append(chunks, dirChunks...)
+ hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
+ } else {
+ f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
+ if len(sub.HardLinkId) != 0 {
+ // hard link chunk data are deleted separately
+ hardlinkIds = append(hardlinkIds, sub.HardLinkId)
+ } else {
+ chunks = append(chunks, sub.Chunks...)
+ }
+ }
+ if err != nil && !ignoreRecursiveError {
+ return nil, nil, err
+ }
+ }
+
+ if len(entries) < PaginationSize {
+ break
+ }
+ }
+ }
+
+ glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+
+ if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
+ return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ }
+
+ f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
+
+ return chunks, hardlinkIds, nil
+}
+
+func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
+
+ glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+
+ if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
+ return fmt.Errorf("filer store delete: %v", storeDeletionErr)
+ }
+ if !entry.IsDirectory() {
+ f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
+ }
+
+ return nil
+}
+
+func (f *Filer) doDeleteCollection(collectionName string) (err error) {
+
+ return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: collectionName,
+ })
+ if err != nil {
+ glog.Infof("delete collection %s: %v", collectionName, err)
+ }
+ return err
+ })
+
+}
+
+func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
+ for _, hardLinkId := range hardLinkIds {
+ if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
+ glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+ }
+ }
+}
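The control flow of the paged walk in doBatchDeleteFolderMetaAndData, stripped of the deletion and hard-link bookkeeping. listPage, the entry type, and the tiny page size are stand-ins; the real code pages through the filer store with PaginationSize (1024) and assumes the store returns children in sorted order.

package main

import "fmt"

type entry struct {
	name  string
	isDir bool
}

const paginationSize = 2 // shrunk for illustration

// listPage returns up to limit children strictly after lastFileName,
// assuming children is sorted by name.
func listPage(children []entry, lastFileName string, limit int) (page []entry) {
	for _, c := range children {
		if c.name <= lastFileName {
			continue
		}
		if len(page) >= limit {
			break
		}
		page = append(page, c)
	}
	return page
}

func walk(children []entry) {
	lastFileName := ""
	for {
		page := listPage(children, lastFileName, paginationSize)
		for _, sub := range page {
			lastFileName = sub.name
			fmt.Println("visit", sub.name, "dir:", sub.isDir)
			// the real walk recurses into sub here when sub.isDir
		}
		if len(page) < paginationSize { // a short page means the folder is drained
			break
		}
	}
}

func main() {
	walk([]entry{{"a", false}, {"b", true}, {"c", false}})
}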
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
new file mode 100644
index 000000000..9eee38277
--- /dev/null
+++ b/weed/filer/filer_deletion.go
@@ -0,0 +1,153 @@
+package filer
+
+import (
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) {
+ return func(vids []string) (map[string]operation.LookupResult, error) {
+ m := make(map[string]operation.LookupResult)
+ for _, vid := range vids {
+ locs, _ := masterClient.GetVidLocations(vid)
+ var locations []operation.Location
+ for _, loc := range locs {
+ locations = append(locations, operation.Location{
+ Url: loc.Url,
+ PublicUrl: loc.PublicUrl,
+ })
+ }
+ m[vid] = operation.LookupResult{
+ VolumeId: vid,
+ Locations: locations,
+ }
+ }
+ return m, nil
+ }
+}
+
+func (f *Filer) loopProcessingDeletion() {
+
+ lookupFunc := LookupByMasterClientFn(f.MasterClient)
+
+ DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+ var deletionCount int
+ for {
+ deletionCount = 0
+ f.fileIdDeletionQueue.Consume(func(fileIds []string) {
+ for len(fileIds) > 0 {
+ var toDeleteFileIds []string
+ if len(fileIds) > DeletionBatchSize {
+ toDeleteFileIds = fileIds[:DeletionBatchSize]
+ fileIds = fileIds[DeletionBatchSize:]
+ } else {
+ toDeleteFileIds = fileIds
+ fileIds = fileIds[:0]
+ }
+ deletionCount = len(toDeleteFileIds)
+ _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+ if err != nil {
+ if !strings.Contains(err.Error(), "already deleted") {
+ glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ }
+ } else {
+ glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+ }
+ }
+ })
+
+ if deletionCount == 0 {
+ time.Sleep(1123 * time.Millisecond)
+ }
+ }
+}
+
+func (f *Filer) doDeleteFileIds(fileIds []string) {
+
+ lookupFunc := LookupByMasterClientFn(f.MasterClient)
+ DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+ for len(fileIds) > 0 {
+ var toDeleteFileIds []string
+ if len(fileIds) > DeletionBatchSize {
+ toDeleteFileIds = fileIds[:DeletionBatchSize]
+ fileIds = fileIds[DeletionBatchSize:]
+ } else {
+ toDeleteFileIds = fileIds
+ fileIds = fileIds[:0]
+ }
+ deletionCount := len(toDeleteFileIds)
+ _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+ if err != nil {
+ if !strings.Contains(err.Error(), "already deleted") {
+ glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ }
+ }
+ }
+}
+
+func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
+ var fileIdsToDelete []string
+ for _, chunk := range chunks {
+ if !chunk.IsChunkManifest {
+ fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
+ continue
+ }
+ dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+ if manifestResolveErr != nil {
+ glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ }
+ for _, dChunk := range dataChunks {
+ fileIdsToDelete = append(fileIdsToDelete, dChunk.GetFileIdString())
+ }
+ fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
+ }
+
+ f.doDeleteFileIds(fileIdsToDelete)
+}
+
+func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
+ for _, chunk := range chunks {
+ if !chunk.IsChunkManifest {
+ f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
+ continue
+ }
+ dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+ if manifestResolveErr != nil {
+ glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ }
+ for _, dChunk := range dataChunks {
+ f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
+ }
+ f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
+ }
+}
+
+func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
+
+ if oldEntry == nil {
+ return
+ }
+ if newEntry == nil {
+ f.DeleteChunks(oldEntry.Chunks)
+ return
+ }
+
+ var toDelete []*filer_pb.FileChunk
+ newChunkIds := make(map[string]bool)
+ for _, newChunk := range newEntry.Chunks {
+ newChunkIds[newChunk.GetFileIdString()] = true
+ }
+
+ for _, oldChunk := range oldEntry.Chunks {
+ if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
+ toDelete = append(toDelete, oldChunk)
+ }
+ }
+ f.DeleteChunks(toDelete)
+}
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
new file mode 100644
index 000000000..7ab101102
--- /dev/null
+++ b/weed/filer/filer_notify.go
@@ -0,0 +1,185 @@
+package filer
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/notification"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
+ var fullpath string
+ if oldEntry != nil {
+ fullpath = string(oldEntry.FullPath)
+ } else if newEntry != nil {
+ fullpath = string(newEntry.FullPath)
+ } else {
+ return
+ }
+
+ // println("fullpath:", fullpath)
+
+ if strings.HasPrefix(fullpath, SystemLogDir) {
+ return
+ }
+ foundSelf := false
+ for _, sig := range signatures {
+ if sig == f.Signature {
+ foundSelf = true
+ }
+ }
+ if !foundSelf {
+ signatures = append(signatures, f.Signature)
+ }
+
+ newParentPath := ""
+ if newEntry != nil {
+ newParentPath, _ = newEntry.FullPath.DirAndName()
+ }
+ eventNotification := &filer_pb.EventNotification{
+ OldEntry: oldEntry.ToProtoEntry(),
+ NewEntry: newEntry.ToProtoEntry(),
+ DeleteChunks: deleteChunks,
+ NewParentPath: newParentPath,
+ IsFromOtherCluster: isFromOtherCluster,
+ Signatures: signatures,
+ }
+
+ if notification.Queue != nil {
+ glog.V(3).Infof("notifying entry update %v", fullpath)
+ if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
+ // the message is logged and dropped
+ glog.Error(err)
+ }
+ }
+
+ f.logMetaEvent(ctx, fullpath, eventNotification)
+
+}
+
+func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
+
+ dir, _ := util.FullPath(fullpath).DirAndName()
+
+ event := &filer_pb.SubscribeMetadataResponse{
+ Directory: dir,
+ EventNotification: eventNotification,
+ TsNs: time.Now().UnixNano(),
+ }
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return
+ }
+
+ f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
+
+}
+
+func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
+
+ if len(buf) == 0 {
+ return
+ }
+
+ startTime, stopTime = startTime.UTC(), stopTime.UTC()
+
+ targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ // startTime.Second(), startTime.Nanosecond(),
+ )
+
+ for {
+ if err := f.appendToFile(targetFile, buf); err != nil {
+ glog.V(1).Infof("log write failed %s: %v", targetFile, err)
+ time.Sleep(737 * time.Millisecond)
+ } else {
+ break
+ }
+ }
+}
+
+func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+
+ startTime = startTime.UTC()
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "")
+ if listDayErr != nil {
+ return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ }
+ for _, dayEntry := range dayEntries {
+ // println("checking day", dayEntry.FullPath)
+ hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "")
+ if listHourMinuteErr != nil {
+ return lastTsNs, fmt.Errorf("fail to list log %s by hour and minute: %v", dayEntry.Name(), listHourMinuteErr)
+ }
+ for _, hourMinuteEntry := range hourMinuteEntries {
+ // println("checking hh-mm", hourMinuteEntry.FullPath)
+ if dayEntry.Name() == startDate {
+ if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
+ continue
+ }
+ }
+ // println("processing", hourMinuteEntry.FullPath)
+ chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+ if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ continue
+ }
+ return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
+ }
+ chunkedFileReader.Close()
+ }
+ }
+
+ return lastTsNs, nil
+}
+
+func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+ for {
+ n, err := r.Read(sizeBuf)
+ if err != nil {
+ return lastTsNs, err
+ }
+ if n != 4 {
+ return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
+ }
+ size := util.BytesToUint32(sizeBuf)
+ // println("entry size", size)
+ entryData := make([]byte, size)
+ n, err = r.Read(entryData)
+ if err != nil {
+ return lastTsNs, err
+ }
+ if n != int(size) {
+ return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
+ }
+ logEntry := &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ return lastTsNs, err
+ }
+ if logEntry.TsNs <= ns {
+ continue
+ }
+ // println("each log: ", logEntry.TsNs)
+ if err := eachLogEntryFn(logEntry); err != nil {
+ return lastTsNs, err
+ } else {
+ lastTsNs = logEntry.TsNs
+ }
+ }
+}
diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go
new file mode 100644
index 000000000..d441bbbc9
--- /dev/null
+++ b/weed/filer/filer_notify_append.go
@@ -0,0 +1,75 @@
+package filer
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) appendToFile(targetFile string, data []byte) error {
+
+ assignResult, uploadResult, err2 := f.assignAndUpload(targetFile, data)
+ if err2 != nil {
+ return err2
+ }
+
+ // find out existing entry
+ fullpath := util.FullPath(targetFile)
+ entry, err := f.FindEntry(context.Background(), fullpath)
+ var offset int64 = 0
+ if err == filer_pb.ErrNotFound {
+ entry = &Entry{
+ FullPath: fullpath,
+ Attr: Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+ } else {
+ offset = int64(TotalSize(entry.Chunks))
+ }
+
+ // append to existing chunks
+ entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+
+ // update the entry
+ err = f.CreateEntry(context.Background(), entry, false, false, nil)
+
+ return err
+}
+
+func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+ // assign a volume location
+ rule := f.FilerConf.MatchStorageRule(targetFile)
+ assignRequest := &operation.VolumeAssignRequest{
+ Count: 1,
+ Collection: util.Nvl(f.metaLogCollection, rule.Collection),
+ Replication: util.Nvl(f.metaLogReplication, rule.Replication),
+ WritableVolumeCount: rule.VolumeGrowthCount,
+ }
+
+ assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
+ if err != nil {
+ return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+ }
+ if assignResult.Error != "" {
+ return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
+ }
+
+ // upload data
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer/filer_notify_test.go
similarity index 90%
rename from weed/filer2/filer_notify_test.go
rename to weed/filer/filer_notify_test.go
index b74e2ad35..6a2be8f18 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer/filer_notify_test.go
@@ -1,17 +1,19 @@
-package filer2
+package filer
import (
"testing"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
"github.com/golang/protobuf/proto"
)
func TestProtoMarshalText(t *testing.T) {
oldEntry := &Entry{
- FullPath: FullPath("/this/path/to"),
+ FullPath: util.FullPath("/this/path/to"),
Attr: Attr{
Mtime: time.Now(),
Mode: 0644,
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
new file mode 100644
index 000000000..a91faeb24
--- /dev/null
+++ b/weed/filer/filer_on_meta_event.go
@@ -0,0 +1,82 @@
+package filer
+
+import (
+ "bytes"
+ "math"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// onMetadataChangeEvent is triggered after the filer processes change events from local or remote filers
+func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
+ f.maybeReloadFilerConfiguration(event)
+ f.onBucketEvents(event)
+}
+
+func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) {
+ message := event.EventNotification
+ for _, sig := range message.Signatures {
+ if sig == f.Signature {
+ return
+ }
+ }
+ if f.DirBucketsPath == event.Directory {
+ if message.OldEntry == nil && message.NewEntry != nil {
+ f.Store.OnBucketCreation(message.NewEntry.Name)
+ }
+ if message.OldEntry != nil && message.NewEntry == nil {
+ f.Store.OnBucketDeletion(message.OldEntry.Name)
+ }
+ }
+}
+
+func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataResponse) {
+ if DirectoryEtcSeaweedFS != event.Directory {
+ if DirectoryEtcSeaweedFS != event.EventNotification.NewParentPath {
+ return
+ }
+ }
+
+ entry := event.EventNotification.NewEntry
+ if entry == nil {
+ return
+ }
+
+ glog.V(0).Infof("processing %v", event)
+ if entry.Name == FilerConfName {
+ f.reloadFilerConfiguration(entry)
+ }
+}
+
+func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
+ var buf bytes.Buffer
+ err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
+ fc := NewFilerConf()
+ err := fc.loadFromChunks(f, entry.Content, entry.Chunks)
+ if err != nil {
+ glog.Errorf("read filer conf chunks: %v", err)
+ return
+ }
+ f.FilerConf = fc
+}
+
+func (f *Filer) LoadFilerConf() {
+ fc := NewFilerConf()
+ err := util.Retry("loadFilerConf", func() error {
+ return fc.loadFromFiler(f)
+ })
+ if err != nil {
+ glog.Errorf("read filer conf: %v", err)
+ return
+ }
+ f.FilerConf = fc
+}
diff --git a/weed/filer/filer_rename.go b/weed/filer/filer_rename.go
new file mode 100644
index 000000000..b6f0cf6de
--- /dev/null
+++ b/weed/filer/filer_rename.go
@@ -0,0 +1,30 @@
+package filer
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "strings"
+)
+
+func (f *Filer) CanRename(source, target util.FullPath) error {
+ sourceBucket := f.DetectBucket(source)
+ targetBucket := f.DetectBucket(target)
+ if sourceBucket != targetBucket {
+ return fmt.Errorf("can not move across collection %s => %s", sourceBucket, targetBucket)
+ }
+ return nil
+}
+
+func (f *Filer) DetectBucket(source util.FullPath) (bucket string) {
+ if strings.HasPrefix(string(source), f.DirBucketsPath+"/") {
+ bucketAndObjectKey := string(source)[len(f.DirBucketsPath)+1:]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 {
+ bucket = bucketAndObjectKey
+ }
+ if t > 0 {
+ bucket = bucketAndObjectKey[:t]
+ }
+ }
+ return bucket
+}
diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go
new file mode 100644
index 000000000..f43312cfa
--- /dev/null
+++ b/weed/filer/filer_search.go
@@ -0,0 +1,98 @@
+package filer
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "path/filepath"
+ "strings"
+)
+
+func splitPattern(pattern string) (prefix string, restPattern string) {
+ position := strings.Index(pattern, "*")
+ if position >= 0 {
+ return pattern[:position], pattern[position:]
+ }
+ position = strings.Index(pattern, "?")
+ if position >= 0 {
+ return pattern[:position], pattern[position:]
+ }
+ return "", ""
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) {
+
+ _, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool {
+ entries = append(entries, entry)
+ return true
+ })
+
+ hasMore = int64(len(entries)) >= limit+1
+ if hasMore {
+ entries = entries[:limit]
+ }
+
+ return entries, hasMore, err
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+ if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+ p = p[0 : len(p)-1]
+ }
+
+ prefixInNamePattern, restNamePattern := splitPattern(namePattern)
+ if prefixInNamePattern != "" {
+ prefix = prefixInNamePattern
+ }
+ var missedCount int64
+
+ missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
+
+ for missedCount > 0 && err == nil {
+ missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
+ }
+
+ return
+}
+
+func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
+
+ if len(restNamePattern) == 0 && len(namePatternExclude) == 0 {
+ lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
+ return 0, lastFileName, err
+ }
+
+ lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
+ nameToTest := entry.Name()
+ if len(namePatternExclude) > 0 {
+ if matched, matchErr := filepath.Match(namePatternExclude, nameToTest); matchErr == nil && matched {
+ missedCount++
+ return true
+ }
+ }
+ if len(restNamePattern) > 0 {
+ if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && !matched {
+ missedCount++
+ return true
+ }
+ }
+ if !eachEntryFunc(entry) {
+ return false
+ }
+ return true
+ })
+ return
+}
+
+func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+ var expiredCount int64
+ expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
+ for expiredCount > 0 && err == nil {
+ expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix, eachEntryFunc)
+ }
+ return
+}
diff --git a/weed/filer/filerstore.go b/weed/filer/filerstore.go
new file mode 100644
index 000000000..a5b2f25de
--- /dev/null
+++ b/weed/filer/filerstore.go
@@ -0,0 +1,46 @@
+package filer
+
+import (
+ "context"
+ "errors"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ ErrUnsupportedListDirectoryPrefixed = errors.New("unsupported directory prefix listing")
+ ErrUnsupportedSuperLargeDirectoryListing = errors.New("unsupported super large directory listing")
+ ErrKvNotImplemented = errors.New("kv not implemented yet")
+ ErrKvNotFound = errors.New("kv: not found")
+)
+
+type ListEachEntryFunc func(entry *Entry) bool
+
+type FilerStore interface {
+ // GetName gets the name to locate the configuration in the filer.toml file
+ GetName() string
+ // Initialize initializes the file store
+ Initialize(configuration util.Configuration, prefix string) error
+ InsertEntry(context.Context, *Entry) error
+ UpdateEntry(context.Context, *Entry) (err error)
+ // err == filer_pb.ErrNotFound if not found
+ FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
+ DeleteEntry(context.Context, util.FullPath) (err error)
+ DeleteFolderChildren(context.Context, util.FullPath) (err error)
+ ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
+ ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
+
+ BeginTransaction(ctx context.Context) (context.Context, error)
+ CommitTransaction(ctx context.Context) error
+ RollbackTransaction(ctx context.Context) error
+
+ KvPut(ctx context.Context, key []byte, value []byte) (err error)
+ KvGet(ctx context.Context, key []byte) (value []byte, err error)
+ KvDelete(ctx context.Context, key []byte) (err error)
+
+ Shutdown()
+}
+
+type BucketAware interface {
+ OnBucketCreation(bucket string)
+ OnBucketDeletion(bucket string)
+}
diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go
new file mode 100644
index 000000000..316c76a0c
--- /dev/null
+++ b/weed/filer/filerstore_hardlink.go
@@ -0,0 +1,102 @@
+package filer
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry *Entry) error {
+ if len(entry.HardLinkId) == 0 {
+ return nil
+ }
+ // handle hard links
+ if err := fsw.setHardLink(ctx, entry); err != nil {
+ return fmt.Errorf("setHardLink %d: %v", entry.HardLinkId, err)
+ }
+
+ // check what is existing entry
+ glog.V(4).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath)
+ actualStore := fsw.getActualStore(entry.FullPath)
+ existingEntry, err := actualStore.FindEntry(ctx, entry.FullPath)
+ if err != nil && err != filer_pb.ErrNotFound {
+ return fmt.Errorf("update existing entry %s: %v", entry.FullPath, err)
+ }
+
+ // remove old hard link
+ if err == nil && len(existingEntry.HardLinkId) != 0 && !bytes.Equal(existingEntry.HardLinkId, entry.HardLinkId) {
+ glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
+ if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) error {
+ if len(entry.HardLinkId) == 0 {
+ return nil
+ }
+ key := entry.HardLinkId
+
+ newBlob, encodeErr := entry.EncodeAttributesAndChunks()
+ if encodeErr != nil {
+ return encodeErr
+ }
+
+ return fsw.KvPut(ctx, key, newBlob)
+}
+
+func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entry) error {
+ if len(entry.HardLinkId) == 0 {
+ return nil
+ }
+ key := entry.HardLinkId
+
+ glog.V(4).Infof("maybeReadHardLink KvGet %v", key)
+ value, err := fsw.KvGet(ctx, key)
+ if err != nil {
+ glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+ return err
+ }
+
+ if err = entry.DecodeAttributesAndChunks(value); err != nil {
+ glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+ return err
+ }
+
+ return nil
+}
+
+func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error {
+ key := hardLinkId
+ value, err := fsw.KvGet(ctx, key)
+ if err == ErrKvNotFound {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ entry := &Entry{}
+ if err = entry.DecodeAttributesAndChunks(value); err != nil {
+ return err
+ }
+
+ entry.HardLinkCounter--
+ if entry.HardLinkCounter <= 0 {
+ glog.V(4).Infof("DeleteHardLink KvDelete %v", key)
+ return fsw.KvDelete(ctx, key)
+ }
+
+ newBlob, encodeErr := entry.EncodeAttributesAndChunks()
+ if encodeErr != nil {
+ return encodeErr
+ }
+
+ glog.V(4).Infof("DeleteHardLink KvPut %v", key)
+ return fsw.KvPut(ctx, key, newBlob)
+
+}
diff --git a/weed/filer/filerstore_translate_path.go b/weed/filer/filerstore_translate_path.go
new file mode 100644
index 000000000..00bf82ed4
--- /dev/null
+++ b/weed/filer/filerstore_translate_path.go
@@ -0,0 +1,153 @@
+package filer
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "strings"
+)
+
+var (
+ _ = FilerStore(&FilerStorePathTranlator{})
+)
+
+type FilerStorePathTranlator struct {
+ actualStore FilerStore
+ storeRoot string
+}
+
+func NewFilerStorePathTranlator(storeRoot string, store FilerStore) *FilerStorePathTranlator {
+ if innerStore, ok := store.(*FilerStorePathTranlator); ok {
+ return innerStore
+ }
+
+ if !strings.HasSuffix(storeRoot, "/") {
+ storeRoot += "/"
+ }
+
+ return &FilerStorePathTranlator{
+ actualStore: store,
+ storeRoot: storeRoot,
+ }
+}
+
+func (t *FilerStorePathTranlator) translatePath(fp util.FullPath) (newPath util.FullPath) {
+ newPath = fp
+ if t.storeRoot == "/" {
+ return
+ }
+ newPath = fp[len(t.storeRoot)-1:]
+ if newPath == "" {
+ newPath = "/"
+ }
+ return
+}
+func (t *FilerStorePathTranlator) changeEntryPath(entry *Entry) (previousPath util.FullPath) {
+ previousPath = entry.FullPath
+ if t.storeRoot == "/" {
+ return
+ }
+ entry.FullPath = t.translatePath(previousPath)
+ return
+}
+func (t *FilerStorePathTranlator) recoverEntryPath(entry *Entry, previousPath util.FullPath) {
+ entry.FullPath = previousPath
+}
+
+func (t *FilerStorePathTranlator) GetName() string {
+ return t.actualStore.GetName()
+}
+
+func (t *FilerStorePathTranlator) Initialize(configuration util.Configuration, prefix string) error {
+ return t.actualStore.Initialize(configuration, prefix)
+}
+
+func (t *FilerStorePathTranlator) InsertEntry(ctx context.Context, entry *Entry) error {
+ previousPath := t.changeEntryPath(entry)
+ defer t.recoverEntryPath(entry, previousPath)
+
+ return t.actualStore.InsertEntry(ctx, entry)
+}
+
+func (t *FilerStorePathTranlator) UpdateEntry(ctx context.Context, entry *Entry) error {
+ previousPath := t.changeEntryPath(entry)
+ defer t.recoverEntryPath(entry, previousPath)
+
+ return t.actualStore.UpdateEntry(ctx, entry)
+}
+
+func (t *FilerStorePathTranlator) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) {
+ if t.storeRoot == "/" {
+ return t.actualStore.FindEntry(ctx, fp)
+ }
+ newFullPath := t.translatePath(fp)
+ entry, err = t.actualStore.FindEntry(ctx, newFullPath)
+ if err == nil {
+ entry.FullPath = fp[:len(t.storeRoot)-1] + entry.FullPath
+ }
+ return
+}
+
+func (t *FilerStorePathTranlator) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ newFullPath := t.translatePath(fp)
+ return t.actualStore.DeleteEntry(ctx, newFullPath)
+}
+
+func (t *FilerStorePathTranlator) DeleteOneEntry(ctx context.Context, existingEntry *Entry) (err error) {
+
+ previousPath := t.changeEntryPath(existingEntry)
+ defer t.recoverEntryPath(existingEntry, previousPath)
+
+ return t.actualStore.DeleteEntry(ctx, existingEntry.FullPath)
+}
+
+func (t *FilerStorePathTranlator) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) {
+ newFullPath := t.translatePath(fp)
+
+ return t.actualStore.DeleteFolderChildren(ctx, newFullPath)
+}
+
+func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {
+
+ newFullPath := t.translatePath(dirPath)
+
+ return t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
+ entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
+ return eachEntryFunc(entry)
+ })
+}
+
+func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (string, error) {
+
+ newFullPath := t.translatePath(dirPath)
+
+ return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
+ entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
+ return eachEntryFunc(entry)
+ })
+}
+
+func (t *FilerStorePathTranlator) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return t.actualStore.BeginTransaction(ctx)
+}
+
+func (t *FilerStorePathTranlator) CommitTransaction(ctx context.Context) error {
+ return t.actualStore.CommitTransaction(ctx)
+}
+
+func (t *FilerStorePathTranlator) RollbackTransaction(ctx context.Context) error {
+ return t.actualStore.RollbackTransaction(ctx)
+}
+
+func (t *FilerStorePathTranlator) Shutdown() {
+ t.actualStore.Shutdown()
+}
+
+func (t *FilerStorePathTranlator) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ return t.actualStore.KvPut(ctx, key, value)
+}
+func (t *FilerStorePathTranlator) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ return t.actualStore.KvGet(ctx, key)
+}
+func (t *FilerStorePathTranlator) KvDelete(ctx context.Context, key []byte) (err error) {
+ return t.actualStore.KvDelete(ctx, key)
+}
diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go
new file mode 100644
index 000000000..cd7c0bea3
--- /dev/null
+++ b/weed/filer/filerstore_wrapper.go
@@ -0,0 +1,322 @@
+package filer
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/viant/ptrie"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ _ = VirtualFilerStore(&FilerStoreWrapper{})
+)
+
+type VirtualFilerStore interface {
+ FilerStore
+ DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error
+ DeleteOneEntry(ctx context.Context, entry *Entry) error
+ AddPathSpecificStore(path string, storeId string, store FilerStore)
+ OnBucketCreation(bucket string)
+ OnBucketDeletion(bucket string)
+}
+
+type FilerStoreWrapper struct {
+ defaultStore FilerStore
+ pathToStore ptrie.Trie
+ storeIdToStore map[string]FilerStore
+}
+
+func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
+ if innerStore, ok := store.(*FilerStoreWrapper); ok {
+ return innerStore
+ }
+ return &FilerStoreWrapper{
+ defaultStore: store,
+ pathToStore: ptrie.New(),
+ storeIdToStore: make(map[string]FilerStore),
+ }
+}
+
+func (fsw *FilerStoreWrapper) OnBucketCreation(bucket string) {
+ for _, store := range fsw.storeIdToStore {
+ if ba, ok := store.(BucketAware); ok {
+ ba.OnBucketCreation(bucket)
+ }
+ }
+ if ba, ok := fsw.defaultStore.(BucketAware); ok {
+ ba.OnBucketCreation(bucket)
+ }
+}
+func (fsw *FilerStoreWrapper) OnBucketDeletion(bucket string) {
+ for _, store := range fsw.storeIdToStore {
+ if ba, ok := store.(BucketAware); ok {
+ ba.OnBucketDeletion(bucket)
+ }
+ }
+ if ba, ok := fsw.defaultStore.(BucketAware); ok {
+ ba.OnBucketDeletion(bucket)
+ }
+}
+
+func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, store FilerStore) {
+ fsw.storeIdToStore[storeId] = NewFilerStorePathTranlator(path, store)
+ err := fsw.pathToStore.Put([]byte(path), storeId)
+ if err != nil {
+ glog.Fatalf("put path specific store: %v", err)
+ }
+}
+
+func (fsw *FilerStoreWrapper) getActualStore(path util.FullPath) (store FilerStore) {
+ store = fsw.defaultStore
+ if path == "/" {
+ return
+ }
+ var storeId string
+ fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
+ storeId = value.(string)
+ return false
+ })
+ if storeId != "" {
+ store = fsw.storeIdToStore[storeId]
+ }
+ return
+}
+
+func (fsw *FilerStoreWrapper) getDefaultStore() (store FilerStore) {
+ return fsw.defaultStore
+}
+
+func (fsw *FilerStoreWrapper) GetName() string {
+ return fsw.getDefaultStore().GetName()
+}
+
+func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error {
+ return fsw.getDefaultStore().Initialize(configuration, prefix)
+}
+
+func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error {
+ actualStore := fsw.getActualStore(entry.FullPath)
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "insert").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
+ }()
+
+ filer_pb.BeforeEntrySerialization(entry.Chunks)
+ if entry.Mime == "application/octet-stream" {
+ entry.Mime = ""
+ }
+
+ if err := fsw.handleUpdateToHardLinks(ctx, entry); err != nil {
+ return err
+ }
+
+ glog.V(4).Infof("InsertEntry %s", entry.FullPath)
+ return actualStore.InsertEntry(ctx, entry)
+}
+
+func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error {
+ actualStore := fsw.getActualStore(entry.FullPath)
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "update").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
+ }()
+
+ filer_pb.BeforeEntrySerialization(entry.Chunks)
+ if entry.Mime == "application/octet-stream" {
+ entry.Mime = ""
+ }
+
+ if err := fsw.handleUpdateToHardLinks(ctx, entry); err != nil {
+ return err
+ }
+
+ glog.V(4).Infof("UpdateEntry %s", entry.FullPath)
+ return actualStore.UpdateEntry(ctx, entry)
+}
+
+func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) {
+ actualStore := fsw.getActualStore(fp)
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "find").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
+ }()
+
+ entry, err = actualStore.FindEntry(ctx, fp)
+ glog.V(4).Infof("FindEntry %s: %v", fp, err)
+ if err != nil {
+ return nil, err
+ }
+
+ fsw.maybeReadHardLink(ctx, entry)
+
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ return
+}
+
+func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ actualStore := fsw.getActualStore(fp)
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "delete").Observe(time.Since(start).Seconds())
+ }()
+
+ existingEntry, findErr := fsw.FindEntry(ctx, fp)
+ if findErr == filer_pb.ErrNotFound {
+ return nil
+ }
+ if findErr != nil {
+ return findErr
+ }
+ if len(existingEntry.HardLinkId) != 0 {
+ // remove hard link
+ glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
+ if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
+ return err
+ }
+ }
+
+ glog.V(4).Infof("DeleteEntry %s", fp)
+ return actualStore.DeleteEntry(ctx, fp)
+}
+
+func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry *Entry) (err error) {
+ actualStore := fsw.getActualStore(existingEntry.FullPath)
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "delete").Observe(time.Since(start).Seconds())
+ }()
+
+ if len(existingEntry.HardLinkId) != 0 {
+ // remove hard link
+ glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
+ if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
+ return err
+ }
+ }
+
+ glog.V(4).Infof("DeleteOneEntry %s", existingEntry.FullPath)
+ return actualStore.DeleteEntry(ctx, existingEntry.FullPath)
+}
+
+func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) {
+ actualStore := fsw.getActualStore(fp + "/")
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds())
+ }()
+
+ glog.V(4).Infof("DeleteFolderChildren %s", fp)
+ return actualStore.DeleteFolderChildren(ctx, fp)
+}
+
+func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {
+ actualStore := fsw.getActualStore(dirPath + "/")
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "list").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "list").Observe(time.Since(start).Seconds())
+ }()
+
+ glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
+ return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
+ fsw.maybeReadHardLink(ctx, entry)
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ return eachEntryFunc(entry)
+ })
+}
+
+func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+ actualStore := fsw.getActualStore(dirPath + "/")
+ stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "prefixList").Inc()
+ start := time.Now()
+ defer func() {
+ stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds())
+ }()
+ glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
+ lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc)
+ if err == ErrUnsupportedListDirectoryPrefixed {
+ lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
+ fsw.maybeReadHardLink(ctx, entry)
+ filer_pb.AfterEntryDeserialization(entry.Chunks)
+ return eachEntryFunc(entry)
+ })
+ }
+ return lastFileName, err
+}
+
+func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+ actualStore := fsw.getActualStore(dirPath + "/")
+
+ if prefix == "" {
+ return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
+ }
+
+ var notPrefixed []*Entry
+ lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
+ notPrefixed = append(notPrefixed, entry)
+ return true
+ })
+ if err != nil {
+ return
+ }
+
+ count := int64(0)
+ for count < limit && len(notPrefixed) > 0 {
+ for _, entry := range notPrefixed {
+ if strings.HasPrefix(entry.Name(), prefix) {
+ count++
+ if !eachEntryFunc(entry) {
+ return
+ }
+ if count >= limit {
+ break
+ }
+ }
+ }
+ if count < limit {
+ notPrefixed = notPrefixed[:0]
+ _, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit, func(entry *Entry) bool {
+ notPrefixed = append(notPrefixed, entry)
+ return true
+ })
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return fsw.getDefaultStore().BeginTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error {
+ return fsw.getDefaultStore().CommitTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
+ return fsw.getDefaultStore().RollbackTransaction(ctx)
+}
+
+func (fsw *FilerStoreWrapper) Shutdown() {
+ fsw.getDefaultStore().Shutdown()
+}
+
+func (fsw *FilerStoreWrapper) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ return fsw.getDefaultStore().KvPut(ctx, key, value)
+}
+func (fsw *FilerStoreWrapper) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ return fsw.getDefaultStore().KvGet(ctx, key)
+}
+func (fsw *FilerStoreWrapper) KvDelete(ctx context.Context, key []byte) (err error) {
+ return fsw.getDefaultStore().KvDelete(ctx, key)
+}
diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go
new file mode 100644
index 000000000..e0d878ca7
--- /dev/null
+++ b/weed/filer/hbase/hbase_store.go
@@ -0,0 +1,231 @@
+package hbase
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/tsuna/gohbase"
+ "github.com/tsuna/gohbase/hrpc"
+ "io"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &HbaseStore{})
+}
+
+type HbaseStore struct {
+ Client gohbase.Client
+ table []byte
+ cfKv string
+ cfMetaDir string
+ column string
+}
+
+func (store *HbaseStore) GetName() string {
+ return "hbase"
+}
+
+func (store *HbaseStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"zkquorum"),
+ configuration.GetString(prefix+"table"),
+ )
+}
+
+func (store *HbaseStore) initialize(zkquorum, table string) (err error) {
+ store.Client = gohbase.NewClient(zkquorum)
+ store.table = []byte(table)
+ store.cfKv = "kv"
+ store.cfMetaDir = "meta"
+ store.column = "a"
+
+ // check table exists
+ key := "whatever"
+ headers := map[string][]string{store.cfMetaDir: nil}
+ get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers))
+ if err != nil {
+ return fmt.Errorf("NewGet returned an error: %v", err)
+ }
+ _, err = store.Client.Get(get)
+ if err != gohbase.TableNotFound {
+ return nil
+ }
+
+ // create table
+ adminClient := gohbase.NewAdminClient(zkquorum)
+ cFamilies := []string{store.cfKv, store.cfMetaDir}
+ cf := make(map[string]map[string]string, len(cFamilies))
+ for _, f := range cFamilies {
+ cf[f] = nil
+ }
+ ct := hrpc.NewCreateTable(context.Background(), []byte(table), cf)
+ if err := adminClient.CreateTable(ct); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) error {
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+ if len(entry.Chunks) > 50 {
+ value = util.MaybeGzipData(value)
+ }
+
+ return store.doPut(ctx, store.cfMetaDir, []byte(entry.FullPath), value, entry.TtlSec)
+}
+
+func (store *HbaseStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *HbaseStore) FindEntry(ctx context.Context, path util.FullPath) (entry *filer.Entry, err error) {
+ value, err := store.doGet(ctx, store.cfMetaDir, []byte(path))
+ if err != nil {
+ if err == filer.ErrKvNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ return nil, err
+ }
+
+ entry = &filer.Entry{
+ FullPath: path,
+ }
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+ return entry, nil
+}
+
+func (store *HbaseStore) DeleteEntry(ctx context.Context, path util.FullPath) (err error) {
+ return store.doDelete(ctx, store.cfMetaDir, []byte(path))
+}
+
+func (store *HbaseStore) DeleteFolderChildren(ctx context.Context, path util.FullPath) (err error) {
+
+ family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}}
+ expectedPrefix := []byte(path.Child(""))
+ scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family))
+ if err != nil {
+ return err
+ }
+
+ scanner := store.Client.Scan(scan)
+ defer scanner.Close()
+ for {
+ res, err := scanner.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if len(res.Cells) == 0 {
+ continue
+ }
+ cell := res.Cells[0]
+
+ if !bytes.HasPrefix(cell.Row, expectedPrefix) {
+ break
+ }
+ fullpath := util.FullPath(cell.Row)
+ dir, _ := fullpath.DirAndName()
+ if dir != string(path) {
+ continue
+ }
+
+ if err = store.doDelete(ctx, store.cfMetaDir, cell.Row); err != nil {
+ return err
+ }
+
+ }
+ return
+}
+
+func (store *HbaseStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (string, error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}}
+ expectedPrefix := []byte(dirPath.Child(prefix))
+ scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family))
+ if err != nil {
+ return lastFileName, err
+ }
+
+ scanner := store.Client.Scan(scan)
+ defer scanner.Close()
+ for {
+ res, err := scanner.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return lastFileName, err
+ }
+ if len(res.Cells) == 0 {
+ continue
+ }
+ cell := res.Cells[0]
+
+ if !bytes.HasPrefix(cell.Row, expectedPrefix) {
+ break
+ }
+
+ fullpath := util.FullPath(cell.Row)
+ dir, fileName := fullpath.DirAndName()
+ if dir != string(dirPath) {
+ continue
+ }
+
+ value := cell.Value
+
+ if fileName == startFileName && !includeStartFile {
+ continue
+ }
+
+ limit--
+ if limit < 0 {
+ break
+ }
+
+ lastFileName = fileName
+
+ entry := &filer.Entry{
+ FullPath: fullpath,
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
+ glog.V(0).Infof("list %s : %v", entry.FullPath, decodeErr)
+ return lastFileName, decodeErr
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
+ }
+
+ return lastFileName, nil
+}
+
+func (store *HbaseStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+
+func (store *HbaseStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *HbaseStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *HbaseStore) Shutdown() {
+ store.Client.Close()
+}
diff --git a/weed/filer/hbase/hbase_store_kv.go b/weed/filer/hbase/hbase_store_kv.go
new file mode 100644
index 000000000..990e55a24
--- /dev/null
+++ b/weed/filer/hbase/hbase_store_kv.go
@@ -0,0 +1,76 @@
+package hbase
+
+import (
+ "context"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/tsuna/gohbase/hrpc"
+ "time"
+)
+
+const (
+ COLUMN_NAME = "a"
+)
+
+func (store *HbaseStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+ return store.doPut(ctx, store.cfKv, key, value, 0)
+}
+
+func (store *HbaseStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ return store.doGet(ctx, store.cfKv, key)
+}
+
+func (store *HbaseStore) KvDelete(ctx context.Context, key []byte) (err error) {
+ return store.doDelete(ctx, store.cfKv, key)
+}
+
+func (store *HbaseStore) doPut(ctx context.Context, cf string, key, value []byte, ttlSecond int32) (err error) {
+ if ttlSecond > 0 {
+ return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal), hrpc.TTL(time.Duration(ttlSecond)*time.Second))
+ }
+ return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal))
+}
+
+func (store *HbaseStore) doPutWithOptions(ctx context.Context, cf string, key, value []byte, options ...func(hrpc.Call) error) (err error) {
+ values := map[string]map[string][]byte{cf: map[string][]byte{}}
+ values[cf][COLUMN_NAME] = value
+ putRequest, err := hrpc.NewPut(ctx, store.table, key, values, options...)
+ if err != nil {
+ return err
+ }
+ _, err = store.Client.Put(putRequest)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (store *HbaseStore) doGet(ctx context.Context, cf string, key []byte) (value []byte, err error) {
+ family := map[string][]string{cf: {COLUMN_NAME}}
+ getRequest, err := hrpc.NewGet(context.Background(), store.table, key, hrpc.Families(family))
+ if err != nil {
+ return nil, err
+ }
+ getResp, err := store.Client.Get(getRequest)
+ if err != nil {
+ return nil, err
+ }
+ if len(getResp.Cells) == 0 {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return getResp.Cells[0].Value, nil
+}
+
+func (store *HbaseStore) doDelete(ctx context.Context, cf string, key []byte) (err error) {
+ values := map[string]map[string][]byte{cf: map[string][]byte{}}
+ values[cf][COLUMN_NAME] = nil
+ deleteRequest, err := hrpc.NewDel(ctx, store.table, key, values, hrpc.Durability(hrpc.AsyncWal))
+ if err != nil {
+ return err
+ }
+ _, err = store.Client.Delete(deleteRequest)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
similarity index 64%
rename from weed/filer2/leveldb/leveldb_store.go
rename to weed/filer/leveldb/leveldb_store.go
index 4952b3b3a..ce454f36a 100644
--- a/weed/filer2/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -4,13 +4,15 @@ import (
"bytes"
"context"
"fmt"
-
"github.com/syndtr/goleveldb/leveldb"
+ leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+ "os"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@@ -19,7 +21,7 @@ const (
)
func init() {
- filer2.Stores = append(filer2.Stores, &LevelDBStore{})
+ filer.Stores = append(filer.Stores, &LevelDBStore{})
}
type LevelDBStore struct {
@@ -30,13 +32,14 @@ func (store *LevelDBStore) GetName() string {
return "leveldb"
}
-func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) {
- dir := configuration.GetString("dir")
+func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
return store.initialize(dir)
}
func (store *LevelDBStore) initialize(dir string) (err error) {
glog.Infof("filer store dir: %s", dir)
+ os.MkdirAll(dir, 0755)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
@@ -48,8 +51,13 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
}
if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
- glog.Infof("filer store open dir %s: %v", dir, err)
- return
+ if leveldb_errors.IsCorrupted(err) {
+ store.db, err = leveldb.RecoverFile(dir, opts)
+ }
+ if err != nil {
+ glog.Infof("filer store open dir %s: %v", dir, err)
+ return
+ }
}
return
}
@@ -64,7 +72,7 @@ func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error {
return nil
}
-func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
key := genKey(entry.DirAndName())
value, err := entry.EncodeAttributesAndChunks()
@@ -72,6 +80,10 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry)
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
+ if len(entry.Chunks) > 50 {
+ value = weed_util.MaybeGzipData(value)
+ }
+
err = store.db.Put(key, value, nil)
if err != nil {
@@ -83,27 +95,27 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry)
return nil
}
-func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
-func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
key := genKey(fullpath.DirAndName())
data, err := store.db.Get(key, nil)
if err == leveldb.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
- entry = &filer2.Entry{
+ entry = &filer.Entry{
FullPath: fullpath,
}
- err = entry.DecodeAttributesAndChunks(data)
+ err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData((data)))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
@@ -113,7 +125,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa
return entry, nil
}
-func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
err = store.db.Delete(key, nil)
@@ -124,7 +136,7 @@ func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full
return nil
}
-func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
batch := new(leveldb.Batch)
@@ -152,12 +164,19 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath fi
return nil
}
-func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
+func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil)
+ directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+ }
+
+ iter := store.db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
@@ -167,26 +186,29 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath fi
if fileName == "" {
continue
}
- if fileName == startFileName && !inclusive {
+ if fileName == startFileName && !includeStartFile {
continue
}
limit--
if limit < 0 {
break
}
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ lastFileName = fileName
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
- if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
+ if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
iter.Release()
- return entries, err
+ return lastFileName, err
}
func genKey(dirPath, fileName string) (key []byte) {
@@ -196,7 +218,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@@ -215,3 +237,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
+
+func (store *LevelDBStore) Shutdown() {
+ store.db.Close()
+}
diff --git a/weed/filer/leveldb/leveldb_store_kv.go b/weed/filer/leveldb/leveldb_store_kv.go
new file mode 100644
index 000000000..f686cbf21
--- /dev/null
+++ b/weed/filer/leveldb/leveldb_store_kv.go
@@ -0,0 +1,45 @@
+package leveldb
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ err = store.db.Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ value, err = store.db.Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ return
+}
+
+func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ err = store.db.Delete(key, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
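
Note: each KV backend in this patch translates its native not-found error (here leveldb.ErrNotFound) into the shared filer.ErrKvNotFound sentinel, so callers can distinguish a soft miss from a hard failure without importing the backend. A hypothetical caller sketch, assuming FilerStore exposes KvGet as it is used elsewhere in this patch:

    package sketch

    import (
        "context"

        "github.com/chrislusf/seaweedfs/weed/filer"
    )

    // kvGetOrDefault treats "key absent" as a soft miss and returns def;
    // any other error is propagated as a real failure.
    func kvGetOrDefault(ctx context.Context, store filer.FilerStore, key, def []byte) ([]byte, error) {
        value, err := store.KvGet(ctx, key)
        if err == filer.ErrKvNotFound {
            return def, nil
        }
        if err != nil {
            return nil, err
        }
        return value, nil
    }
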
diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go
new file mode 100644
index 000000000..d437895f5
--- /dev/null
+++ b/weed/filer/leveldb/leveldb_store_test.go
@@ -0,0 +1,115 @@
+package leveldb
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestCreateAndFind(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+ defer os.RemoveAll(dir)
+ store := &LevelDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
+
+ entry1 := &filer.Entry{
+ FullPath: fullpath,
+ Attr: filer.Attr{
+ Mode: 0440,
+ Uid: 1234,
+ Gid: 5678,
+ },
+ }
+
+ if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+ t.Errorf("create entry %v: %v", entry1.FullPath, err)
+ return
+ }
+
+ entry, err := testFiler.FindEntry(ctx, fullpath)
+
+ if err != nil {
+ t.Errorf("find entry: %v", err)
+ return
+ }
+
+ if entry.FullPath != entry1.FullPath {
+ t.Errorf("find wrong entry: %v", entry.FullPath)
+ return
+ }
+
+ // checking one upper directory
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+ // checking one upper directory
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func TestEmptyRoot(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+ defer os.RemoveAll(dir)
+ store := &LevelDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ // checking one upper directory
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
+ if err != nil {
+ t.Errorf("list entries: %v", err)
+ return
+ }
+ if len(entries) != 0 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func BenchmarkInsertEntry(b *testing.B) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
+ defer os.RemoveAll(dir)
+ store := &LevelDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ entry := &filer.Entry{
+ FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
+ Attr: filer.Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ },
+ }
+ store.InsertEntry(ctx, entry)
+ }
+}
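
Note: the new test file doubles as a micro-benchmark for raw insert throughput into the store. Standard Go tooling runs it in isolation; nothing here is patch-specific:

    go test -run 'TestCreateAndFind|TestEmptyRoot' ./weed/filer/leveldb/
    go test -bench=BenchmarkInsertEntry -benchmem ./weed/filer/leveldb/
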
diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
similarity index 68%
rename from weed/filer2/leveldb2/leveldb2_store.go
rename to weed/filer/leveldb2/leveldb2_store.go
index 8a16822ab..124d61c1c 100644
--- a/weed/filer2/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -5,20 +5,21 @@ import (
"context"
"crypto/md5"
"fmt"
- "io"
- "os"
-
"github.com/syndtr/goleveldb/leveldb"
+ leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+ "io"
+ "os"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
- filer2.Stores = append(filer2.Stores, &LevelDB2Store{})
+ filer.Stores = append(filer.Stores, &LevelDB2Store{})
}
type LevelDB2Store struct {
@@ -30,13 +31,14 @@ func (store *LevelDB2Store) GetName() string {
return "leveldb2"
}
-func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) {
- dir := configuration.GetString("dir")
+func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
return store.initialize(dir, 8)
}
func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
glog.Infof("filer store leveldb2 dir: %s", dir)
+ os.MkdirAll(dir, 0755)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
@@ -51,9 +53,12 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
dbFolder := fmt.Sprintf("%s/%02d", dir, d)
os.MkdirAll(dbFolder, 0755)
db, dbErr := leveldb.OpenFile(dbFolder, opts)
+ if leveldb_errors.IsCorrupted(dbErr) {
+ db, dbErr = leveldb.RecoverFile(dbFolder, opts)
+ }
if dbErr != nil {
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
- return
+ return dbErr
}
store.dbs = append(store.dbs, db)
}
@@ -72,7 +77,7 @@ func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error {
return nil
}
-func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
@@ -81,6 +86,10 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
+ if len(entry.Chunks) > 50 {
+ value = weed_util.MaybeGzipData(value)
+ }
+
err = store.dbs[partitionId].Put(key, value, nil)
if err != nil {
@@ -92,28 +101,28 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry
return nil
}
-func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
-func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
dir, name := fullpath.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
data, err := store.dbs[partitionId].Get(key, nil)
if err == leveldb.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
- entry = &filer2.Entry{
+ entry = &filer.Entry{
FullPath: fullpath,
}
- err = entry.DecodeAttributesAndChunks(data)
+ err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
@@ -123,7 +132,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP
return entry, nil
}
-func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
dir, name := fullpath.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
@@ -135,7 +144,7 @@ func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful
return nil
}
-func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
batch := new(leveldb.Batch)
@@ -163,11 +172,17 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath f
return nil
}
-func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
+func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
- lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
+ directoryPrefix, partitionId := genDirectoryKeyPrefix(dirPath, prefix, store.dbCount)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart, _ = genDirectoryKeyPrefix(dirPath, startFileName, store.dbCount)
+ }
iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
@@ -179,29 +194,31 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath f
if fileName == "" {
continue
}
- if fileName == startFileName && !inclusive {
+ if fileName == startFileName && !includeStartFile {
continue
}
limit--
if limit < 0 {
break
}
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ lastFileName = fileName
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
}
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
-
- if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
+ if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
- entries = append(entries, entry)
+ if !eachEntryFunc(entry) {
+ break
+ }
}
iter.Release()
- return entries, err
+ return lastFileName, err
}
func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) {
@@ -210,7 +227,7 @@ func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int)
return key, partitionId
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount)
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
@@ -235,3 +252,9 @@ func hashToBytes(dir string, dbCount int) ([]byte, int) {
return b, int(x) % dbCount
}
+
+func (store *LevelDB2Store) Shutdown() {
+ for d := 0; d < store.dbCount; d++ {
+ store.dbs[d].Close()
+ }
+}
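
Note: leveldb2 spreads one logical store across dbCount LevelDB instances. The shard is derived from the md5 hash of the directory path, so all children of a directory share both the same partition and the same key prefix, keeping sibling entries contiguous for the prefix scans above. An illustrative reimplementation of that bucketing (the real helper is hashToBytes; picking the shard from the last hash byte is an assumption consistent with the visible `int(x) % dbCount`):

    package sketch

    import "crypto/md5"

    // shardFor hashes a directory path into a 16-byte key prefix and picks
    // one of dbCount partitions from the final hash byte. Keys for the same
    // directory therefore sort together inside one LevelDB instance.
    func shardFor(dir string, dbCount int) (keyPrefix []byte, partitionId int) {
        sum := md5.Sum([]byte(dir))
        keyPrefix = sum[:]
        partitionId = int(sum[len(sum)-1]) % dbCount
        return keyPrefix, partitionId
    }
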
diff --git a/weed/filer/leveldb2/leveldb2_store_kv.go b/weed/filer/leveldb2/leveldb2_store_kv.go
new file mode 100644
index 000000000..b415d3c32
--- /dev/null
+++ b/weed/filer/leveldb2/leveldb2_store_kv.go
@@ -0,0 +1,56 @@
+package leveldb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDB2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ partitionId := bucketKvKey(key, store.dbCount)
+
+ err = store.dbs[partitionId].Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv bucket %d put: %v", partitionId, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ partitionId := bucketKvKey(key, store.dbCount)
+
+ value, err = store.dbs[partitionId].Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv bucket %d get: %v", partitionId, err)
+ }
+
+ return
+}
+
+func (store *LevelDB2Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ partitionId := bucketKvKey(key, store.dbCount)
+
+ err = store.dbs[partitionId].Delete(key, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv bucket %d delete: %v", partitionId, err)
+ }
+
+ return nil
+}
+
+func bucketKvKey(key []byte, dbCount int) (partitionId int) {
+ return int(key[len(key)-1]) % dbCount
+}
diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go
similarity index 59%
rename from weed/filer2/leveldb2/leveldb2_store_test.go
rename to weed/filer/leveldb2/leveldb2_store_test.go
index e28ef7dac..fd0ad18a3 100644
--- a/weed/filer2/leveldb2/leveldb2_store_test.go
+++ b/weed/filer/leveldb2/leveldb2_store_test.go
@@ -2,40 +2,41 @@ package leveldb
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
store.initialize(dir, 2)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
+ testFiler.SetStore(store)
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
- entry1 := &filer2.Entry{
+ entry1 := &filer.Entry{
FullPath: fullpath,
- Attr: filer2.Attr{
+ Attr: filer.Attr{
Mode: 0440,
Uid: 1234,
Gid: 5678,
},
}
- if err := filer.CreateEntry(ctx, entry1); err != nil {
+ if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
- entry, err := filer.FindEntry(ctx, fullpath)
+ entry, err := testFiler.FindEntry(ctx, fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
@@ -48,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
- entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -64,18 +65,17 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
store.initialize(dir, 2)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
+ testFiler.SetStore(store)
ctx := context.Background()
// checking one upper directory
- entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go
new file mode 100644
index 000000000..d1cdfbbf6
--- /dev/null
+++ b/weed/filer/leveldb3/leveldb3_store.go
@@ -0,0 +1,376 @@
+package leveldb
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "fmt"
+ "github.com/syndtr/goleveldb/leveldb"
+ leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
+ "io"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DEFAULT = "_main"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &LevelDB3Store{})
+}
+
+type LevelDB3Store struct {
+ dir string
+ dbs map[string]*leveldb.DB
+ dbsLock sync.RWMutex
+}
+
+func (store *LevelDB3Store) GetName() string {
+ return "leveldb3"
+}
+
+func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
+ return store.initialize(dir)
+}
+
+func (store *LevelDB3Store) initialize(dir string) (err error) {
+ glog.Infof("filer store leveldb3 dir: %s", dir)
+ os.MkdirAll(dir, 0755)
+ if err := weed_util.TestFolderWritable(dir); err != nil {
+ return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
+ }
+ store.dir = dir
+
+ db, loadDbErr := store.loadDB(DEFAULT)
+ if loadDbErr != nil {
+ return loadDbErr
+ }
+ store.dbs = make(map[string]*leveldb.DB)
+ store.dbs[DEFAULT] = db
+
+ return
+}
+
+func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
+
+ opts := &opt.Options{
+ BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+ if name != DEFAULT {
+ opts = &opt.Options{
+ BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 4,
+ }
+ }
+
+ dbFolder := fmt.Sprintf("%s/%s", store.dir, name)
+ os.MkdirAll(dbFolder, 0755)
+ db, dbErr := leveldb.OpenFile(dbFolder, opts)
+ if leveldb_errors.IsCorrupted(dbErr) {
+ db, dbErr = leveldb.RecoverFile(dbFolder, opts)
+ }
+ if dbErr != nil {
+ glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+ return nil, dbErr
+ }
+ return db, nil
+}
+
+func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bool) (*leveldb.DB, string, weed_util.FullPath, error) {
+
+ store.dbsLock.RLock()
+
+ defaultDB := store.dbs[DEFAULT]
+ if !strings.HasPrefix(string(fullpath), "/buckets/") {
+ store.dbsLock.RUnlock()
+ return defaultDB, DEFAULT, fullpath, nil
+ }
+
+ // detect bucket
+ bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 && !isForChildren {
+ store.dbsLock.RUnlock()
+ return defaultDB, DEFAULT, fullpath, nil
+ }
+ bucket := bucketAndObjectKey
+ shortPath := weed_util.FullPath("/")
+ if t > 0 {
+ bucket = bucketAndObjectKey[:t]
+ shortPath = weed_util.FullPath(bucketAndObjectKey[t:])
+ }
+
+ if db, found := store.dbs[bucket]; found {
+ store.dbsLock.RUnlock()
+ return db, bucket, shortPath, nil
+ }
+
+ store.dbsLock.RUnlock()
+ // upgrade to write lock
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ // double check after getting the write lock
+ if db, found := store.dbs[bucket]; found {
+ return db, bucket, shortPath, nil
+ }
+
+ // create db
+ db, err := store.loadDB(bucket)
+ if err != nil {
+ return nil, bucket, shortPath, err
+ }
+ store.dbs[bucket] = db
+
+ return db, bucket, shortPath, nil
+}
+
+func (store *LevelDB3Store) closeDB(bucket string) {
+
+ store.dbsLock.Lock()
+ defer store.dbsLock.Unlock()
+
+ if db, found := store.dbs[bucket]; found {
+ db.Close()
+ delete(store.dbs, bucket)
+ }
+
+}
+
+func (store *LevelDB3Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *LevelDB3Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *LevelDB3Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ db, _, shortPath, err := store.findDB(entry.FullPath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ key := genKey(dir, name)
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ value = weed_util.MaybeGzipData(value)
+ }
+
+ err = db.Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+
+ return nil
+}
+
+func (store *LevelDB3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
+
+ db, _, shortPath, err := store.findDB(fullpath, false)
+ if err != nil {
+ return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ key := genKey(dir, name)
+
+ data, err := db.Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer_pb.ErrNotFound
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+
+ return entry, nil
+}
+
+func (store *LevelDB3Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+
+ db, _, shortPath, err := store.findDB(fullpath, false)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ dir, name := shortPath.DirAndName()
+ key := genKey(dir, name)
+
+ err = db.Delete(key, nil)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB3Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+
+ db, bucket, shortPath, err := store.findDB(fullpath, true)
+ if err != nil {
+ return fmt.Errorf("findDB %s : %v", fullpath, err)
+ }
+
+ if bucket != DEFAULT && shortPath == "/" {
+ store.closeDB(bucket)
+ if bucket != "" { // just to make sure
+ os.RemoveAll(store.dir + "/" + bucket)
+ }
+ return nil
+ }
+
+ directoryPrefix := genDirectoryKeyPrefix(shortPath, "")
+
+ batch := new(leveldb.Batch)
+
+ iter := db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ batch.Delete(append(directoryPrefix, []byte(fileName)...))
+ }
+ iter.Release()
+
+ err = db.Write(batch, nil)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB3Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ db, _, shortPath, err := store.findDB(dirPath, true)
+ if err != nil {
+ return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
+ }
+
+ directoryPrefix := genDirectoryKeyPrefix(shortPath, prefix)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(shortPath, startFileName)
+ }
+
+ iter := db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
+ for iter.Next() {
+ key := iter.Key()
+ if !bytes.HasPrefix(key, directoryPrefix) {
+ break
+ }
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ continue
+ }
+ if fileName == startFileName && !includeStartFile {
+ continue
+ }
+ limit--
+ if limit < 0 {
+ break
+ }
+ lastFileName = fileName
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
+ }
+
+ // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+ if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
+ }
+ iter.Release()
+
+ return lastFileName, err
+}
+
+func genKey(dirPath, fileName string) (key []byte) {
+ key = hashToBytes(dirPath)
+ key = append(key, []byte(fileName)...)
+ return key
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
+ keyPrefix = hashToBytes(string(fullpath))
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix
+}
+
+func getNameFromKey(key []byte) string {
+
+ return string(key[md5.Size:])
+
+}
+
+// hash directory
+func hashToBytes(dir string) []byte {
+ h := md5.New()
+ io.WriteString(h, dir)
+ b := h.Sum(nil)
+ return b
+}
+
+func (store *LevelDB3Store) Shutdown() {
+ for _, db := range store.dbs {
+ db.Close()
+ }
+}
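
Note: findDB gives each /buckets/<name>/ subtree a lazily created LevelDB of its own, using the classic read-lock then double-checked write-lock upgrade. The path routing is simple enough to isolate; a simplified sketch (it folds the isForChildren special case into the default branch):

    package sketch

    import "strings"

    // routeBucket maps an absolute filer path to the per-bucket db name and
    // the path re-rooted inside that bucket. Anything outside /buckets/
    // stays in the shared "_main" db.
    func routeBucket(fullpath string) (dbName string, shortPath string) {
        const bucketsPrefix = "/buckets/"
        if !strings.HasPrefix(fullpath, bucketsPrefix) {
            return "_main", fullpath
        }
        rest := fullpath[len(bucketsPrefix):]
        if i := strings.Index(rest, "/"); i > 0 {
            return rest[:i], rest[i:] // "/buckets/b1/x/y" -> db "b1", path "/x/y"
        }
        return rest, "/" // the bucket directory itself
    }
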
diff --git a/weed/filer/leveldb3/leveldb3_store_kv.go b/weed/filer/leveldb3/leveldb3_store_kv.go
new file mode 100644
index 000000000..18d782b80
--- /dev/null
+++ b/weed/filer/leveldb3/leveldb3_store_kv.go
@@ -0,0 +1,46 @@
+package leveldb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ err = store.dbs[DEFAULT].Put(key, value, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ value, err = store.dbs[DEFAULT].Get(key, nil)
+
+ if err == leveldb.ErrNotFound {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ return
+}
+
+func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ err = store.dbs[DEFAULT].Delete(key, nil)
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go
similarity index 56%
rename from weed/filer2/leveldb/leveldb_store_test.go
rename to weed/filer/leveldb3/leveldb3_store_test.go
index 904de8c97..0b970a539 100644
--- a/weed/filer2/leveldb/leveldb_store_test.go
+++ b/weed/filer/leveldb3/leveldb3_store_test.go
@@ -2,40 +2,41 @@ package leveldb
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
- store := &LevelDBStore{}
+ store := &LevelDB3Store{}
store.initialize(dir)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
+ testFiler.SetStore(store)
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
- entry1 := &filer2.Entry{
+ entry1 := &filer.Entry{
FullPath: fullpath,
- Attr: filer2.Attr{
+ Attr: filer.Attr{
Mode: 0440,
Uid: 1234,
Gid: 5678,
},
}
- if err := filer.CreateEntry(ctx, entry1); err != nil {
+ if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
- entry, err := filer.FindEntry(ctx, fullpath)
+ entry, err := testFiler.FindEntry(ctx, fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
@@ -48,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
- entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -64,18 +65,17 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
- store := &LevelDBStore{}
+ store := &LevelDB3Store{}
store.initialize(dir)
- filer.SetStore(store)
- filer.DisableDirectoryCache()
+ testFiler.SetStore(store)
ctx := context.Background()
// checking one upper directory
- entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go
new file mode 100644
index 000000000..5c368a57e
--- /dev/null
+++ b/weed/filer/meta_aggregator.go
@@ -0,0 +1,213 @@
+package filer
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+)
+
+type MetaAggregator struct {
+ filers []string
+ grpcDialOption grpc.DialOption
+ MetaLogBuffer *log_buffer.LogBuffer
+ // notifying clients
+ ListenersLock sync.Mutex
+ ListenersCond *sync.Cond
+}
+
+// MetaAggregator aggregates metadata changes "on the fly" only; the aggregated logs are not re-persisted to disk.
+// Historical data comes from what each filer's local metadata store has already persisted on disk.
+func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator {
+ t := &MetaAggregator{
+ filers: filers,
+ grpcDialOption: grpcDialOption,
+ }
+ t.ListenersCond = sync.NewCond(&t.ListenersLock)
+ t.MetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, nil, func() {
+ t.ListenersCond.Broadcast()
+ })
+ return t
+}
+
+func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self string) {
+ for _, filer := range ma.filers {
+ go ma.subscribeToOneFiler(f, self, filer)
+ }
+}
+
+func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string) {
+
+	/*
+		Each filer reads "filer.store.id", the store's signature generated when the filer starts.
+
+		When reading local meta changes from other filers:
+		* if a received change does not carry this filer's own signature, apply it to the current filer store.
+
+		Upon connecting to other filers, remember their signatures and the offsets already consumed.
+	*/
+
+ var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse)
+ lastPersistTime := time.Now()
+ lastTsNs := time.Now().Add(-LogFlushInterval).UnixNano()
+
+ peerSignature, err := ma.readFilerStoreSignature(peer)
+ for err != nil {
+ glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
+ time.Sleep(1357 * time.Millisecond)
+ peerSignature, err = ma.readFilerStoreSignature(peer)
+ }
+
+ // when filer store is not shared by multiple filers
+ if peerSignature != f.Signature {
+ if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil {
+ lastTsNs = prevTsNs
+ }
+
+ glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
+ var counter int64
+ var synced bool
+ maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
+ if err := Replay(f.Store, event); err != nil {
+			glog.Errorf("failed to replay metadata change from %v: %v", peer, err)
+ return
+ }
+ counter++
+ if lastPersistTime.Add(time.Minute).Before(time.Now()) {
+ if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
+ if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
+ glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
+ } else if !synced {
+ synced = true
+ glog.V(0).Infof("synced with %s", peer)
+ }
+ lastPersistTime = time.Now()
+ counter = 0
+ } else {
+ glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
+ }
+ }
+ }
+ }
+
+ processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return err
+ }
+ dir := event.Directory
+ // println("received meta change", dir, "size", len(data))
+ ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, 0)
+ if maybeReplicateMetadataChange != nil {
+ maybeReplicateMetadataChange(event)
+ }
+ return nil
+ }
+
+ for {
+ err := pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "filer:" + self,
+ PathPrefix: "/",
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return fmt.Errorf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+
+ f.onMetadataChangeEvent(resp)
+
+ }
+ })
+ if err != nil {
+ glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+ time.Sleep(1733 * time.Millisecond)
+ }
+ }
+}
+
+func (ma *MetaAggregator) readFilerStoreSignature(peer string) (sig int32, err error) {
+ err = pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return err
+ }
+ sig = resp.Signature
+ return nil
+ })
+ return
+}
+
+const (
+ MetaOffsetPrefix = "Meta"
+)
+
+func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32) (lastTsNs int64, err error) {
+
+ key := []byte(MetaOffsetPrefix + "xxxx")
+ util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
+
+ value, err := f.Store.KvGet(context.Background(), key)
+
+ if err == ErrKvNotFound {
+ glog.Warningf("readOffset %s not found", peer)
+ return 0, nil
+ }
+
+ if err != nil {
+ return 0, fmt.Errorf("readOffset %s : %v", peer, err)
+ }
+
+ lastTsNs = int64(util.BytesToUint64(value))
+
+ glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
+
+ return
+}
+
+func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int32, lastTsNs int64) (err error) {
+
+ key := []byte(MetaOffsetPrefix + "xxxx")
+ util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
+
+ value := make([]byte, 8)
+ util.Uint64toBytes(value, uint64(lastTsNs))
+
+ err = f.Store.KvPut(context.Background(), key, value)
+
+ if err != nil {
+ return fmt.Errorf("updateOffset %s : %v", peer, err)
+ }
+
+ glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
+
+ return
+}
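
Note: readOffset/updateOffset persist per-peer replication progress in the filer's own KV store. The key is the ASCII prefix "Meta" plus the peer's uint32 store signature (the "xxxx" literal above only reserves those 4 bytes before Uint32toBytes overwrites them); the value is the last replicated timestamp in nanoseconds as 8 bytes. A self-contained sketch, assuming the weed util helpers encode big-endian:

    package sketch

    import "encoding/binary"

    const metaOffsetPrefix = "Meta"

    // offsetKey builds the per-peer KV key: "Meta" + 4-byte peer signature.
    func offsetKey(peerSignature uint32) []byte {
        key := make([]byte, len(metaOffsetPrefix)+4)
        copy(key, metaOffsetPrefix)
        binary.BigEndian.PutUint32(key[len(metaOffsetPrefix):], peerSignature)
        return key
    }

    // offsetValue encodes the last-seen timestamp (ns) as an 8-byte value.
    func offsetValue(lastTsNs int64) []byte {
        value := make([]byte, 8)
        binary.BigEndian.PutUint64(value, uint64(lastTsNs))
        return value
    }
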
diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go
new file mode 100644
index 000000000..feb76278b
--- /dev/null
+++ b/weed/filer/meta_replay.go
@@ -0,0 +1,37 @@
+package filer
+
+import (
+ "context"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+ var oldPath util.FullPath
+ var newEntry *Entry
+ if message.OldEntry != nil {
+ oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
+ glog.V(4).Infof("deleting %v", oldPath)
+ if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
+ return err
+ }
+ }
+
+ if message.NewEntry != nil {
+ dir := resp.Directory
+ if message.NewParentPath != "" {
+ dir = message.NewParentPath
+ }
+ key := util.NewFullPath(dir, message.NewEntry.Name)
+ glog.V(4).Infof("creating %v", key)
+ newEntry = FromPbEntry(dir, message.NewEntry)
+ if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
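
Note: Replay applies one subscription event to a local store: delete Directory/OldEntry.Name if OldEntry is set, then insert NewEntry, honoring NewParentPath so a cross-directory rename arrives as a single delete-plus-create event. A hypothetical rename, sketched with the filer_pb fields used above (paths and names illustrative):

    package sketch

    import (
        "github.com/chrislusf/seaweedfs/weed/filer"
        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    )

    // replayRename shows how moving /home/a.txt to /archive/a.txt is encoded.
    func replayRename(store filer.FilerStore) error {
        resp := &filer_pb.SubscribeMetadataResponse{
            Directory: "/home", // where the old entry lived
            EventNotification: &filer_pb.EventNotification{
                OldEntry:      &filer_pb.Entry{Name: "a.txt"}, // Replay deletes /home/a.txt
                NewEntry:      &filer_pb.Entry{Name: "a.txt"}, // ...then inserts the new copy
                NewParentPath: "/archive",                     // overrides Directory for the insert
            },
        }
        return filer.Replay(store, resp)
    }
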
diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go
new file mode 100644
index 000000000..1ef5056f4
--- /dev/null
+++ b/weed/filer/mongodb/mongodb_store.go
@@ -0,0 +1,229 @@
+package mongodb
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "go.mongodb.org/mongo-driver/x/bsonx"
+ "time"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &MongodbStore{})
+}
+
+type MongodbStore struct {
+ connect *mongo.Client
+ database string
+ collectionName string
+}
+
+type Model struct {
+ Directory string `bson:"directory"`
+ Name string `bson:"name"`
+ Meta []byte `bson:"meta"`
+}
+
+func (store *MongodbStore) GetName() string {
+ return "mongodb"
+}
+
+func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ store.database = configuration.GetString(prefix + "database")
+ store.collectionName = "filemeta"
+ poolSize := configuration.GetInt(prefix + "option_pool_size")
+ return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize))
+}
+
+func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+ opts := options.Client().ApplyURI(uri)
+
+ if poolSize > 0 {
+ opts.SetMaxPoolSize(poolSize)
+ }
+
+ client, err := mongo.Connect(ctx, opts)
+ if err != nil {
+ return err
+ }
+
+ c := client.Database(store.database).Collection(store.collectionName)
+ err = store.indexUnique(c)
+ store.connect = client
+ return err
+}
+
+func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error {
+ _, err := c.Indexes().CreateOne(context.Background(), index, opts)
+ return err
+}
+
+func (store *MongodbStore) indexUnique(c *mongo.Collection) error {
+ opts := options.CreateIndexes().SetMaxTime(10 * time.Second)
+
+ unique := new(bool)
+ *unique = true
+
+ index := mongo.IndexModel{
+ Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}},
+ Options: &options.IndexOptions{
+ Unique: unique,
+ },
+ }
+
+ return store.createIndex(c, index, opts)
+}
+
+func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+
+func (store *MongodbStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.UpdateEntry(ctx, entry)
+
+}
+
+func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ dir, name := entry.FullPath.DirAndName()
+ meta, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ meta = util.MaybeGzipData(meta)
+ }
+
+ c := store.connect.Database(store.database).Collection(store.collectionName)
+
+ opts := options.Update().SetUpsert(true)
+ filter := bson.D{{"directory", dir}, {"name", name}}
+ update := bson.D{{"$set", bson.D{{"meta", meta}}}}
+
+ _, err = c.UpdateOne(ctx, filter, update, opts)
+
+ if err != nil {
+ return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
+
+ dir, name := fullpath.DirAndName()
+ var data Model
+
+ var where = bson.M{"directory": dir, "name": name}
+ err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
+	if err == mongo.ErrNoDocuments {
+		return nil, filer_pb.ErrNotFound
+	}
+	if err != nil {
+		glog.Errorf("find %s: %v", fullpath, err)
+		return nil, fmt.Errorf("find %s: %v", fullpath, err)
+	}
+
+ if len(data.Meta) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
+
+ dir, name := fullpath.DirAndName()
+
+ where := bson.M{"directory": dir, "name": name}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ where := bson.M{"directory": fullpath}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}}
+ if includeStartFile {
+ where["name"] = bson.M{
+ "$gte": startFileName,
+ }
+ }
+	optLimit := limit
+ opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}}
+	cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts)
+	if err != nil {
+		return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
+	}
+ for cur.Next(ctx) {
+ var data Model
+ err := cur.Decode(&data)
+ if err != nil && err != mongo.ErrNoDocuments {
+ return lastFileName, err
+ }
+
+ entry := &filer.Entry{
+ FullPath: util.NewFullPath(string(dirPath), data.Name),
+ }
+ lastFileName = data.Name
+ if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+
+ if !eachEntryFunc(entry) {
+ break
+ }
+
+ }
+
+ if err := cur.Close(ctx); err != nil {
+ glog.V(0).Infof("list iterator close: %v", err)
+ }
+
+ return lastFileName, err
+}
+
+func (store *MongodbStore) Shutdown() {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	store.connect.Disconnect(ctx)
+}
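
Note: InsertEntry simply delegates to UpdateEntry, so every metadata write is an upsert keyed by (directory, name); combined with the unique index created in indexUnique, retries are idempotent. The core driver call, isolated (collection and field names as in this file):

    package sketch

    import (
        "context"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    // upsertMeta writes (or overwrites) the serialized entry for one file.
    // The unique index on {directory, name} makes this safe to retry.
    func upsertMeta(ctx context.Context, c *mongo.Collection, dir, name string, meta []byte) error {
        filter := bson.D{{Key: "directory", Value: dir}, {Key: "name", Value: name}}
        update := bson.D{{Key: "$set", Value: bson.D{{Key: "meta", Value: meta}}}}
        _, err := c.UpdateOne(ctx, filter, update, options.Update().SetUpsert(true))
        return err
    }
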
diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go
new file mode 100644
index 000000000..4aa9c3a33
--- /dev/null
+++ b/weed/filer/mongodb/mongodb_store_kv.go
@@ -0,0 +1,72 @@
+package mongodb
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+)
+
+func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ dir, name := genDirAndName(key)
+
+ c := store.connect.Database(store.database).Collection(store.collectionName)
+
+ _, err = c.InsertOne(ctx, Model{
+ Directory: dir,
+ Name: name,
+ Meta: value,
+ })
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+ dir, name := genDirAndName(key)
+
+ var data Model
+
+ var where = bson.M{"directory": dir, "name": name}
+ err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
+	if err == mongo.ErrNoDocuments {
+		return nil, filer.ErrKvNotFound
+	}
+	if err != nil {
+		glog.Errorf("kv get: %v", err)
+		return nil, fmt.Errorf("kv get: %v", err)
+	}
+
+ if len(data.Meta) == 0 {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return data.Meta, nil
+}
+
+func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ dir, name := genDirAndName(key)
+
+ where := bson.M{"directory": dir, "name": name}
+ _, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
+
+func genDirAndName(key []byte) (dir string, name string) {
+ for len(key) < 8 {
+ key = append(key, 0)
+ }
+
+ dir = string(key[:8])
+ name = string(key[8:])
+
+ return
+}
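
Note: the Mongo KV layer reuses the filemeta collection by mapping an arbitrary byte key onto the (directory, name) pair: keys are zero-padded to 8 bytes, the first 8 bytes become "directory" and the remainder "name". A tiny sketch of the same split:

    package sketch

    // splitKvKey mirrors genDirAndName: pad to 8 bytes, then split.
    func splitKvKey(key []byte) (dir, name string) {
        for len(key) < 8 {
            key = append(key, 0)
        }
        return string(key[:8]), string(key[8:])
    }

    // Example: the meta aggregator's "Meta" + 4 signature bytes is exactly
    // 8 bytes, so it maps to dir = the whole key and name = "".
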
diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go
new file mode 100644
index 000000000..93d3e3f9e
--- /dev/null
+++ b/weed/filer/mysql/mysql_sql_gen.go
@@ -0,0 +1,58 @@
+package mysql
+
+import (
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ _ "github.com/go-sql-driver/mysql"
+)
+
+type SqlGenMysql struct {
+ CreateTableSqlTemplate string
+ DropTableSqlTemplate string
+ UpsertQueryTemplate string
+}
+
+var (
+ _ = abstract_sql.SqlGenerator(&SqlGenMysql{})
+)
+
+func (gen *SqlGenMysql) GetSqlInsert(tableName string) string {
+ if gen.UpsertQueryTemplate != "" {
+ return fmt.Sprintf(gen.UpsertQueryTemplate, tableName)
+ } else {
+ return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName)
+ }
+}
+
+func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string {
+ return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlFind(tableName string) string {
+ return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlDelete(tableName string) string {
+ return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string {
+ return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string {
+ return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string {
+ return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string {
+ return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlDropTable(tableName string) string {
+ return fmt.Sprintf(gen.DropTableSqlTemplate, tableName)
+}
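
Note: UpsertQueryTemplate is the new hook here: when set, GetSqlInsert substitutes the operator-supplied statement for the plain INSERT. A candidate value for the upsertQuery option read in mysql_store.go, assuming the default schema's unique key on (dirhash, name) (illustrative, not shipped by this patch):

    // A Go constant holding such a template; fmt.Sprintf fills in the table name.
    const exampleMysqlUpsert = "INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)" +
        " ON DUPLICATE KEY UPDATE meta = VALUES(meta)"
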
diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go
new file mode 100644
index 000000000..fbaa4d5f9
--- /dev/null
+++ b/weed/filer/mysql/mysql_store.go
@@ -0,0 +1,84 @@
+package mysql
+
+import (
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ _ "github.com/go-sql-driver/mysql"
+)
+
+const (
+ CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &MysqlStore{})
+}
+
+type MysqlStore struct {
+ abstract_sql.AbstractSqlStore
+}
+
+func (store *MysqlStore) GetName() string {
+ return "mysql"
+}
+
+func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"upsertQuery"),
+ configuration.GetBool(prefix+"enableUpsert"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+ configuration.GetBool(prefix+"interpolateParams"),
+ )
+}
+
+func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen,
+ maxLifetimeSeconds int, interpolateParams bool) (err error) {
+
+ store.SupportBucketTable = false
+ if !enableUpsert {
+ upsertQuery = ""
+ }
+ store.SqlGenerator = &SqlGenMysql{
+ CreateTableSqlTemplate: "",
+ DropTableSqlTemplate: "drop table `%s`",
+ UpsertQueryTemplate: upsertQuery,
+ }
+
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+ adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "", hostname, port, database)
+ if interpolateParams {
+ sqlUrl += "&interpolateParams=true"
+ adaptedSqlUrl += "&interpolateParams=true"
+ }
+
+ var dbErr error
+ store.DB, dbErr = sql.Open("mysql", sqlUrl)
+ if dbErr != nil {
+		if store.DB != nil {
+			store.DB.Close()
+		}
+		store.DB = nil
+		return fmt.Errorf("cannot connect to %s error:%v", adaptedSqlUrl, dbErr)
+ }
+
+ store.DB.SetMaxIdleConns(maxIdle)
+ store.DB.SetMaxOpenConns(maxOpen)
+ store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+ if err = store.DB.Ping(); err != nil {
+ return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go
new file mode 100644
index 000000000..a1f54455a
--- /dev/null
+++ b/weed/filer/mysql2/mysql2_store.go
@@ -0,0 +1,90 @@
+package mysql2
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ _ "github.com/go-sql-driver/mysql"
+)
+
+const (
+ CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &MysqlStore2{})
+}
+
+type MysqlStore2 struct {
+ abstract_sql.AbstractSqlStore
+}
+
+func (store *MysqlStore2) GetName() string {
+ return "mysql2"
+}
+
+func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"createTable"),
+ configuration.GetString(prefix+"upsertQuery"),
+ configuration.GetBool(prefix+"enableUpsert"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+ configuration.GetBool(prefix+"interpolateParams"),
+ )
+}
+
+func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen,
+ maxLifetimeSeconds int, interpolateParams bool) (err error) {
+
+ store.SupportBucketTable = true
+ if !enableUpsert {
+ upsertQuery = ""
+ }
+ store.SqlGenerator = &mysql.SqlGenMysql{
+ CreateTableSqlTemplate: createTable,
+ DropTableSqlTemplate: "drop table `%s`",
+ UpsertQueryTemplate: upsertQuery,
+ }
+
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+ adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "", hostname, port, database)
+ if interpolateParams {
+ sqlUrl += "&interpolateParams=true"
+ adaptedSqlUrl += "&interpolateParams=true"
+ }
+
+ var dbErr error
+ store.DB, dbErr = sql.Open("mysql", sqlUrl)
+ if dbErr != nil {
+		if store.DB != nil {
+			store.DB.Close()
+		}
+		store.DB = nil
+		return fmt.Errorf("cannot connect to %s error:%v", adaptedSqlUrl, dbErr)
+ }
+
+ store.DB.SetMaxIdleConns(maxIdle)
+ store.DB.SetMaxOpenConns(maxOpen)
+ store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+ if err = store.DB.Ping(); err != nil {
+ return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+ }
+
+ if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
+ return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
+ }
+
+ return nil
+}
diff --git a/weed/filer2/permission.go b/weed/filer/permission.go
similarity index 95%
rename from weed/filer2/permission.go
rename to weed/filer/permission.go
index 8a9508fbc..0d8b8292b 100644
--- a/weed/filer2/permission.go
+++ b/weed/filer/permission.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
func hasWritePermission(dir *Entry, entry *Entry) bool {
diff --git a/weed/filer2/postgres/README.txt b/weed/filer/postgres/README.txt
similarity index 100%
rename from weed/filer2/postgres/README.txt
rename to weed/filer/postgres/README.txt
diff --git a/weed/filer/postgres/postgres_sql_gen.go b/weed/filer/postgres/postgres_sql_gen.go
new file mode 100644
index 000000000..6cee3d2da
--- /dev/null
+++ b/weed/filer/postgres/postgres_sql_gen.go
@@ -0,0 +1,58 @@
+package postgres
+
+import (
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ _ "github.com/lib/pq"
+)
+
+type SqlGenPostgres struct {
+ CreateTableSqlTemplate string
+ DropTableSqlTemplate string
+ UpsertQueryTemplate string
+}
+
+var (
+ _ = abstract_sql.SqlGenerator(&SqlGenPostgres{})
+)
+
+func (gen *SqlGenPostgres) GetSqlInsert(tableName string) string {
+ if gen.UpsertQueryTemplate != "" {
+ return fmt.Sprintf(gen.UpsertQueryTemplate, tableName)
+ } else {
+ return fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, tableName)
+ }
+}
+
+func (gen *SqlGenPostgres) GetSqlUpdate(tableName string) string {
+ return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlFind(tableName string) string {
+ return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlDelete(tableName string) string {
+ return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(tableName string) string {
+ return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlListExclusive(tableName string) string {
+ return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlListInclusive(tableName string) string {
+ return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlCreateTable(tableName string) string {
+ return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName)
+}
+
+func (gen *SqlGenPostgres) GetSqlDropTable(tableName string) string {
+ return fmt.Sprintf(gen.DropTableSqlTemplate, tableName)
+}
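
Note: the same upsert hook exists for Postgres. A candidate value for the upsertQuery option read in postgres_store.go, requiring PostgreSQL 9.5+ and assuming a unique constraint on (dirhash, name) (illustrative only):

    // A Go constant holding such a template; fmt.Sprintf fills in the table name.
    const examplePostgresUpsert = `INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)` +
        ` ON CONFLICT (dirhash, name) DO UPDATE SET meta = EXCLUDED.meta`
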
diff --git a/weed/filer/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go
new file mode 100644
index 000000000..a1e16a92a
--- /dev/null
+++ b/weed/filer/postgres/postgres_store.go
@@ -0,0 +1,93 @@
+package postgres
+
+import (
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ _ "github.com/lib/pq"
+)
+
+const (
+ CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &PostgresStore{})
+}
+
+type PostgresStore struct {
+ abstract_sql.AbstractSqlStore
+}
+
+func (store *PostgresStore) GetName() string {
+ return "postgres"
+}
+
+func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"upsertQuery"),
+ configuration.GetBool(prefix+"enableUpsert"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetString(prefix+"schema"),
+ configuration.GetString(prefix+"sslmode"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+ )
+}
+
+func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) {
+
+ store.SupportBucketTable = false
+ if !enableUpsert {
+ upsertQuery = ""
+ }
+ store.SqlGenerator = &SqlGenPostgres{
+ CreateTableSqlTemplate: "",
+ DropTableSqlTemplate: `drop table "%s"`,
+ UpsertQueryTemplate: upsertQuery,
+ }
+
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
+ if user != "" {
+ sqlUrl += " user=" + user
+ }
+ adaptedSqlUrl := sqlUrl
+ if password != "" {
+ sqlUrl += " password=" + password
+ adaptedSqlUrl += " password=ADAPTED"
+ }
+ if database != "" {
+ sqlUrl += " dbname=" + database
+ adaptedSqlUrl += " dbname=" + database
+ }
+ if schema != "" {
+ sqlUrl += " search_path=" + schema
+ adaptedSqlUrl += " search_path=" + schema
+ }
+ var dbErr error
+ store.DB, dbErr = sql.Open("postgres", sqlUrl)
+ if dbErr != nil {
+ // sql.Open returns a nil *sql.DB when it fails, so there is nothing to close
+ store.DB = nil
+ return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, dbErr)
+ }
+
+ store.DB.SetMaxIdleConns(maxIdle)
+ store.DB.SetMaxOpenConns(maxOpen)
+ store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+ if err = store.DB.Ping(); err != nil {
+ return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+ }
+
+ return nil
+}
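
One deliberate detail in `initialize`: the DSN is built twice, in lockstep, so error paths can report a credential-free copy. Illustratively, with made-up values:

```go
sqlUrl := "host=localhost port=5432 sslmode=disable connect_timeout=30 user=weed"
adaptedSqlUrl := sqlUrl + " password=ADAPTED" // safe to embed in errors and logs
sqlUrl += " password=secret"                  // never log this one
fmt.Println(adaptedSqlUrl)
```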
diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go
new file mode 100644
index 000000000..0f573d8d0
--- /dev/null
+++ b/weed/filer/postgres2/postgres2_store.go
@@ -0,0 +1,100 @@
+package postgres2
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ _ "github.com/lib/pq"
+)
+
+const (
+ CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &PostgresStore2{})
+}
+
+type PostgresStore2 struct {
+ abstract_sql.AbstractSqlStore
+}
+
+func (store *PostgresStore2) GetName() string {
+ return "postgres2"
+}
+
+func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"createTable"),
+ configuration.GetString(prefix+"upsertQuery"),
+ configuration.GetBool(prefix+"enableUpsert"),
+ configuration.GetString(prefix+"username"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetString(prefix+"hostname"),
+ configuration.GetInt(prefix+"port"),
+ configuration.GetString(prefix+"database"),
+ configuration.GetString(prefix+"schema"),
+ configuration.GetString(prefix+"sslmode"),
+ configuration.GetInt(prefix+"connection_max_idle"),
+ configuration.GetInt(prefix+"connection_max_open"),
+ configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+ )
+}
+
+func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) {
+
+ store.SupportBucketTable = true
+ if !enableUpsert {
+ upsertQuery = ""
+ }
+ store.SqlGenerator = &postgres.SqlGenPostgres{
+ CreateTableSqlTemplate: createTable,
+ DropTableSqlTemplate: `drop table "%s"`,
+ UpsertQueryTemplate: upsertQuery,
+ }
+
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
+ if user != "" {
+ sqlUrl += " user=" + user
+ }
+ adaptedSqlUrl := sqlUrl
+ if password != "" {
+ sqlUrl += " password=" + password
+ adaptedSqlUrl += " password=ADAPTED"
+ }
+ if database != "" {
+ sqlUrl += " dbname=" + database
+ adaptedSqlUrl += " dbname=" + database
+ }
+ if schema != "" {
+ sqlUrl += " search_path=" + schema
+ adaptedSqlUrl += " search_path=" + schema
+ }
+ var dbErr error
+ store.DB, dbErr = sql.Open("postgres", sqlUrl)
+ if dbErr != nil {
+ // sql.Open returns a nil *sql.DB when it fails, so there is nothing to close
+ store.DB = nil
+ return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, dbErr)
+ }
+
+ store.DB.SetMaxIdleConns(maxIdle)
+ store.DB.SetMaxOpenConns(maxOpen)
+ store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+ if err = store.DB.Ping(); err != nil {
+ return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+ }
+
+ if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
+ return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
+ }
+
+ return nil
+}
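
Compared with `postgres`, the `postgres2` store flips `SupportBucketTable` to true and creates the default table eagerly, which is why it needs a `createTable` template it can re-run for each bucket table. A plausible template, shown purely as a sketch (the actual default belongs in the filer configuration, not this file):

```go
createTable := `CREATE TABLE IF NOT EXISTS "%s" (
  dirhash   BIGINT,
  name      VARCHAR(65535),
  directory VARCHAR(65535),
  meta      bytea,
  PRIMARY KEY (dirhash, name)
)`
```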
diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go
new file mode 100644
index 000000000..d92d526d5
--- /dev/null
+++ b/weed/filer/read_write.go
@@ -0,0 +1,116 @@
+package filer
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "time"
+)
+
+func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.SeaweedFilerClient, dir, name string, byteBuffer *bytes.Buffer) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Directory: dir,
+ Name: name,
+ }
+ respLookupEntry, err := filer_pb.LookupEntry(filerClient, request)
+ if err != nil {
+ return err
+ }
+ if len(respLookupEntry.Entry.Content) > 0 {
+ _, err = byteBuffer.Write(respLookupEntry.Entry.Content)
+ return err
+ }
+
+ return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+
+}
+
+func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
+
+ target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name)
+
+ data, _, err := util.Get(target)
+
+ return data, err
+}
+
+func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error {
+ var target string
+ if port == 0 {
+ target = fmt.Sprintf("http://%s%s/%s", host, dir, name)
+ } else {
+ target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
+ }
+
+ // set the HTTP method, URL, and request body
+ req, err := http.NewRequest(http.MethodPut, target, byteBuffer)
+ if err != nil {
+ return err
+ }
+
+ // set the Content-Type request header when one is provided
+ if contentType != "" {
+ req.Header.Set("Content-Type", contentType)
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer util.CloseResponse(resp)
+
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode >= 400 {
+ return fmt.Errorf("%s: %s %v", target, resp.Status, string(b))
+ }
+
+ return nil
+
+}
+
+func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, content []byte) error {
+
+ resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
+ Directory: dir,
+ Name: name,
+ })
+
+ if err == filer_pb.ErrNotFound {
+ err = filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+ Directory: dir,
+ Entry: &filer_pb.Entry{
+ Name: name,
+ IsDirectory: false,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0644),
+ Collection: "",
+ Replication: "",
+ FileSize: uint64(len(content)),
+ },
+ Content: content,
+ },
+ })
+ } else if err == nil {
+ entry := resp.Entry
+ entry.Content = content
+ entry.Attributes.Mtime = time.Now().Unix()
+ entry.Attributes.FileSize = uint64(len(content))
+ err = filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{
+ Directory: dir,
+ Entry: entry,
+ })
+ }
+
+ return err
+}
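
`SaveInsideFiler` amounts to an upsert over the filer gRPC API: look the entry up, then create or update it with the payload held inline in `Entry.Content` rather than in chunks. A usage sketch, assuming `client` is an already-connected `filer_pb.SeaweedFilerClient`; the path and payload are made up:

```go
jsonBytes := []byte(`{"example": true}`)
if err := filer.SaveInsideFiler(client, "/etc/seaweedfs", "example.json", jsonBytes); err != nil {
	log.Fatalf("save inside filer: %v", err)
}
```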
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
new file mode 100644
index 000000000..a1e989684
--- /dev/null
+++ b/weed/filer/reader_at.go
@@ -0,0 +1,229 @@
+package filer
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math/rand"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+ "github.com/golang/groupcache/singleflight"
+)
+
+type ChunkReadAt struct {
+ masterClient *wdclient.MasterClient
+ chunkViews []*ChunkView
+ lookupFileId wdclient.LookupFileIdFunctionType
+ readerLock sync.Mutex
+ fileSize int64
+
+ fetchGroup singleflight.Group
+ chunkCache chunk_cache.ChunkCache
+ lastChunkFileId string
+ lastChunkData []byte
+}
+
+var _ = io.ReaderAt(&ChunkReadAt{})
+var _ = io.Closer(&ChunkReadAt{})
+
+func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionType {
+
+ vidCache := make(map[string]*filer_pb.Locations)
+ var vidCacheLock sync.RWMutex
+ return func(fileId string) (targetUrls []string, err error) {
+ vid := VolumeId(fileId)
+ vidCacheLock.RLock()
+ locations, found := vidCache[vid]
+ vidCacheLock.RUnlock()
+
+ if !found {
+ util.Retry("lookup volume "+vid, func() error {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: []string{vid},
+ })
+ if err != nil {
+ return err
+ }
+
+ locations = resp.LocationsMap[vid]
+ if locations == nil || len(locations.Locations) == 0 {
+ glog.V(0).Infof("failed to locate %s", fileId)
+ return fmt.Errorf("failed to locate %s", fileId)
+ }
+ vidCacheLock.Lock()
+ vidCache[vid] = locations
+ vidCacheLock.Unlock()
+
+ return nil
+ })
+ return err
+ })
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ for _, loc := range locations.Locations {
+ volumeServerAddress := filerClient.AdjustedUrl(loc)
+ targetUrl := fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
+ targetUrls = append(targetUrls, targetUrl)
+ }
+
+ for i := len(targetUrls) - 1; i > 0; i-- {
+ j := rand.Intn(i + 1)
+ targetUrls[i], targetUrls[j] = targetUrls[j], targetUrls[i]
+ }
+
+ return
+ }
+}
+
+func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
+
+ return &ChunkReadAt{
+ chunkViews: chunkViews,
+ lookupFileId: lookupFn,
+ chunkCache: chunkCache,
+ fileSize: fileSize,
+ }
+}
+
+func (c *ChunkReadAt) Close() error {
+ c.lastChunkData = nil
+ c.lastChunkFileId = ""
+ return nil
+}
+
+func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
+
+ c.readerLock.Lock()
+ defer c.readerLock.Unlock()
+
+ glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
+ return c.doReadAt(p, offset)
+}
+
+func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
+
+ startOffset, remaining := offset, int64(len(p))
+ var nextChunk *ChunkView
+ for i, chunk := range c.chunkViews {
+ if remaining <= 0 {
+ break
+ }
+ if i+1 < len(c.chunkViews) {
+ nextChunk = c.chunkViews[i+1]
+ } else {
+ nextChunk = nil
+ }
+ if startOffset < chunk.LogicOffset {
+ gap := int(chunk.LogicOffset - startOffset)
+ glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap))
+ n += int(min(int64(gap), remaining))
+ startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
+ if remaining <= 0 {
+ break
+ }
+ }
+ // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size))
+ chunkStart, chunkStop := max(chunk.LogicOffset, startOffset), min(chunk.LogicOffset+int64(chunk.Size), startOffset+remaining)
+ if chunkStart >= chunkStop {
+ continue
+ }
+ glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
+ var buffer []byte
+ buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
+ if err != nil {
+ glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+ return
+ }
+ bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
+ copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer[bufferOffset:bufferOffset+chunkStop-chunkStart])
+ n += copied
+ startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
+ }
+
+ glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
+
+ if err == nil && remaining > 0 && c.fileSize > startOffset {
+ delta := int(min(remaining, c.fileSize-startOffset))
+ glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
+ n += delta
+ }
+
+ if err == nil && offset+int64(len(p)) >= c.fileSize {
+ err = io.EOF
+ }
+ // fmt.Printf("~~~ filled %d, err: %v\n\n", n, err)
+
+ return
+
+}
+
+func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView, nextChunkViews ...*ChunkView) (chunkData []byte, err error) {
+
+ if c.lastChunkFileId == chunkView.FileId {
+ return c.lastChunkData, nil
+ }
+
+ v, doErr := c.readOneWholeChunk(chunkView)
+
+ if doErr != nil {
+ return nil, doErr
+ }
+
+ chunkData = v.([]byte)
+
+ c.lastChunkData = chunkData
+ c.lastChunkFileId = chunkView.FileId
+
+ for _, nextChunkView := range nextChunkViews {
+ if c.chunkCache != nil && nextChunkView != nil {
+ go c.readOneWholeChunk(nextChunkView)
+ }
+ }
+
+ return
+}
+
+func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, error) {
+
+ var err error
+
+ return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) {
+
+ glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
+
+ data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
+ if data != nil {
+ glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
+ } else {
+ var err error
+ data, err = c.doFetchFullChunkData(chunkView)
+ if err != nil {
+ return data, err
+ }
+ c.chunkCache.SetChunk(chunkView.FileId, data)
+ }
+ return data, err
+ })
+}
+
+func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) {
+
+ glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId)
+
+ data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+
+ glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId)
+
+ return data, err
+
+}
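
Since `ChunkReadAt` satisfies `io.ReaderAt`, it composes directly with the standard library; the singleflight group also deduplicates concurrent fetches of the same chunk. A minimal sketch, assuming `lookupFn`, `chunkViews`, `chunkCache`, and `fileSize` come from the surrounding filer code:

```go
ra := filer.NewChunkReaderAtFromClient(lookupFn, chunkViews, chunkCache, fileSize)
defer ra.Close()

// random-access read of bytes [128, 128+1024) via a standard SectionReader
section := io.NewSectionReader(ra, 128, 1024)
data, err := ioutil.ReadAll(section)
```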
diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go
new file mode 100644
index 000000000..37a34f4ea
--- /dev/null
+++ b/weed/filer/reader_at_test.go
@@ -0,0 +1,156 @@
+package filer
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "sync"
+ "testing"
+)
+
+type mockChunkCache struct {
+}
+
+func (m *mockChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) {
+ x, _ := strconv.Atoi(fileId)
+ data = make([]byte, minSize)
+ for i := 0; i < int(minSize); i++ {
+ data[i] = byte(x)
+ }
+ return data
+}
+func (m *mockChunkCache) SetChunk(fileId string, data []byte) {
+}
+
+func TestReaderAt(t *testing.T) {
+
+ visibles := []VisibleInterval{
+ {
+ start: 1,
+ stop: 2,
+ fileId: "1",
+ chunkSize: 9,
+ },
+ {
+ start: 3,
+ stop: 4,
+ fileId: "3",
+ chunkSize: 1,
+ },
+ {
+ start: 5,
+ stop: 6,
+ fileId: "5",
+ chunkSize: 2,
+ },
+ {
+ start: 7,
+ stop: 9,
+ fileId: "7",
+ chunkSize: 2,
+ },
+ {
+ start: 9,
+ stop: 10,
+ fileId: "9",
+ chunkSize: 2,
+ },
+ }
+
+ readerAt := &ChunkReadAt{
+ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
+ lookupFileId: nil,
+ readerLock: sync.Mutex{},
+ fileSize: 10,
+ chunkCache: &mockChunkCache{},
+ }
+
+ testReadAt(t, readerAt, 0, 10, 10, io.EOF)
+ testReadAt(t, readerAt, 0, 12, 10, io.EOF)
+ testReadAt(t, readerAt, 2, 8, 8, io.EOF)
+ testReadAt(t, readerAt, 3, 6, 6, nil)
+
+}
+
+func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expected int, expectedErr error) {
+ data := make([]byte, size)
+ n, err := readerAt.ReadAt(data, offset)
+
+ for _, d := range data {
+ fmt.Printf("%x", d)
+ }
+ fmt.Println()
+
+ if expected != n {
+ t.Errorf("unexpected read size: %d, expect: %d", n, expected)
+ }
+ if err != expectedErr {
+ t.Errorf("unexpected read error: %v, expect: %v", err, expectedErr)
+ }
+
+}
+
+func TestReaderAt0(t *testing.T) {
+
+ visibles := []VisibleInterval{
+ {
+ start: 2,
+ stop: 5,
+ fileId: "1",
+ chunkSize: 9,
+ },
+ {
+ start: 7,
+ stop: 9,
+ fileId: "2",
+ chunkSize: 9,
+ },
+ }
+
+ readerAt := &ChunkReadAt{
+ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
+ lookupFileId: nil,
+ readerLock: sync.Mutex{},
+ fileSize: 10,
+ chunkCache: &mockChunkCache{},
+ }
+
+ testReadAt(t, readerAt, 0, 10, 10, io.EOF)
+ testReadAt(t, readerAt, 3, 16, 7, io.EOF)
+ testReadAt(t, readerAt, 3, 5, 5, nil)
+
+ testReadAt(t, readerAt, 11, 5, 0, io.EOF)
+ testReadAt(t, readerAt, 10, 5, 0, io.EOF)
+
+}
+
+func TestReaderAt1(t *testing.T) {
+
+ visibles := []VisibleInterval{
+ {
+ start: 2,
+ stop: 5,
+ fileId: "1",
+ chunkSize: 9,
+ },
+ }
+
+ readerAt := &ChunkReadAt{
+ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
+ lookupFileId: nil,
+ readerLock: sync.Mutex{},
+ fileSize: 20,
+ chunkCache: &mockChunkCache{},
+ }
+
+ testReadAt(t, readerAt, 0, 20, 20, io.EOF)
+ testReadAt(t, readerAt, 1, 7, 7, nil)
+ testReadAt(t, readerAt, 0, 1, 1, nil)
+ testReadAt(t, readerAt, 18, 4, 2, io.EOF)
+ testReadAt(t, readerAt, 12, 4, 4, nil)
+ testReadAt(t, readerAt, 4, 20, 16, io.EOF)
+ testReadAt(t, readerAt, 4, 10, 10, nil)
+ testReadAt(t, readerAt, 1, 10, 10, nil)
+
+}
diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go
similarity index 56%
rename from weed/filer2/redis/redis_cluster_store.go
rename to weed/filer/redis/redis_cluster_store.go
index f1ad4b35c..9572058a8 100644
--- a/weed/filer2/redis/redis_cluster_store.go
+++ b/weed/filer/redis/redis_cluster_store.go
@@ -1,13 +1,13 @@
package redis
import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func init() {
- filer2.Stores = append(filer2.Stores, &RedisClusterStore{})
+ filer.Stores = append(filer.Stores, &RedisClusterStore{})
}
type RedisClusterStore struct {
@@ -18,16 +18,16 @@ func (store *RedisClusterStore) GetName() string {
return "redis_cluster"
}
-func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) {
+func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) {
- configuration.SetDefault("useReadOnly", true)
- configuration.SetDefault("routeByLatency", true)
+ configuration.SetDefault(prefix+"useReadOnly", false)
+ configuration.SetDefault(prefix+"routeByLatency", false)
return store.initialize(
- configuration.GetStringSlice("addresses"),
- configuration.GetString("password"),
- configuration.GetBool("useReadOnly"),
- configuration.GetBool("routeByLatency"),
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
)
}
diff --git a/weed/filer2/redis/redis_store.go b/weed/filer/redis/redis_store.go
similarity index 63%
rename from weed/filer2/redis/redis_store.go
rename to weed/filer/redis/redis_store.go
index c56fa014c..665352a63 100644
--- a/weed/filer2/redis/redis_store.go
+++ b/weed/filer/redis/redis_store.go
@@ -1,13 +1,13 @@
package redis
import (
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/go-redis/redis"
+ "github.com/go-redis/redis/v8"
)
func init() {
- filer2.Stores = append(filer2.Stores, &RedisStore{})
+ filer.Stores = append(filer.Stores, &RedisStore{})
}
type RedisStore struct {
@@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string {
return "redis"
}
-func (store *RedisStore) Initialize(configuration util.Configuration) (err error) {
+func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(
- configuration.GetString("address"),
- configuration.GetString("password"),
- configuration.GetInt("database"),
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
)
}
diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
similarity index 53%
rename from weed/filer2/redis/universal_redis_store.go
rename to weed/filer/redis/universal_redis_store.go
index 62257e91e..30d11a7f4 100644
--- a/weed/filer2/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -3,12 +3,16 @@ package redis
import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/go-redis/redis"
"sort"
"strings"
"time"
+
+ "github.com/go-redis/redis/v8"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const (
@@ -29,14 +33,18 @@ func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error
return nil
}
-func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
- _, err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
+ if len(entry.Chunks) > 50 {
+ value = util.MaybeGzipData(value)
+ }
+
+ _, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
@@ -44,7 +52,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2
dir, name := entry.FullPath.DirAndName()
if name != "" {
- _, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result()
+ _, err = store.Client.SAdd(ctx, genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
}
@@ -53,26 +61,26 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2
return nil
}
-func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
-func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
- data, err := store.Client.Get(string(fullpath)).Result()
+ data, err := store.Client.Get(ctx, string(fullpath)).Result()
if err == redis.Nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", fullpath, err)
}
- entry = &filer2.Entry{
+ entry = &filer.Entry{
FullPath: fullpath,
}
- err = entry.DecodeAttributesAndChunks([]byte(data))
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data)))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
@@ -80,9 +88,9 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2
return entry, nil
}
-func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
- _, err = store.Client.Del(string(fullpath)).Result()
+ _, err = store.Client.Del(ctx, string(fullpath)).Result()
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
@@ -90,7 +98,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file
dir, name := fullpath.DirAndName()
if name != "" {
- _, err = store.Client.SRem(genDirectoryListKey(dir), name).Result()
+ _, err = store.Client.SRem(ctx, genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
}
@@ -99,16 +107,16 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file
return nil
}
-func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
- members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
+ members, err := store.Client.SMembers(ctx, genDirectoryListKey(string(fullpath))).Result()
if err != nil {
return fmt.Errorf("delete folder %s : %v", fullpath, err)
}
for _, fileName := range members {
- path := filer2.NewFullPath(string(fullpath), fileName)
- _, err = store.Client.Del(string(path)).Result()
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(ctx, string(path)).Result()
if err != nil {
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
}
@@ -117,12 +125,16 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
return nil
}
-func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
+func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
- members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
+ dirListKey := genDirectoryListKey(string(dirPath))
+ members, err := store.Client.SMembers(ctx, dirListKey).Result()
if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
}
// skip
@@ -131,7 +143,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
for _, m := range members {
if strings.Compare(m, startFileName) >= 0 {
if m == startFileName {
- if inclusive {
+ if includeStartFile {
t = append(t, m)
}
} else {
@@ -148,24 +160,41 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
})
// limit
- if limit < len(members) {
+ if limit < int64(len(members)) {
members = members[:limit]
}
// fetch entry meta
for _, fileName := range members {
- path := filer2.NewFullPath(string(fullpath), fileName)
+ path := util.NewFullPath(string(dirPath), fileName)
entry, err := store.FindEntry(ctx, path)
+ lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
+ if err == filer_pb.ErrNotFound {
+ continue
+ }
} else {
- entries = append(entries, entry)
+ if entry.TtlSec > 0 {
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(ctx, string(path)).Result()
+ store.Client.SRem(ctx, dirListKey, fileName).Result()
+ continue
+ }
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
}
}
- return entries, err
+ return lastFileName, err
}
func genDirectoryListKey(dir string) (dirList string) {
return dir + DIR_LIST_MARKER
}
+
+func (store *UniversalRedisStore) Shutdown() {
+ store.Client.Close()
+}
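
Most of the churn in this file is mechanical: go-redis v8 made every command context-first, which is why each call above gained a `ctx` argument. For reference, a minimal v8 round trip (the address is illustrative):

```go
rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
ctx := context.Background()

if err := rdb.Set(ctx, "k", "v", 0).Err(); err != nil {
	log.Fatal(err)
}
val, err := rdb.Get(ctx, "k").Result()
if err == redis.Nil {
	// key does not exist
}
_ = val
```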
diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go
new file mode 100644
index 000000000..ad6e389ed
--- /dev/null
+++ b/weed/filer/redis/universal_redis_store_kv.go
@@ -0,0 +1,42 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/go-redis/redis/v8"
+)
+
+func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ _, err = store.Client.Set(ctx, string(key), value, 0).Result()
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ data, err := store.Client.Get(ctx, string(key)).Result()
+
+ if err == redis.Nil {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return []byte(data), err
+}
+
+func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ _, err = store.Client.Del(ctx, string(key)).Result()
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/redis2/redis_cluster_store.go b/weed/filer/redis2/redis_cluster_store.go
new file mode 100644
index 000000000..22d09da25
--- /dev/null
+++ b/weed/filer/redis2/redis_cluster_store.go
@@ -0,0 +1,44 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis/v8"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &RedisCluster2Store{})
+}
+
+type RedisCluster2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *RedisCluster2Store) GetName() string {
+ return "redis_cluster2"
+}
+
+func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+
+ configuration.SetDefault(prefix+"useReadOnly", false)
+ configuration.SetDefault(prefix+"routeByLatency", false)
+
+ return store.initialize(
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
+ configuration.GetStringSlice(prefix+"superLargeDirectories"),
+ )
+}
+
+func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool, superLargeDirectories []string) (err error) {
+ store.Client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: addresses,
+ Password: password,
+ ReadOnly: readOnly,
+ RouteByLatency: routeByLatency,
+ })
+ store.loadSuperLargeDirectories(superLargeDirectories)
+ return
+}
diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go
new file mode 100644
index 000000000..8eb97e374
--- /dev/null
+++ b/weed/filer/redis2/redis_store.go
@@ -0,0 +1,38 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis/v8"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &Redis2Store{})
+}
+
+type Redis2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *Redis2Store) GetName() string {
+ return "redis2"
+}
+
+func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
+ configuration.GetStringSlice(prefix+"superLargeDirectories"),
+ )
+}
+
+func (store *Redis2Store) initialize(hostPort string, password string, database int, superLargeDirectories []string) (err error) {
+ store.Client = redis.NewClient(&redis.Options{
+ Addr: hostPort,
+ Password: password,
+ DB: database,
+ })
+ store.loadSuperLargeDirectories(superLargeDirectories)
+ return
+}
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
new file mode 100644
index 000000000..aab3d1f4a
--- /dev/null
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -0,0 +1,204 @@
+package redis2
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis/v8"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DIR_LIST_MARKER = "\x00"
+)
+
+type UniversalRedis2Store struct {
+ Client redis.UniversalClient
+ superLargeDirectoryHash map[string]bool
+}
+
+func (store *UniversalRedis2Store) isSuperLargeDirectory(dir string) (isSuperLargeDirectory bool) {
+ _, isSuperLargeDirectory = store.superLargeDirectoryHash[dir]
+ return
+}
+
+func (store *UniversalRedis2Store) loadSuperLargeDirectories(superLargeDirectories []string) {
+ // set directory hash
+ store.superLargeDirectoryHash = make(map[string]bool)
+ for _, dir := range superLargeDirectories {
+ store.superLargeDirectoryHash[dir] = true
+ }
+}
+
+func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if len(entry.Chunks) > 50 {
+ value = util.MaybeGzipData(value)
+ }
+
+ if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := entry.FullPath.DirAndName()
+ if store.isSuperLargeDirectory(dir) {
+ return nil
+ }
+
+ if name != "" {
+ if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil {
+ return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
+
+ data, err := store.Client.Get(ctx, string(fullpath)).Result()
+ if err == redis.Nil {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data)))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ _, err = store.Client.Del(ctx, genDirectoryListKey(string(fullpath))).Result()
+ if err != nil {
+ return fmt.Errorf("delete dir list %s : %v", fullpath, err)
+ }
+
+ _, err = store.Client.Del(ctx, string(fullpath)).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ dir, name := fullpath.DirAndName()
+ if store.isSuperLargeDirectory(dir) {
+ return nil
+ }
+ if name != "" {
+ _, err = store.Client.ZRem(ctx, genDirectoryListKey(dir), name).Result()
+ if err != nil {
+ return fmt.Errorf("DeleteEntry %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ if store.isSuperLargeDirectory(string(fullpath)) {
+ return nil
+ }
+
+ members, err := store.Client.ZRange(ctx, genDirectoryListKey(string(fullpath)), 0, -1).Result()
+ if err != nil {
+ return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)
+ }
+
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(ctx, string(path)).Result()
+ if err != nil {
+ return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
+}
+
+func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ dirListKey := genDirectoryListKey(string(dirPath))
+ start := int64(0)
+ if startFileName != "" {
+ start, _ = store.Client.ZRank(ctx, dirListKey, startFileName).Result()
+ if !includeStartFile {
+ start++
+ }
+ }
+ members, err := store.Client.ZRange(ctx, dirListKey, start, start+int64(limit)-1).Result()
+ if err != nil {
+ return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
+ }
+
+ // fetch entry meta
+ for _, fileName := range members {
+ path := util.NewFullPath(string(dirPath), fileName)
+ entry, err := store.FindEntry(ctx, path)
+ lastFileName = fileName
+ if err != nil {
+ glog.V(0).Infof("list %s : %v", path, err)
+ if err == filer_pb.ErrNotFound {
+ continue
+ }
+ } else {
+ if entry.TtlSec > 0 {
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(ctx, string(path)).Result()
+ store.Client.ZRem(ctx, dirListKey, fileName).Result()
+ continue
+ }
+ }
+ if !eachEntryFunc(entry) {
+ break
+ }
+ }
+ }
+
+ return lastFileName, err
+}
+
+func genDirectoryListKey(dir string) (dirList string) {
+ return dir + DIR_LIST_MARKER
+}
+
+func (store *UniversalRedis2Store) Shutdown() {
+ store.Client.Close()
+}
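
The `superLargeDirectories` option trades listability for write throughput: for a directory on the list, `InsertEntry` skips the `ZAddNX` into the per-directory sorted set, so the directory can absorb an unbounded number of children without a hot ZSET, at the cost that `ListDirectoryEntries` and `DeleteFolderChildren` will not see them (full-path lookups still work). An in-package configuration sketch, with an assumed directory name:

```go
store := &Redis2Store{}
// "/big/logs" is illustrative: any directory expected to hold millions of
// direct children is a candidate for this list
err := store.initialize("localhost:6379", "", 0, []string{"/big/logs"})
```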
diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go
new file mode 100644
index 000000000..bde994dc9
--- /dev/null
+++ b/weed/filer/redis2/universal_redis_store_kv.go
@@ -0,0 +1,42 @@
+package redis2
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/go-redis/redis/v8"
+)
+
+func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ _, err = store.Client.Set(ctx, string(key), value, 0).Result()
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ data, err := store.Client.Get(ctx, string(key)).Result()
+
+ if err == redis.Nil {
+ return nil, filer.ErrKvNotFound
+ }
+
+ return []byte(data), err
+}
+
+func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ _, err = store.Client.Del(ctx, string(key)).Result()
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/rocksdb/README.md b/weed/filer/rocksdb/README.md
new file mode 100644
index 000000000..6bae6d34e
--- /dev/null
+++ b/weed/filer/rocksdb/README.md
@@ -0,0 +1,41 @@
+# Prepare the compilation environment on Linux
+```
+sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+sudo apt-get update -qq
+sudo apt-get install gcc-6 g++-6 libsnappy-dev zlib1g-dev libbz2-dev -qq
+export CXX="g++-6" CC="gcc-6"
+
+wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags2_2.0-1.1ubuntu1_amd64.deb
+sudo dpkg -i libgflags2_2.0-1.1ubuntu1_amd64.deb
+wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags-dev_2.0-1.1ubuntu1_amd64.deb
+sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb
+```
+
+# Prepare the compilation environment on macOS
+```
+brew install snappy
+```
+
+# Install RocksDB
+```
+ export ROCKSDB_HOME=/Users/chris/dev/rocksdb
+
+ git clone https://github.com/facebook/rocksdb.git $ROCKSDB_HOME
+ pushd $ROCKSDB_HOME
+ make clean
+ make install-static
+ popd
+```
+
+# Install gorocksdb
+
+```
+export CGO_CFLAGS="-I$ROCKSDB_HOME/include"
+export CGO_LDFLAGS="-L$ROCKSDB_HOME -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"
+
+go get github.com/tecbot/gorocksdb
+```
+# Compile SeaweedFS with RocksDB support
+
+```
+cd ~/go/src/github.com/chrislusf/seaweedfs/weed
+go install -tags rocksdb
+```
diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go
new file mode 100644
index 000000000..379a18c62
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_store.go
@@ -0,0 +1,304 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/tecbot/gorocksdb"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ weed_util "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ filer.Stores = append(filer.Stores, &RocksDBStore{})
+}
+
+type options struct {
+ opt *gorocksdb.Options
+ ro *gorocksdb.ReadOptions
+ wo *gorocksdb.WriteOptions
+}
+
+func (opt *options) init() {
+ opt.opt = gorocksdb.NewDefaultOptions()
+ opt.ro = gorocksdb.NewDefaultReadOptions()
+ opt.wo = gorocksdb.NewDefaultWriteOptions()
+}
+
+func (opt *options) close() {
+ opt.opt.Destroy()
+ opt.ro.Destroy()
+ opt.wo.Destroy()
+}
+
+type RocksDBStore struct {
+ path string
+ db *gorocksdb.DB
+ options
+}
+
+func (store *RocksDBStore) GetName() string {
+ return "rocksdb"
+}
+
+func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
+ dir := configuration.GetString(prefix + "dir")
+ return store.initialize(dir)
+}
+
+func (store *RocksDBStore) initialize(dir string) (err error) {
+ glog.Infof("filer store rocksdb dir: %s", dir)
+ os.MkdirAll(dir, 0755)
+ if err := weed_util.TestFolderWritable(dir); err != nil {
+ return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
+ }
+ store.options.init()
+ store.opt.SetCreateIfMissing(true)
+ // reduce write amplification
+ // also avoid expired data stored in highest level never get compacted
+ store.opt.SetLevelCompactionDynamicLevelBytes(true)
+ store.opt.SetCompactionFilter(NewTTLFilter())
+ // store.opt.SetMaxBackgroundCompactions(2)
+
+ store.db, err = gorocksdb.OpenDb(store.opt, dir)
+
+ return
+}
+
+func (store *RocksDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *RocksDBStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *RocksDBStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
+ dir, name := entry.DirAndName()
+ key := genKey(dir, name)
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ err = store.db.Put(store.wo, key, value)
+
+ if err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+
+ return nil
+}
+
+func (store *RocksDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
+ dir, name := fullpath.DirAndName()
+ key := genKey(dir, name)
+ data, err := store.db.Get(store.ro, key)
+
+ if data == nil {
+ return nil, filer_pb.ErrNotFound
+ }
+ defer data.Free()
+
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks(data.Data())
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+
+ return entry, nil
+}
+
+func (store *RocksDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ dir, name := fullpath.DirAndName()
+ key := genKey(dir, name)
+
+ err = store.db.Delete(store.wo, key)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
+ directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+
+ batch := gorocksdb.NewWriteBatch()
+ defer batch.Destroy()
+
+ ro := gorocksdb.NewDefaultReadOptions()
+ defer ro.Destroy()
+ ro.SetFillCache(false)
+
+ iter := store.db.NewIterator(ro)
+ defer iter.Close()
+ err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool {
+ batch.Delete(key)
+ return true
+ })
+ if err != nil {
+ return fmt.Errorf("delete list %s : %v", fullpath, err)
+ }
+
+ err = store.db.Write(store.wo, batch)
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) {
+
+ if len(lastKey) == 0 {
+ iter.Seek(prefix)
+ } else {
+ iter.Seek(lastKey)
+ if !includeLastKey {
+ if iter.Valid() {
+ if bytes.Equal(iter.Key().Data(), lastKey) {
+ iter.Next()
+ }
+ }
+ }
+ }
+
+ i := int64(0)
+ for ; iter.Valid(); iter.Next() {
+
+ if limit > 0 {
+ i++
+ if i > limit {
+ break
+ }
+ }
+
+ key := iter.Key().Data()
+
+ if !bytes.HasPrefix(key, prefix) {
+ break
+ }
+
+ ret := fn(key, iter.Value().Data())
+
+ if !ret {
+ break
+ }
+
+ }
+
+ if err := iter.Err(); err != nil {
+ return fmt.Errorf("prefix scan iterator: %v", err)
+ }
+ return nil
+}
+
+func (store *RocksDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+ return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+ directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
+ lastFileStart := directoryPrefix
+ if startFileName != "" {
+ lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+ }
+
+ ro := gorocksdb.NewDefaultReadOptions()
+ defer ro.Destroy()
+ ro.SetFillCache(false)
+
+ iter := store.db.NewIterator(ro)
+ defer iter.Close()
+ err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool {
+ fileName := getNameFromKey(key)
+ if fileName == "" {
+ return true
+ }
+ entry := &filer.Entry{
+ FullPath: weed_util.NewFullPath(string(dirPath), fileName),
+ }
+ lastFileName = fileName
+
+ // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+ if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ return false
+ }
+ if !eachEntryFunc(entry) {
+ return false
+ }
+ return true
+ })
+ if err != nil {
+ return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err)
+ }
+
+ return lastFileName, err
+}
+
+func genKey(dirPath, fileName string) (key []byte) {
+ key = hashToBytes(dirPath)
+ key = append(key, []byte(fileName)...)
+ return key
+}
+
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
+ keyPrefix = hashToBytes(string(fullpath))
+ if len(startFileName) > 0 {
+ keyPrefix = append(keyPrefix, []byte(startFileName)...)
+ }
+ return keyPrefix
+}
+
+func getNameFromKey(key []byte) string {
+
+ return string(key[md5.Size:])
+
+}
+
+// hashToBytes hashes the directory path into a fixed-length md5 key prefix
+func hashToBytes(dir string) []byte {
+ h := md5.New()
+ io.WriteString(h, dir)
+
+ b := h.Sum(nil)
+
+ return b
+}
+
+func (store *RocksDBStore) Shutdown() {
+ store.db.Close()
+ store.options.close()
+}
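
The key layout here is `md5(directory)` followed by the file name: every entry of one directory shares the same 16-byte prefix, so listing and `DeleteFolderChildren` both reduce to a RocksDB prefix scan. An in-package sketch of what `genKey` produces:

```go
key := genKey("/home/chris", "file1.jpg")
fmt.Printf("%x\n", key[:md5.Size])  // 16-byte md5 of "/home/chris"
fmt.Println(string(key[md5.Size:])) // "file1.jpg"
```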
diff --git a/weed/filer/rocksdb/rocksdb_store_kv.go b/weed/filer/rocksdb/rocksdb_store_kv.go
new file mode 100644
index 000000000..cf1214d5b
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_store_kv.go
@@ -0,0 +1,47 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+ err = store.db.Put(store.wo, key, value)
+
+ if err != nil {
+ return fmt.Errorf("kv put: %v", err)
+ }
+
+ return nil
+}
+
+func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
+
+ value, err = store.db.GetBytes(store.ro, key)
+
+ if value == nil {
+ return nil, filer.ErrKvNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("kv get: %v", err)
+ }
+
+ return
+}
+
+func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) {
+
+ err = store.db.Delete(store.wo, key)
+
+ if err != nil {
+ return fmt.Errorf("kv delete: %v", err)
+ }
+
+ return nil
+}
diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go
new file mode 100644
index 000000000..f6e755b4b
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_store_test.go
@@ -0,0 +1,117 @@
+// +build rocksdb
+
+package rocksdb
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestCreateAndFind(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+ defer os.RemoveAll(dir)
+ store := &RocksDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
+
+ ctx := context.Background()
+
+ entry1 := &filer.Entry{
+ FullPath: fullpath,
+ Attr: filer.Attr{
+ Mode: 0440,
+ Uid: 1234,
+ Gid: 5678,
+ },
+ }
+
+ if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil {
+ t.Errorf("create entry %v: %v", entry1.FullPath, err)
+ return
+ }
+
+ entry, err := testFiler.FindEntry(ctx, fullpath)
+
+ if err != nil {
+ t.Errorf("find entry: %v", err)
+ return
+ }
+
+ if entry.FullPath != entry1.FullPath {
+ t.Errorf("find wrong entry: %v", entry.FullPath)
+ return
+ }
+
+ // checking one upper directory
+ entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+ // checking one upper directory
+ entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
+ if len(entries) != 1 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func TestEmptyRoot(t *testing.T) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+ defer os.RemoveAll(dir)
+ store := &RocksDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ // checking one upper directory
+ entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
+ if err != nil {
+ t.Errorf("list entries: %v", err)
+ return
+ }
+ if len(entries) != 0 {
+ t.Errorf("list entries count: %v", len(entries))
+ return
+ }
+
+}
+
+func BenchmarkInsertEntry(b *testing.B) {
+ testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+ dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
+ defer os.RemoveAll(dir)
+ store := &RocksDBStore{}
+ store.initialize(dir)
+ testFiler.SetStore(store)
+
+ ctx := context.Background()
+
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ entry := &filer.Entry{
+ FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)),
+ Attr: filer.Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ },
+ }
+ store.InsertEntry(ctx, entry)
+ }
+}
diff --git a/weed/filer/rocksdb/rocksdb_ttl.go b/weed/filer/rocksdb/rocksdb_ttl.go
new file mode 100644
index 000000000..faed22310
--- /dev/null
+++ b/weed/filer/rocksdb/rocksdb_ttl.go
@@ -0,0 +1,40 @@
+//+build rocksdb
+
+package rocksdb
+
+import (
+ "time"
+
+ "github.com/tecbot/gorocksdb"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+)
+
+type TTLFilter struct {
+ skipLevel0 bool
+}
+
+func NewTTLFilter() gorocksdb.CompactionFilter {
+ return &TTLFilter{
+ skipLevel0: true,
+ }
+}
+
+func (t *TTLFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) {
+ // decode could be slow, causing write stall
+ // level >0 sst can run compaction in parallel
+ if !t.skipLevel0 || level > 0 {
+ entry := filer.Entry{}
+ if err := entry.DecodeAttributesAndChunks(val); err == nil {
+ if entry.TtlSec > 0 &&
+ entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) {
+ return true, nil
+ }
+ }
+ }
+ return false, val
+}
+
+func (t *TTLFilter) Name() string {
+ return "TTLFilter"
+}
diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go
new file mode 100644
index 000000000..92387fb09
--- /dev/null
+++ b/weed/filer/s3iam_conf.go
@@ -0,0 +1,25 @@
+package filer
+
+import (
+ "bytes"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+ "github.com/golang/protobuf/jsonpb"
+ "io"
+)
+
+func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfiguration) error {
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(content), config); err != nil {
+ return err
+ }
+ return nil
+}
+
+func S3ConfigurationToText(writer io.Writer, config *iam_pb.S3ApiConfiguration) error {
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: false,
+ Indent: " ",
+ }
+
+ return m.Marshal(writer, config)
+}
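
`ParseS3ConfigurationFromBytes` expects the jsonpb encoding of `iam_pb.S3ApiConfiguration`. A sketch of a minimal document it should accept — field names follow the proto's JSON naming, and the action strings are assumed to match the `s3_constants` values exercised by the test below:

```go
content := []byte(`{
  "identities": [{
    "name": "some_name",
    "credentials": [{"accessKey": "ak1", "secretKey": "sk1"}],
    "actions": ["Read", "Write"]
  }]
}`)
config := &iam_pb.S3ApiConfiguration{}
if err := filer.ParseS3ConfigurationFromBytes(content, config); err != nil {
	log.Fatalf("parse s3 config: %v", err)
}
```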
diff --git a/weed/filer/s3iam_conf_test.go b/weed/filer/s3iam_conf_test.go
new file mode 100644
index 000000000..65cc49840
--- /dev/null
+++ b/weed/filer/s3iam_conf_test.go
@@ -0,0 +1,57 @@
+package filer
+
+import (
+ "bytes"
+ . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestS3Conf(t *testing.T) {
+ s3Conf := &iam_pb.S3ApiConfiguration{
+ Identities: []*iam_pb.Identity{
+ {
+ Name: "some_name",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key1",
+ },
+ },
+ Actions: []string{
+ ACTION_ADMIN,
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ },
+ {
+ Name: "some_read_only_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key2",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ ACTION_TAGGING,
+ ACTION_LIST,
+ },
+ },
+ },
+ }
+ var buf bytes.Buffer
+ err := S3ConfigurationToText(&buf, s3Conf)
+ assert.Equal(t, err, nil)
+ s3ConfSaved := &iam_pb.S3ApiConfiguration{}
+ err = ParseS3ConfigurationFromBytes(buf.Bytes(), s3ConfSaved)
+ assert.Equal(t, err, nil)
+
+ assert.Equal(t, "some_name", s3ConfSaved.Identities[0].Name)
+ assert.Equal(t, "some_read_only_user", s3ConfSaved.Identities[1].Name)
+ assert.Equal(t, "some_access_key1", s3ConfSaved.Identities[0].Credentials[0].AccessKey)
+ assert.Equal(t, "some_secret_key2", s3ConfSaved.Identities[1].Credentials[0].SecretKey)
+}
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
new file mode 100644
index 000000000..661a210ea
--- /dev/null
+++ b/weed/filer/stream.go
@@ -0,0 +1,245 @@
+package filer
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ "golang.org/x/sync/errgroup"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error {
+
+ glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks)
+ chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
+
+ fileId2Url := make(map[string][]string)
+
+ for _, chunkView := range chunkViews {
+
+ urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ } else if len(urlStrings) == 0 {
+ glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
+ return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
+ }
+ fileId2Url[chunkView.FileId] = urlStrings
+ }
+
+ if isCheck {
+ // Pre-check every chunk view's URLs before streaming
+ gErr := new(errgroup.Group)
+ CheckAllChunkViews(chunkViews, &fileId2Url, gErr)
+ if err := gErr.Wait(); err != nil {
+ glog.Errorf("check all chunks: %v", err)
+ return fmt.Errorf("check all chunks: %v", err)
+ }
+ return nil
+ }
+
+ for _, chunkView := range chunkViews {
+
+ urlStrings := fileId2Url[chunkView.FileId]
+ data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+ if err != nil {
+ glog.Errorf("read chunk: %v", err)
+ return fmt.Errorf("read chunk: %v", err)
+ }
+
+ _, err = w.Write(data)
+ if err != nil {
+ glog.Errorf("write chunk: %v", err)
+ return fmt.Errorf("write chunk: %v", err)
+ }
+ }
+
+ return nil
+
+}
+
+func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) {
+ for _, chunkView := range chunkViews {
+  chunkView := chunkView // rebind so each goroutine below captures its own chunk view
+  urlStrings := (*fileId2Url)[chunkView.FileId]
+  glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings)
+  gErr.Go(func() error {
+   _, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+   return err
+  })
+ }
+}
+
+// ---------------- ReadAll ----------------------------------
+
+func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
+
+ buffer := bytes.Buffer{}
+
+ lookupFileIdFn := func(fileId string) (targetUrls []string, err error) {
+ return masterClient.LookupFileId(fileId)
+ }
+
+ chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
+
+ for _, chunkView := range chunkViews {
+ urlStrings, err := lookupFileIdFn(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+
+ data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+ if err != nil {
+ return nil, err
+ }
+ buffer.Write(data)
+ }
+ return buffer.Bytes(), nil
+}
+
+// ---------------- ChunkStreamReader ----------------------------------
+type ChunkStreamReader struct {
+ chunkViews []*ChunkView
+ logicOffset int64
+ buffer []byte
+ bufferOffset int64
+ bufferPos int
+ chunkIndex int
+ lookupFileId wdclient.LookupFileIdFunctionType
+}
+
+var _ = io.ReadSeeker(&ChunkStreamReader{})
+
+func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ lookupFileIdFn := func(fileId string) (targetUrl []string, err error) {
+ return masterClient.LookupFileId(fileId)
+ }
+
+ chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: lookupFileIdFn,
+ }
+}
+
+func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ lookupFileIdFn := LookupFn(filerClient)
+
+ chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: lookupFileIdFn,
+ }
+}
+
+func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
+ for n < len(p) {
+ if c.isBufferEmpty() {
+ if c.chunkIndex >= len(c.chunkViews) {
+ return n, io.EOF
+ }
+ chunkView := c.chunkViews[c.chunkIndex]
+   if err = c.fetchChunkToBuffer(chunkView); err != nil {
+    return
+   }
+ c.chunkIndex++
+ }
+ t := copy(p[n:], c.buffer[c.bufferPos:])
+ c.bufferPos += t
+ n += t
+ }
+ return
+}
+
+func (c *ChunkStreamReader) isBufferEmpty() bool {
+ return len(c.buffer) <= c.bufferPos
+}
+
+func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
+
+ var totalSize int64
+ for _, chunk := range c.chunkViews {
+ totalSize += int64(chunk.Size)
+ }
+
+ var err error
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += c.bufferOffset + int64(c.bufferPos)
+ case io.SeekEnd:
+ offset = totalSize + offset
+ }
+ if offset > totalSize {
+ err = io.ErrUnexpectedEOF
+ }
+
+ for i, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
+    if fetchErr := c.fetchChunkToBuffer(chunk); fetchErr != nil {
+     return offset, fetchErr
+    }
+ c.chunkIndex = i + 1
+ break
+ }
+ }
+ }
+ c.bufferPos = int(offset - c.bufferOffset)
+
+ return offset, err
+
+}
+
+func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
+ urlStrings, err := c.lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ var buffer bytes.Buffer
+ var shouldRetry bool
+ for _, urlString := range urlStrings {
+ shouldRetry, err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if !shouldRetry {
+ break
+ }
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ buffer.Reset()
+ } else {
+ break
+ }
+ }
+ if err != nil {
+ return err
+ }
+ c.buffer = buffer.Bytes()
+ c.bufferPos = 0
+ c.bufferOffset = chunkView.LogicOffset
+
+ // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ return nil
+}
+
+func (c *ChunkStreamReader) Close() {
+ // TODO try to release and reuse buffer
+}
+
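+// VolumeId returns the volume id portion of a file id, i.e. everything before the last comma.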
+func VolumeId(fileId string) string {
+ lastCommaIndex := strings.LastIndex(fileId, ",")
+ if lastCommaIndex > 0 {
+ return fileId[:lastCommaIndex]
+ }
+ return fileId
+}
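
As a usage sketch, ChunkStreamReader presents an entry's chunk list as a single io.ReadSeeker over the logical file. Assuming a connected filerClient and an entry with chunks (both names are hypothetical here):

    reader := filer.NewChunkStreamReader(filerClient, entry.Chunks)
    defer reader.Close()
    if _, err := reader.Seek(1024, io.SeekStart); err != nil {
        // offset past the end of the file
    }
    buf := make([]byte, 4096)
    n, err := reader.Read(buf) // err == io.EOF once all chunk views are drained
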
diff --git a/weed/filer/topics.go b/weed/filer/topics.go
new file mode 100644
index 000000000..3a2fde8c4
--- /dev/null
+++ b/weed/filer/topics.go
@@ -0,0 +1,6 @@
+package filer
+
+const (
+ TopicsDir = "/topics"
+ SystemLogDir = TopicsDir + "/.system/log"
+)
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go
deleted file mode 100644
index d512467c7..000000000
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package abstract_sql
-
-import (
- "context"
- "database/sql"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
-)
-
-type AbstractSqlStore struct {
- DB *sql.DB
- SqlInsert string
- SqlUpdate string
- SqlFind string
- SqlDelete string
- SqlDeleteFolderChildren string
- SqlListExclusive string
- SqlListInclusive string
-}
-
-type TxOrDB interface {
- ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
- QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
- QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
-}
-
-func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
- tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
- Isolation: sql.LevelReadCommitted,
- ReadOnly: false,
- })
- if err != nil {
- return ctx, err
- }
-
- return context.WithValue(ctx, "tx", tx), nil
-}
-func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
- if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
- return tx.Commit()
- }
- return nil
-}
-func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
- if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
- return tx.Rollback()
- }
- return nil
-}
-
-func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
- if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
- return tx
- }
- return store.DB
-}
-
-func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
-
- dir, name := entry.FullPath.DirAndName()
- meta, err := entry.EncodeAttributesAndChunks()
- if err != nil {
- return fmt.Errorf("encode %s: %s", entry.FullPath, err)
- }
-
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta)
- if err != nil {
- return fmt.Errorf("insert %s: %s", entry.FullPath, err)
- }
-
- _, err = res.RowsAffected()
- if err != nil {
- return fmt.Errorf("insert %s but no rows affected: %s", entry.FullPath, err)
- }
- return nil
-}
-
-func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
-
- dir, name := entry.FullPath.DirAndName()
- meta, err := entry.EncodeAttributesAndChunks()
- if err != nil {
- return fmt.Errorf("encode %s: %s", entry.FullPath, err)
- }
-
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir)
- if err != nil {
- return fmt.Errorf("update %s: %s", entry.FullPath, err)
- }
-
- _, err = res.RowsAffected()
- if err != nil {
- return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err)
- }
- return nil
-}
-
-func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) {
-
- dir, name := fullpath.DirAndName()
- row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir)
- var data []byte
- if err := row.Scan(&data); err != nil {
- return nil, filer2.ErrNotFound
- }
-
- entry := &filer2.Entry{
- FullPath: fullpath,
- }
- if err := entry.DecodeAttributesAndChunks(data); err != nil {
- return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
- }
-
- return entry, nil
-}
-
-func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
-
- dir, name := fullpath.DirAndName()
-
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir)
- if err != nil {
- return fmt.Errorf("delete %s: %s", fullpath, err)
- }
-
- _, err = res.RowsAffected()
- if err != nil {
- return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err)
- }
-
- return nil
-}
-
-func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error {
-
- res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, hashToLong(string(fullpath)), fullpath)
- if err != nil {
- return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
- }
-
- _, err = res.RowsAffected()
- if err != nil {
- return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
- }
-
- return nil
-}
-
-func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
-
- sqlText := store.SqlListExclusive
- if inclusive {
- sqlText = store.SqlListInclusive
- }
-
- rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit)
- if err != nil {
- return nil, fmt.Errorf("list %s : %v", fullpath, err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var name string
- var data []byte
- if err = rows.Scan(&name, &data); err != nil {
- glog.V(0).Infof("scan %s : %v", fullpath, err)
- return nil, fmt.Errorf("scan %s: %v", fullpath, err)
- }
-
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
- }
- if err = entry.DecodeAttributesAndChunks(data); err != nil {
- glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
- return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
- }
-
- entries = append(entries, entry)
- }
-
- return entries, nil
-}
diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go
deleted file mode 100644
index 5c982c537..000000000
--- a/weed/filer2/abstract_sql/hashing.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package abstract_sql
-
-import (
- "crypto/md5"
- "io"
-)
-
-// returns a 64 bit big int
-func hashToLong(dir string) (v int64) {
- h := md5.New()
- io.WriteString(h, dir)
-
- b := h.Sum(nil)
-
- v += int64(b[0])
- v <<= 8
- v += int64(b[1])
- v <<= 8
- v += int64(b[2])
- v <<= 8
- v += int64(b[3])
- v <<= 8
- v += int64(b[4])
- v <<= 8
- v += int64(b[5])
- v <<= 8
- v += int64(b[6])
- v <<= 8
- v += int64(b[7])
-
- return
-}
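
For reference, the removed hashToLong is byte-for-byte equivalent to reading the first eight MD5 bytes as a big-endian integer, which is the more direct way to write it (a sketch, assuming crypto/md5 and encoding/binary are imported):

    sum := md5.Sum([]byte(dir))
    v := int64(binary.BigEndian.Uint64(sum[:8]))
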
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go
deleted file mode 100644
index dcaab8bc4..000000000
--- a/weed/filer2/cassandra/cassandra_store.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package cassandra
-
-import (
- "context"
- "fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gocql/gocql"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &CassandraStore{})
-}
-
-type CassandraStore struct {
- cluster *gocql.ClusterConfig
- session *gocql.Session
-}
-
-func (store *CassandraStore) GetName() string {
- return "cassandra"
-}
-
-func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) {
- return store.initialize(
- configuration.GetString("keyspace"),
- configuration.GetStringSlice("hosts"),
- )
-}
-
-func (store *CassandraStore) initialize(keyspace string, hosts []string) (err error) {
- store.cluster = gocql.NewCluster(hosts...)
- store.cluster.Keyspace = keyspace
- store.cluster.Consistency = gocql.LocalQuorum
- store.session, err = store.cluster.CreateSession()
- if err != nil {
- glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
- }
- return
-}
-
-func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {
- return ctx, nil
-}
-func (store *CassandraStore) CommitTransaction(ctx context.Context) error {
- return nil
-}
-func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
- return nil
-}
-
-func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
-
- dir, name := entry.FullPath.DirAndName()
- meta, err := entry.EncodeAttributesAndChunks()
- if err != nil {
- return fmt.Errorf("encode %s: %s", entry.FullPath, err)
- }
-
- if err := store.session.Query(
- "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ",
- dir, name, meta, entry.TtlSec).Exec(); err != nil {
- return fmt.Errorf("insert %s: %s", entry.FullPath, err)
- }
-
- return nil
-}
-
-func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
-
- return store.InsertEntry(ctx, entry)
-}
-
-func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
-
- dir, name := fullpath.DirAndName()
- var data []byte
- if err := store.session.Query(
- "SELECT meta FROM filemeta WHERE directory=? AND name=?",
- dir, name).Consistency(gocql.One).Scan(&data); err != nil {
- if err != gocql.ErrNotFound {
- return nil, filer2.ErrNotFound
- }
- }
-
- if len(data) == 0 {
- return nil, filer2.ErrNotFound
- }
-
- entry = &filer2.Entry{
- FullPath: fullpath,
- }
- err = entry.DecodeAttributesAndChunks(data)
- if err != nil {
- return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
- }
-
- return entry, nil
-}
-
-func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
-
- dir, name := fullpath.DirAndName()
-
- if err := store.session.Query(
- "DELETE FROM filemeta WHERE directory=? AND name=?",
- dir, name).Exec(); err != nil {
- return fmt.Errorf("delete %s : %v", fullpath, err)
- }
-
- return nil
-}
-
-func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error {
-
- if err := store.session.Query(
- "DELETE FROM filemeta WHERE directory=?",
- fullpath).Exec(); err != nil {
- return fmt.Errorf("delete %s : %v", fullpath, err)
- }
-
- return nil
-}
-
-func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
-
- cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
- if inclusive {
- cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?"
- }
-
- var data []byte
- var name string
- iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
- for iter.Scan(&name, &data) {
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
- }
- if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
- err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
- break
- }
- entries = append(entries, entry)
- }
- if err := iter.Close(); err != nil {
- glog.V(0).Infof("list iterator close: %v", err)
- }
-
- return entries, err
-}
diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go
deleted file mode 100644
index 7b05b53dc..000000000
--- a/weed/filer2/configuration.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package filer2
-
-import (
- "os"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/spf13/viper"
-)
-
-var (
- Stores []FilerStore
-)
-
-func (f *Filer) LoadConfiguration(config *viper.Viper) {
-
- validateOneEnabledStore(config)
-
- for _, store := range Stores {
- if config.GetBool(store.GetName() + ".enabled") {
- viperSub := config.Sub(store.GetName())
- if err := store.Initialize(viperSub); err != nil {
- glog.Fatalf("Failed to initialize store for %s: %+v",
- store.GetName(), err)
- }
- f.SetStore(store)
- glog.V(0).Infof("Configure filer for %s", store.GetName())
- return
- }
- }
-
- println()
- println("Supported filer stores are:")
- for _, store := range Stores {
- println(" " + store.GetName())
- }
-
- os.Exit(-1)
-}
-
-func validateOneEnabledStore(config *viper.Viper) {
- enabledStore := ""
- for _, store := range Stores {
- if config.GetBool(store.GetName() + ".enabled") {
- if enabledStore == "" {
- enabledStore = store.GetName()
- } else {
- glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
- }
- }
- }
-}
diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go
deleted file mode 100644
index c901927bb..000000000
--- a/weed/filer2/entry.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package filer2
-
-import (
- "os"
- "time"
-
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-)
-
-type Attr struct {
- Mtime time.Time // time of last modification
- Crtime time.Time // time of creation (OS X only)
- Mode os.FileMode // file mode
- Uid uint32 // owner uid
- Gid uint32 // group gid
- Mime string // mime type
- Replication string // replication
- Collection string // collection name
- TtlSec int32 // ttl in seconds
- UserName string
- GroupNames []string
- SymlinkTarget string
-}
-
-func (attr Attr) IsDirectory() bool {
- return attr.Mode&os.ModeDir > 0
-}
-
-type Entry struct {
- FullPath
-
- Attr
- Extended map[string][]byte
-
- // the following is for files
- Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
-}
-
-func (entry *Entry) Size() uint64 {
- return TotalSize(entry.Chunks)
-}
-
-func (entry *Entry) Timestamp() time.Time {
- if entry.IsDirectory() {
- return entry.Crtime
- } else {
- return entry.Mtime
- }
-}
-
-func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
- if entry == nil {
- return nil
- }
- return &filer_pb.Entry{
- Name: entry.FullPath.Name(),
- IsDirectory: entry.IsDirectory(),
- Attributes: EntryAttributeToPb(entry),
- Chunks: entry.Chunks,
- Extended: entry.Extended,
- }
-}
-
-func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
- if entry == nil {
- return nil
- }
- dir, _ := entry.FullPath.DirAndName()
- return &filer_pb.FullEntry{
- Dir: dir,
- Entry: entry.ToProtoEntry(),
- }
-}
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go
deleted file mode 100644
index b5876df82..000000000
--- a/weed/filer2/filechunks.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package filer2
-
-import (
- "fmt"
- "hash/fnv"
- "sort"
- "sync"
-
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-)
-
-func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
- for _, c := range chunks {
- t := uint64(c.Offset + int64(c.Size))
- if size < t {
- size = t
- }
- }
- return
-}
-
-func ETag(chunks []*filer_pb.FileChunk) (etag string) {
- if len(chunks) == 1 {
- return chunks[0].ETag
- }
-
- h := fnv.New32a()
- for _, c := range chunks {
- h.Write([]byte(c.ETag))
- }
- return fmt.Sprintf("%x", h.Sum32())
-}
-
-func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
-
- visibles := NonOverlappingVisibleIntervals(chunks)
-
- fileIds := make(map[string]bool)
- for _, interval := range visibles {
- fileIds[interval.fileId] = true
- }
- for _, chunk := range chunks {
- if _, found := fileIds[chunk.GetFileIdString()]; found {
- compacted = append(compacted, chunk)
- } else {
- garbage = append(garbage, chunk)
- }
- }
-
- return
-}
-
-func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
-
- fileIds := make(map[string]bool)
- for _, interval := range bs {
- fileIds[interval.GetFileIdString()] = true
- }
- for _, chunk := range as {
- if _, found := fileIds[chunk.GetFileIdString()]; !found {
- delta = append(delta, chunk)
- }
- }
-
- return
-}
-
-type ChunkView struct {
- FileId string
- Offset int64
- Size uint64
- LogicOffset int64
- IsFullChunk bool
-}
-
-func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
-
- visibles := NonOverlappingVisibleIntervals(chunks)
-
- return ViewFromVisibleIntervals(visibles, offset, size)
-
-}
-
-func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) {
-
- stop := offset + int64(size)
-
- for _, chunk := range visibles {
- if chunk.start <= offset && offset < chunk.stop && offset < stop {
- isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop
- views = append(views, &ChunkView{
- FileId: chunk.fileId,
- Offset: offset - chunk.start, // offset is the data starting location in this file id
- Size: uint64(min(chunk.stop, stop) - offset),
- LogicOffset: offset,
- IsFullChunk: isFullChunk,
- })
- offset = min(chunk.stop, stop)
- }
- }
-
- return views
-
-}
-
-func logPrintf(name string, visibles []VisibleInterval) {
- /*
- log.Printf("%s len %d", name, len(visibles))
- for _, v := range visibles {
- log.Printf("%s: => %+v", name, v)
- }
- */
-}
-
-var bufPool = sync.Pool{
- New: func() interface{} {
- return new(VisibleInterval)
- },
-}
-
-func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {
-
- newV := newVisibleInterval(
- chunk.Offset,
- chunk.Offset+int64(chunk.Size),
- chunk.GetFileIdString(),
- chunk.Mtime,
- true,
- )
-
- length := len(visibles)
- if length == 0 {
- return append(visibles, newV)
- }
- last := visibles[length-1]
- if last.stop <= chunk.Offset {
- return append(visibles, newV)
- }
-
- logPrintf(" before", visibles)
- for _, v := range visibles {
- if v.start < chunk.Offset && chunk.Offset < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- v.start,
- chunk.Offset,
- v.fileId,
- v.modifiedTime,
- false,
- ))
- }
- chunkStop := chunk.Offset + int64(chunk.Size)
- if v.start < chunkStop && chunkStop < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- chunkStop,
- v.stop,
- v.fileId,
- v.modifiedTime,
- false,
- ))
- }
- if chunkStop <= v.start || v.stop <= chunk.Offset {
- newVisibles = append(newVisibles, v)
- }
- }
- newVisibles = append(newVisibles, newV)
-
- logPrintf(" append", newVisibles)
-
- for i := len(newVisibles) - 1; i >= 0; i-- {
- if i > 0 && newV.start < newVisibles[i-1].start {
- newVisibles[i] = newVisibles[i-1]
- } else {
- newVisibles[i] = newV
- break
- }
- }
- logPrintf(" sorted", newVisibles)
-
- return newVisibles
-}
-
-func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) {
-
- sort.Slice(chunks, func(i, j int) bool {
- return chunks[i].Mtime < chunks[j].Mtime
- })
-
- var newVisibles []VisibleInterval
- for _, chunk := range chunks {
- newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk)
- t := visibles[:0]
- visibles = newVisibles
- newVisibles = t
-
- logPrintf("add", visibles)
-
- }
-
- return
-}
-
-// find non-overlapping visible intervals
-// visible interval map to one file chunk
-
-type VisibleInterval struct {
- start int64
- stop int64
- modifiedTime int64
- fileId string
- isFullChunk bool
-}
-
-func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval {
- return VisibleInterval{
- start: start,
- stop: stop,
- fileId: fileId,
- modifiedTime: modifiedTime,
- isFullChunk: isFullChunk,
- }
-}
-
-func min(x, y int64) int64 {
- if x <= y {
- return x
- }
- return y
-}
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go
deleted file mode 100644
index b724e20fd..000000000
--- a/weed/filer2/filer.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package filer2
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "google.golang.org/grpc"
-
- "github.com/karlseguin/ccache"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/wdclient"
-)
-
-const PaginationSize = 1024 * 256
-
-var (
- OS_UID = uint32(os.Getuid())
- OS_GID = uint32(os.Getgid())
-)
-
-type Filer struct {
- store *FilerStoreWrapper
- directoryCache *ccache.Cache
- MasterClient *wdclient.MasterClient
- fileIdDeletionChan chan string
- GrpcDialOption grpc.DialOption
-}
-
-func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer {
- f := &Filer{
- directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
- MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters),
- fileIdDeletionChan: make(chan string, PaginationSize),
- GrpcDialOption: grpcDialOption,
- }
-
- go f.loopProcessingDeletion()
-
- return f
-}
-
-func (f *Filer) SetStore(store FilerStore) {
- f.store = NewFilerStoreWrapper(store)
-}
-
-func (f *Filer) DisableDirectoryCache() {
- f.directoryCache = nil
-}
-
-func (fs *Filer) GetMaster() string {
- return fs.MasterClient.GetMaster()
-}
-
-func (fs *Filer) KeepConnectedToMaster() {
- fs.MasterClient.KeepConnectedToMaster()
-}
-
-func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
- return f.store.BeginTransaction(ctx)
-}
-
-func (f *Filer) CommitTransaction(ctx context.Context) error {
- return f.store.CommitTransaction(ctx)
-}
-
-func (f *Filer) RollbackTransaction(ctx context.Context) error {
- return f.store.RollbackTransaction(ctx)
-}
-
-func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error {
-
- if string(entry.FullPath) == "/" {
- return nil
- }
-
- dirParts := strings.Split(string(entry.FullPath), "/")
-
- // fmt.Printf("directory parts: %+v\n", dirParts)
-
- var lastDirectoryEntry *Entry
-
- for i := 1; i < len(dirParts); i++ {
- dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...))
- // fmt.Printf("%d directory: %+v\n", i, dirPath)
-
- // first check local cache
- dirEntry := f.cacheGetDirectory(dirPath)
-
- // not found, check the store directly
- if dirEntry == nil {
- glog.V(4).Infof("find uncached directory: %s", dirPath)
- dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath))
- } else {
- glog.V(4).Infof("found cached directory: %s", dirPath)
- }
-
- // no such existing directory
- if dirEntry == nil {
-
- // create the directory
- now := time.Now()
-
- dirEntry = &Entry{
- FullPath: FullPath(dirPath),
- Attr: Attr{
- Mtime: now,
- Crtime: now,
- Mode: os.ModeDir | 0770,
- Uid: entry.Uid,
- Gid: entry.Gid,
- },
- }
-
- glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
- mkdirErr := f.store.InsertEntry(ctx, dirEntry)
- if mkdirErr != nil {
- if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound {
- return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
- }
- } else {
- f.NotifyUpdateEvent(nil, dirEntry, false)
- }
-
- } else if !dirEntry.IsDirectory() {
- return fmt.Errorf("%s is a file", dirPath)
- }
-
- // cache the directory entry
- f.cacheSetDirectory(dirPath, dirEntry, i)
-
- // remember the direct parent directory entry
- if i == len(dirParts)-1 {
- lastDirectoryEntry = dirEntry
- }
-
- }
-
- if lastDirectoryEntry == nil {
- return fmt.Errorf("parent folder not found: %v", entry.FullPath)
- }
-
- /*
- if !hasWritePermission(lastDirectoryEntry, entry) {
- glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
- lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
- return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
- }
- */
-
- oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
-
- if oldEntry == nil {
- if err := f.store.InsertEntry(ctx, entry); err != nil {
- glog.Errorf("insert entry %s: %v", entry.FullPath, err)
- return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
- }
- } else {
- if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
- glog.Errorf("update entry %s: %v", entry.FullPath, err)
- return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
- }
- }
-
- f.NotifyUpdateEvent(oldEntry, entry, true)
-
- f.deleteChunksIfNotNew(oldEntry, entry)
-
- return nil
-}
-
-func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
- if oldEntry != nil {
- if oldEntry.IsDirectory() && !entry.IsDirectory() {
- glog.Errorf("existing %s is a directory", entry.FullPath)
- return fmt.Errorf("existing %s is a directory", entry.FullPath)
- }
- if !oldEntry.IsDirectory() && entry.IsDirectory() {
- glog.Errorf("existing %s is a file", entry.FullPath)
- return fmt.Errorf("existing %s is a file", entry.FullPath)
- }
- }
- return f.store.UpdateEntry(ctx, entry)
-}
-
-func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) {
-
- now := time.Now()
-
- if string(p) == "/" {
- return &Entry{
- FullPath: p,
- Attr: Attr{
- Mtime: now,
- Crtime: now,
- Mode: os.ModeDir | 0755,
- Uid: OS_UID,
- Gid: OS_GID,
- },
- }, nil
- }
- return f.store.FindEntry(ctx, p)
-}
-
-func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
- if strings.HasSuffix(string(p), "/") && len(p) > 1 {
- p = p[0 : len(p)-1]
- }
- return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
-}
-
-func (f *Filer) cacheDelDirectory(dirpath string) {
-
- if dirpath == "/" {
- return
- }
-
- if f.directoryCache == nil {
- return
- }
- f.directoryCache.Delete(dirpath)
- return
-}
-
-func (f *Filer) cacheGetDirectory(dirpath string) *Entry {
-
- if f.directoryCache == nil {
- return nil
- }
- item := f.directoryCache.Get(dirpath)
- if item == nil {
- return nil
- }
- return item.Value().(*Entry)
-}
-
-func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
-
- if f.directoryCache == nil {
- return
- }
-
- minutes := 60
- if level < 10 {
- minutes -= level * 6
- }
-
- f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
-}
diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go
deleted file mode 100644
index 1a10f7c20..000000000
--- a/weed/filer2/filer_client_util.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package filer2
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "strings"
- "sync"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func VolumeId(fileId string) string {
- lastCommaIndex := strings.LastIndex(fileId, ",")
- if lastCommaIndex > 0 {
- return fileId[:lastCommaIndex]
- }
- return fileId
-}
-
-type FilerClient interface {
- WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error
-}
-
-func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) {
- var vids []string
- for _, chunkView := range chunkViews {
- vids = append(vids, VolumeId(chunkView.FileId))
- }
-
- vid2Locations := make(map[string]*filer_pb.Locations)
-
- err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
- VolumeIds: vids,
- })
- if err != nil {
- return err
- }
-
- vid2Locations = resp.LocationsMap
-
- return nil
- })
-
- if err != nil {
- return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
- }
-
- var wg sync.WaitGroup
- for _, chunkView := range chunkViews {
- wg.Add(1)
- go func(chunkView *ChunkView) {
- defer wg.Done()
-
- glog.V(4).Infof("read fh reading chunk: %+v", chunkView)
-
- locations := vid2Locations[VolumeId(chunkView.FileId)]
- if locations == nil || len(locations.Locations) == 0 {
- glog.V(0).Infof("failed to locate %s", chunkView.FileId)
- err = fmt.Errorf("failed to locate %s", chunkView.FileId)
- return
- }
-
- var n int64
- n, err = util.ReadUrl(
- fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
- chunkView.Offset,
- int(chunkView.Size),
- buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)],
- !chunkView.IsFullChunk)
-
- if err != nil {
-
- glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err)
-
- err = fmt.Errorf("failed to read http://%s/%s: %v",
- locations.Locations[0].Url, chunkView.FileId, err)
- return
- }
-
- glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView)
- totalRead += n
-
- }(chunkView)
- }
- wg.Wait()
- return
-}
-
-func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) {
-
- dir, name := FullPath(fullFilePath).DirAndName()
-
- err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir,
- Name: name,
- }
-
- glog.V(3).Infof("read %s request: %v", fullFilePath, request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
- return nil
- }
- glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err)
- return err
- }
-
- if resp.Entry == nil {
- glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
- return nil
- }
-
- entry = resp.Entry
- return nil
- })
-
- return
-}
-
-func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) {
-
- err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- lastEntryName := ""
-
- request := &filer_pb.ListEntriesRequest{
- Directory: fullDirPath,
- Prefix: prefix,
- StartFromFileName: lastEntryName,
- Limit: math.MaxUint32,
- }
-
- glog.V(3).Infof("read directory: %v", request)
- stream, err := client.ListEntries(ctx, request)
- if err != nil {
- return fmt.Errorf("list %s: %v", fullDirPath, err)
- }
-
- var prevEntry *filer_pb.Entry
- for {
- resp, recvErr := stream.Recv()
- if recvErr != nil {
- if recvErr == io.EOF {
- if prevEntry != nil {
- fn(prevEntry, true)
- }
- break
- } else {
- return recvErr
- }
- }
- if prevEntry != nil {
- fn(prevEntry, false)
- }
- prevEntry = resp.Entry
- }
-
- return nil
-
- })
-
- return
-}
diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go
deleted file mode 100644
index 75a09e7ef..000000000
--- a/weed/filer2/filer_delete_entry.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package filer2
-
-import (
- "context"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-)
-
-func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) {
- if p == "/" {
- return nil
- }
-
- entry, findErr := f.FindEntry(ctx, p)
- if findErr != nil {
- return findErr
- }
-
- var chunks []*filer_pb.FileChunk
- chunks = append(chunks, entry.Chunks...)
- if entry.IsDirectory() {
- // delete the folder children, not including the folder itself
- var dirChunks []*filer_pb.FileChunk
- dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
- if err != nil {
- return fmt.Errorf("delete directory %s: %v", p, err)
- }
- chunks = append(chunks, dirChunks...)
- f.cacheDelDirectory(string(p))
- }
- // delete the file or folder
- err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks)
- if err != nil {
- return fmt.Errorf("delete file %s: %v", p, err)
- }
-
- if shouldDeleteChunks {
- go f.DeleteChunks(chunks)
- }
-
- return nil
-}
-
-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) {
-
- lastFileName := ""
- includeLastFile := false
- for {
- entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
- if err != nil {
- glog.Errorf("list folder %s: %v", entry.FullPath, err)
- return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
- }
- if lastFileName == "" && !isRecursive && len(entries) > 0 {
- // only for first iteration in the loop
- return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
- }
-
- for _, sub := range entries {
- lastFileName = sub.Name()
- var dirChunks []*filer_pb.FileChunk
- if sub.IsDirectory() {
- dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
- }
- if err != nil && !ignoreRecursiveError {
- return nil, err
- }
- if shouldDeleteChunks {
- chunks = append(chunks, dirChunks...)
- }
- }
-
- if len(entries) < PaginationSize {
- break
- }
- }
-
- f.cacheDelDirectory(string(entry.FullPath))
-
- glog.V(3).Infof("deleting directory %v", entry.FullPath)
-
- if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
- return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
- }
- f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
-
- return chunks, nil
-}
-
-func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) {
-
- glog.V(3).Infof("deleting entry %v", entry.FullPath)
-
- if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
- return fmt.Errorf("filer store delete: %v", storeDeletionErr)
- }
- f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
-
- return nil
-}
diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go
deleted file mode 100644
index 9937685f7..000000000
--- a/weed/filer2/filer_deletion.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package filer2
-
-import (
- "time"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-)
-
-func (f *Filer) loopProcessingDeletion() {
-
- ticker := time.NewTicker(5 * time.Second)
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
- m := make(map[string]operation.LookupResult)
- for _, vid := range vids {
- locs, _ := f.MasterClient.GetVidLocations(vid)
- var locations []operation.Location
- for _, loc := range locs {
- locations = append(locations, operation.Location{
- Url: loc.Url,
- PublicUrl: loc.PublicUrl,
- })
- }
- m[vid] = operation.LookupResult{
- VolumeId: vid,
- Locations: locations,
- }
- }
- return m, nil
- }
-
- var fileIds []string
- for {
- select {
- case fid := <-f.fileIdDeletionChan:
- fileIds = append(fileIds, fid)
- if len(fileIds) >= 4096 {
- glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
- fileIds = fileIds[:0]
- }
- case <-ticker.C:
- if len(fileIds) > 0 {
- glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
- fileIds = fileIds[:0]
- }
- }
- }
-}
-
-func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
- for _, chunk := range chunks {
- f.fileIdDeletionChan <- chunk.GetFileIdString()
- }
-}
-
-// DeleteFileByFileId direct delete by file id.
-// Only used when the fileId is not being managed by snapshots.
-func (f *Filer) DeleteFileByFileId(fileId string) {
- f.fileIdDeletionChan <- fileId
-}
-
-func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
-
- if oldEntry == nil {
- return
- }
- if newEntry == nil {
- f.DeleteChunks(oldEntry.Chunks)
- }
-
- var toDelete []*filer_pb.FileChunk
- newChunkIds := make(map[string]bool)
- for _, newChunk := range newEntry.Chunks {
- newChunkIds[newChunk.GetFileIdString()] = true
- }
-
- for _, oldChunk := range oldEntry.Chunks {
- if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
- toDelete = append(toDelete, oldChunk)
- }
- }
- f.DeleteChunks(toDelete)
-}
diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go
deleted file mode 100644
index c37381116..000000000
--- a/weed/filer2/filer_notify.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package filer2
-
-import (
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/notification"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-)
-
-func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) {
- var key string
- if oldEntry != nil {
- key = string(oldEntry.FullPath)
- } else if newEntry != nil {
- key = string(newEntry.FullPath)
- } else {
- return
- }
-
- if notification.Queue != nil {
-
- glog.V(3).Infof("notifying entry update %v", key)
-
- newParentPath := ""
- if newEntry != nil {
- newParentPath, _ = newEntry.FullPath.DirAndName()
- }
-
- notification.Queue.SendMessage(
- key,
- &filer_pb.EventNotification{
- OldEntry: oldEntry.ToProtoEntry(),
- NewEntry: newEntry.ToProtoEntry(),
- DeleteChunks: deleteChunks,
- NewParentPath: newParentPath,
- },
- )
-
- }
-}
diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go
deleted file mode 100644
index 0bb0bd611..000000000
--- a/weed/filer2/filerstore.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package filer2
-
-import (
- "context"
- "errors"
- "time"
-
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/stats"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-type FilerStore interface {
- // GetName gets the name to locate the configuration in filer.toml file
- GetName() string
- // Initialize initializes the file store
- Initialize(configuration util.Configuration) error
- InsertEntry(context.Context, *Entry) error
- UpdateEntry(context.Context, *Entry) (err error)
- // err == filer2.ErrNotFound if not found
- FindEntry(context.Context, FullPath) (entry *Entry, err error)
- DeleteEntry(context.Context, FullPath) (err error)
- DeleteFolderChildren(context.Context, FullPath) (err error)
- ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
-
- BeginTransaction(ctx context.Context) (context.Context, error)
- CommitTransaction(ctx context.Context) error
- RollbackTransaction(ctx context.Context) error
-}
-
-var ErrNotFound = errors.New("filer: no entry is found in filer store")
-
-type FilerStoreWrapper struct {
- actualStore FilerStore
-}
-
-func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
- if innerStore, ok := store.(*FilerStoreWrapper); ok {
- return innerStore
- }
- return &FilerStoreWrapper{
- actualStore: store,
- }
-}
-
-func (fsw *FilerStoreWrapper) GetName() string {
- return fsw.actualStore.GetName()
-}
-
-func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error {
- return fsw.actualStore.Initialize(configuration)
-}
-
-func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error {
- stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc()
- start := time.Now()
- defer func() {
- stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
- }()
-
- filer_pb.BeforeEntrySerialization(entry.Chunks)
- return fsw.actualStore.InsertEntry(ctx, entry)
-}
-
-func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error {
- stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc()
- start := time.Now()
- defer func() {
- stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
- }()
-
- filer_pb.BeforeEntrySerialization(entry.Chunks)
- return fsw.actualStore.UpdateEntry(ctx, entry)
-}
-
-func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) {
- stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc()
- start := time.Now()
- defer func() {
- stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
- }()
-
- entry, err = fsw.actualStore.FindEntry(ctx, fp)
- if err != nil {
- return nil, err
- }
- filer_pb.AfterEntryDeserialization(entry.Chunks)
- return
-}
-
-func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) {
- stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc()
- start := time.Now()
- defer func() {
- stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds())
- }()
-
- return fsw.actualStore.DeleteEntry(ctx, fp)
-}
-
-func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) {
- stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc()
- start := time.Now()
- defer func() {
- stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds())
- }()
-
- return fsw.actualStore.DeleteFolderChildren(ctx, fp)
-}
-
-func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
- stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc()
- start := time.Now()
- defer func() {
- stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds())
- }()
-
- entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
- if err != nil {
- return nil, err
- }
- for _, entry := range entries {
- filer_pb.AfterEntryDeserialization(entry.Chunks)
- }
- return entries, err
-}
-
-func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
- return fsw.actualStore.BeginTransaction(ctx)
-}
-
-func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error {
- return fsw.actualStore.CommitTransaction(ctx)
-}
-
-func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
- return fsw.actualStore.RollbackTransaction(ctx)
-}
diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go
deleted file mode 100644
index d1b06ece5..000000000
--- a/weed/filer2/mysql/mysql_store.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package mysql
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
- "github.com/chrislusf/seaweedfs/weed/util"
- _ "github.com/go-sql-driver/mysql"
-)
-
-const (
- CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &MysqlStore{})
-}
-
-type MysqlStore struct {
- abstract_sql.AbstractSqlStore
-}
-
-func (store *MysqlStore) GetName() string {
- return "mysql"
-}
-
-func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) {
- return store.initialize(
- configuration.GetString("username"),
- configuration.GetString("password"),
- configuration.GetString("hostname"),
- configuration.GetInt("port"),
- configuration.GetString("database"),
- configuration.GetInt("connection_max_idle"),
- configuration.GetInt("connection_max_open"),
- configuration.GetBool("interpolateParams"),
- )
-}
-
-func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int,
- interpolateParams bool) (err error) {
-
- store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
- store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
- store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
- store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
- store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?"
- store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?"
- store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?"
-
- sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
- if interpolateParams {
- sqlUrl += "&interpolateParams=true"
- }
-
- var dbErr error
- store.DB, dbErr = sql.Open("mysql", sqlUrl)
- if dbErr != nil {
- store.DB.Close()
- store.DB = nil
- return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
- }
-
- store.DB.SetMaxIdleConns(maxIdle)
- store.DB.SetMaxOpenConns(maxOpen)
-
- if err = store.DB.Ping(); err != nil {
- return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
- }
-
- return nil
-}
diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go
deleted file mode 100644
index 3ec000fe0..000000000
--- a/weed/filer2/postgres/postgres_store.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package postgres
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
- "github.com/chrislusf/seaweedfs/weed/util"
- _ "github.com/lib/pq"
-)
-
-const (
- CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &PostgresStore{})
-}
-
-type PostgresStore struct {
- abstract_sql.AbstractSqlStore
-}
-
-func (store *PostgresStore) GetName() string {
- return "postgres"
-}
-
-func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) {
- return store.initialize(
- configuration.GetString("username"),
- configuration.GetString("password"),
- configuration.GetString("hostname"),
- configuration.GetInt("port"),
- configuration.GetString("database"),
- configuration.GetString("sslmode"),
- configuration.GetInt("connection_max_idle"),
- configuration.GetInt("connection_max_open"),
- )
-}
-
-func (store *PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) {
-
- store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"
- store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4"
- store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
- store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
- store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2"
- store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
- store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
-
- sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode)
- var dbErr error
- store.DB, dbErr = sql.Open("postgres", sqlUrl)
- if dbErr != nil {
- store.DB.Close()
- store.DB = nil
- return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
- }
-
- store.DB.SetMaxIdleConns(maxIdle)
- store.DB.SetMaxOpenConns(maxOpen)
-
- if err = store.DB.Ping(); err != nil {
- return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
- }
-
- return nil
-}
diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go
deleted file mode 100644
index 01b87cad1..000000000
--- a/weed/filer2/stream.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package filer2
-
-import (
- "io"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/chrislusf/seaweedfs/weed/wdclient"
-)
-
-func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error {
-
- chunkViews := ViewFromChunks(chunks, offset, size)
-
- fileId2Url := make(map[string]string)
-
- for _, chunkView := range chunkViews {
-
- urlString, err := masterClient.LookupFileId(chunkView.FileId)
- if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
- return err
- }
- fileId2Url[chunkView.FileId] = urlString
- }
-
- for _, chunkView := range chunkViews {
- urlString := fileId2Url[chunkView.FileId]
- _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) {
- w.Write(data)
- })
- if err != nil {
- glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
- return err
- }
- }
-
- return nil
-
-}
diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go
deleted file mode 100644
index 4eb8cb90d..000000000
--- a/weed/filer2/tikv/tikv_store.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// +build !386
-// +build !arm
-
-package tikv
-
-import (
- "bytes"
- "context"
- "crypto/md5"
- "fmt"
- "io"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- weed_util "github.com/chrislusf/seaweedfs/weed/util"
-
- "github.com/pingcap/tidb/kv"
- "github.com/pingcap/tidb/store/tikv"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &TikvStore{})
-}
-
-type TikvStore struct {
- store kv.Storage
-}
-
-func (store *TikvStore) GetName() string {
- return "tikv"
-}
-
-func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) {
- pdAddr := configuration.GetString("pdAddress")
- return store.initialize(pdAddr)
-}
-
-func (store *TikvStore) initialize(pdAddr string) (err error) {
- glog.Infof("filer store tikv pd address: %s", pdAddr)
-
- driver := tikv.Driver{}
-
- store.store, err = driver.Open(fmt.Sprintf("tikv://%s", pdAddr))
-
- if err != nil {
- return fmt.Errorf("open tikv %s : %v", pdAddr, err)
- }
-
- return
-}
-
-func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) {
- tx, err := store.store.Begin()
- if err != nil {
- return ctx, err
- }
- return context.WithValue(ctx, "tx", tx), nil
-}
-func (store *TikvStore) CommitTransaction(ctx context.Context) error {
- tx, ok := ctx.Value("tx").(kv.Transaction)
- if ok {
- return tx.Commit(ctx)
- }
- return nil
-}
-func (store *TikvStore) RollbackTransaction(ctx context.Context) error {
- tx, ok := ctx.Value("tx").(kv.Transaction)
- if ok {
- return tx.Rollback()
- }
- return nil
-}
-
-func (store *TikvStore) getTx(ctx context.Context) kv.Transaction {
- if tx, ok := ctx.Value("tx").(kv.Transaction); ok {
- return tx
- }
- return nil
-}
-
-func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- dir, name := entry.DirAndName()
- key := genKey(dir, name)
-
- value, err := entry.EncodeAttributesAndChunks()
- if err != nil {
- return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
- }
-
- err = store.getTx(ctx).Set(key, value)
-
- if err != nil {
- return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
- }
-
- // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
-
- return nil
-}
-
-func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
-
- return store.InsertEntry(ctx, entry)
-}
-
-func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
- dir, name := fullpath.DirAndName()
- key := genKey(dir, name)
-
- data, err := store.getTx(ctx).Get(ctx, key)
-
- if err == kv.ErrNotExist {
- return nil, filer2.ErrNotFound
- }
- if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
- }
-
- entry = &filer2.Entry{
- FullPath: fullpath,
- }
- err = entry.DecodeAttributesAndChunks(data)
- if err != nil {
- return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
- }
-
- // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
-
- return entry, nil
-}
-
-func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
- dir, name := fullpath.DirAndName()
- key := genKey(dir, name)
-
- err = store.getTx(ctx).Delete(key)
- if err != nil {
- return fmt.Errorf("delete %s : %v", fullpath, err)
- }
-
- return nil
-}
-
-func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
-
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
-
- tx := store.getTx(ctx)
-
- iter, err := tx.Iter(directoryPrefix, nil)
- if err != nil {
- return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err)
- }
- defer iter.Close()
- for iter.Valid() {
- key := iter.Key()
- if !bytes.HasPrefix(key, directoryPrefix) {
- break
- }
- fileName := getNameFromKey(key)
- if fileName == "" {
- iter.Next()
- continue
- }
-
- if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil {
- return fmt.Errorf("delete %s : %v", fullpath, err)
- }
-
- iter.Next()
- }
-
- return nil
-}
-
-func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
-
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
- lastFileStart := genDirectoryKeyPrefix(fullpath, startFileName)
-
- iter, err := store.getTx(ctx).Iter(lastFileStart, nil)
- if err != nil {
- return nil, fmt.Errorf("list %s: %v", fullpath, err)
- }
- defer iter.Close()
- for iter.Valid() {
- key := iter.Key()
- if !bytes.HasPrefix(key, directoryPrefix) {
- break
- }
- fileName := getNameFromKey(key)
- if fileName == "" {
- iter.Next()
- continue
- }
- if fileName == startFileName && !inclusive {
- iter.Next()
- continue
- }
- limit--
- if limit < 0 {
- break
- }
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
- }
-
- // println("list", entry.FullPath, "chunks", len(entry.Chunks))
-
- if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
- err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
- break
- }
- entries = append(entries, entry)
- iter.Next()
- }
-
- return entries, err
-}
-
-func genKey(dirPath, fileName string) (key []byte) {
- key = hashToBytes(dirPath)
- key = append(key, []byte(fileName)...)
- return key
-}
-
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
- keyPrefix = hashToBytes(string(fullpath))
- if len(startFileName) > 0 {
- keyPrefix = append(keyPrefix, []byte(startFileName)...)
- }
- return keyPrefix
-}
-
-func getNameFromKey(key []byte) string {
-
- return string(key[md5.Size:])
-
-}
-
-// hash directory
-func hashToBytes(dir string) []byte {
- h := md5.New()
- io.WriteString(h, dir)
-
- b := h.Sum(nil)
-
- return b
-}
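
The removed store's key scheme is worth noting: genKey prepends a fixed-width md5 of the directory to the raw file name, so every entry of one directory shares a 16-byte prefix and listing can be a TiKV range scan, while getNameFromKey recovers the name by slicing off md5.Size bytes. A self-contained sketch of that layout:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// genKey mirrors the removed scheme: md5(dir) followed by fileName,
// giving all entries of one directory a common fixed-width prefix.
func genKey(dirPath, fileName string) []byte {
	h := md5.New()
	io.WriteString(h, dirPath)
	return append(h.Sum(nil), []byte(fileName)...)
}

// getNameFromKey recovers the file name by dropping the hash prefix.
func getNameFromKey(key []byte) string {
	return string(key[md5.Size:])
}

func main() {
	k := genKey("/home/user", "notes.txt")
	fmt.Printf("prefix %x name %s\n", k[:md5.Size], getNameFromKey(k))
}
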
diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go
deleted file mode 100644
index 36de2d974..000000000
--- a/weed/filer2/tikv/tikv_store_unsupported.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// +build 386 arm
-
-package tikv
-
-import (
- "context"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- weed_util "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &TikvStore{})
-}
-
-type TikvStore struct {
-}
-
-func (store *TikvStore) GetName() string {
- return "tikv"
-}
-
-func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) initialize(pdAddr string) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) {
- return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
-func (store *TikvStore) CommitTransaction(ctx context.Context) error {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-func (store *TikvStore) RollbackTransaction(ctx context.Context) error {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
- return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
- return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
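
The two deleted files pair old-style build constraints: stacked "// +build !386" and "// +build !arm" lines AND together, while space-separated terms on one line ("// +build 386 arm") OR, so exactly one implementation compiles for any GOARCH and 32-bit builds get same-signature stubs. A runnable check of which variant a given target would have selected; the printed strings are illustrative:

package main

import (
	"fmt"
	"runtime"
)

// 386 and arm are the 32-bit GOARCH values the stub file matched.
func main() {
	switch runtime.GOARCH {
	case "386", "arm":
		fmt.Println("stub tikv store: every method returns a 'not implemented' error")
	default:
		fmt.Println("real tikv store compiles")
	}
}
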
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index 7b24a1ec5..6ee20974b 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -1,27 +1,39 @@
package filesys
import (
+ "bytes"
"context"
+ "math"
"os"
- "path"
+ "strings"
+ "syscall"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Dir struct {
- Path string
- wfs *WFS
- entry *filer_pb.Entry
+ name string
+ wfs *WFS
+ entry *filer_pb.Entry
+ parent *Dir
+ id uint64
}
var _ = fs.Node(&Dir{})
+
+//var _ = fs.NodeIdentifier(&Dir{})
var _ = fs.NodeCreater(&Dir{})
+var _ = fs.NodeMknoder(&Dir{})
var _ = fs.NodeMkdirer(&Dir{})
+var _ = fs.NodeFsyncer(&Dir{})
var _ = fs.NodeRequestLookuper(&Dir{})
var _ = fs.HandleReadDirAller(&Dir{})
var _ = fs.NodeRemover(&Dir{})
@@ -31,44 +43,57 @@ var _ = fs.NodeGetxattrer(&Dir{})
var _ = fs.NodeSetxattrer(&Dir{})
var _ = fs.NodeRemovexattrer(&Dir{})
var _ = fs.NodeListxattrer(&Dir{})
+var _ = fs.NodeForgetter(&Dir{})
-func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
+func (dir *Dir) xId() uint64 {
+ return dir.id
+}
- glog.V(3).Infof("dir Attr %s", dir.Path)
+func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
// https://github.com/bazil/fuse/issues/196
attr.Valid = time.Second
- if dir.Path == dir.wfs.option.FilerMountRootPath {
+ if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
dir.setRootDirAttributes(attr)
+ glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ entry, err := dir.maybeLoadEntry()
+ if err != nil {
+ glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
return err
}
- attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir
- attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0)
- attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0)
- attr.Gid = dir.entry.Attributes.Gid
- attr.Uid = dir.entry.Attributes.Uid
+ // attr.Inode = dir.Id()
+ attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir
+ attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
+ attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
+ attr.Gid = entry.Attributes.Gid
+ attr.Uid = entry.Attributes.Uid
+
+ glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- glog.V(4).Infof("dir Getxattr %s", dir.Path)
+ glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ entry, err := dir.maybeLoadEntry()
+ if err != nil {
return err
}
- return getxattr(dir.entry, req, resp)
+ return getxattr(entry, req, resp)
}
func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
+ // attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
+ attr.Valid = time.Second
+ attr.Inode = 1 // dir.Id()
attr.Uid = dir.wfs.option.MountUid
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
@@ -76,84 +101,178 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
attr.Ctime = dir.wfs.option.MountCtime
attr.Mtime = dir.wfs.option.MountMtime
attr.Atime = dir.wfs.option.MountMtime
+ attr.BlockSize = blockSize
+}
+
+func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
+ // fsync works at OS level
+ // write the file chunks to the filerGrpcAddress
+ glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req)
+
+ return nil
}
-func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File {
+func (dir *Dir) newFile(name string) fs.Node {
+
+ fileFullPath := util.NewFullPath(dir.FullPath(), name)
+ fileId := fileFullPath.AsInode()
+ dir.wfs.handlesLock.Lock()
+ existingHandle, found := dir.wfs.handles[fileId]
+ dir.wfs.handlesLock.Unlock()
+
+ if found {
+ glog.V(4).Infof("newFile found opened file handle: %+v", fileFullPath)
+ return existingHandle.f
+ }
return &File{
- Name: name,
- dir: dir,
- wfs: dir.wfs,
- entry: entry,
- entryViewCache: nil,
+ Name: name,
+ dir: dir,
+ wfs: dir.wfs,
+ id: fileId,
}
}
+func (dir *Dir) newDirectory(fullpath util.FullPath) fs.Node {
+
+ return &Dir{name: fullpath.Name(), wfs: dir.wfs, parent: dir, id: fullpath.AsInode()}
+
+}
+
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
+ exclusive := req.Flags&fuse.OpenExclusive != 0
+ isDirectory := req.Mode&os.ModeDir > 0
+
+ if exclusive || isDirectory {
+ _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, exclusive)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ var node fs.Node
+ if isDirectory {
+ node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name))
+ return node, nil, nil
+ }
+
+ node = dir.newFile(req.Name)
+ file := node.(*File)
+ file.entry = &filer_pb.Entry{
+ Name: req.Name,
+ IsDirectory: req.Mode&os.ModeDir > 0,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
+ Uid: req.Uid,
+ Gid: req.Gid,
+ Collection: dir.wfs.option.Collection,
+ Replication: dir.wfs.option.Replication,
+ TtlSec: dir.wfs.option.TtlSec,
+ },
+ }
+ file.dirtyMetadata = true
+ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
+ return file, fh, nil
+
+}
+
+func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
+
+ _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
+
+ if err != nil {
+ return nil, err
+ }
+ var node fs.Node
+ node = dir.newFile(req.Name)
+ return node, nil
+}
+
+func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) {
+ dirFullPath := dir.FullPath()
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dirFullPath,
Entry: &filer_pb.Entry{
- Name: req.Name,
- IsDirectory: req.Mode&os.ModeDir > 0,
+ Name: name,
+ IsDirectory: mode&os.ModeDir > 0,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
- FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
- Uid: req.Uid,
- Gid: req.Gid,
+ FileMode: uint32(mode &^ dir.wfs.option.Umask),
+ Uid: uid,
+ Gid: gid,
Collection: dir.wfs.option.Collection,
Replication: dir.wfs.option.Replication,
TtlSec: dir.wfs.option.TtlSec,
},
},
+ OExcl: exclusive,
+ Signatures: []int32{dir.wfs.signature},
}
- glog.V(1).Infof("create: %v", request)
+ glog.V(1).Infof("create %s/%s", dirFullPath, name)
- if request.Entry.IsDirectory {
- if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err)
- return fuse.EIO
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ if strings.Contains(err.Error(), "EEXIST") {
+ return fuse.EEXIST
}
- return nil
- }); err != nil {
- return nil, nil, err
+ glog.V(0).Infof("create %s/%s: %v", dirFullPath, name, err)
+ return fuse.EIO
}
- }
- file := dir.newFile(req.Name, request.Entry)
- if !request.Entry.IsDirectory {
- file.isOpen = true
- }
- fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
- fh.dirtyMetadata = true
- return file, fh, nil
+ if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
+ glog.Errorf("local InsertEntry dir %s/%s: %v", dirFullPath, name, err)
+ return fuse.EIO
+ }
+ return nil
+ })
+ return request, err
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
- err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
+
+ newEntry := &filer_pb.Entry{
+ Name: req.Name,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
+ Uid: req.Uid,
+ Gid: req.Gid,
+ },
+ }
+
+ dirFullPath := dir.FullPath()
+
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(newEntry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(newEntry)
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
- Entry: &filer_pb.Entry{
- Name: req.Name,
- IsDirectory: true,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(req.Mode &^ dir.wfs.option.Umask),
- Uid: req.Uid,
- Gid: req.Gid,
- },
- },
+ Directory: dirFullPath,
+ Entry: newEntry,
+ Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("mkdir: %v", request)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err)
+ return err
+ }
+
+ if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
+ glog.Errorf("local mkdir dir %s/%s: %v", dirFullPath, req.Name, err)
return fuse.EIO
}
@@ -161,221 +280,258 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
})
if err == nil {
- node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
+ node := dir.newDirectory(util.NewFullPath(dirFullPath, req.Name))
+
return node, nil
}
- return nil, err
+ glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err)
+
+ return nil, fuse.EIO
}
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
- glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name)
+ dirPath := util.FullPath(dir.FullPath())
+ glog.V(4).Infof("dir Lookup %s: %s by %s", dirPath, req.Name, req.Header.String())
- var entry *filer_pb.Entry
- fullFilePath := path.Join(dir.Path, req.Name)
-
- item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath)
- if item != nil && !item.Expired() {
- entry = item.Value().(*filer_pb.Entry)
+ fullFilePath := dirPath.Child(req.Name)
+ visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
+ if visitErr != nil {
+ glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
+ return nil, fuse.EIO
+ }
+ localEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
+ if cacheErr == filer_pb.ErrNotFound {
+ return nil, fuse.ENOENT
}
- if entry == nil {
- glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
- entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath)
+ if localEntry == nil {
+ // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
+ entry, err := filer_pb.GetEntry(dir.wfs, fullFilePath)
if err != nil {
- return nil, err
- }
- if entry != nil {
- dir.wfs.listDirectoryEntriesCache.Set(fullFilePath, entry, 5*time.Minute)
+ glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
+ return nil, fuse.ENOENT
}
+ localEntry = filer.FromPbEntry(string(dirPath), entry)
} else {
glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
}
- if entry != nil {
- if entry.IsDirectory {
- node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, entry: entry}
+ if localEntry != nil {
+ if localEntry.IsDirectory() {
+ node = dir.newDirectory(fullFilePath)
} else {
- node = dir.newFile(req.Name, entry)
+ node = dir.newFile(req.Name)
}
- resp.EntryValid = time.Duration(0)
- resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
- resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0)
- resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode)
- resp.Attr.Gid = entry.Attributes.Gid
- resp.Attr.Uid = entry.Attributes.Uid
+ // resp.EntryValid = time.Second
+ resp.Attr.Inode = fullFilePath.AsInode()
+ resp.Attr.Valid = time.Second
+ resp.Attr.Mtime = localEntry.Attr.Mtime
+ resp.Attr.Crtime = localEntry.Attr.Crtime
+ resp.Attr.Mode = localEntry.Attr.Mode
+ resp.Attr.Gid = localEntry.Attr.Gid
+ resp.Attr.Uid = localEntry.Attr.Uid
+ if localEntry.HardLinkCounter > 0 {
+ resp.Attr.Nlink = uint32(localEntry.HardLinkCounter)
+ }
return node, nil
}
+ glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
- glog.V(3).Infof("dir ReadDirAll %s", dir.Path)
+ dirPath := util.FullPath(dir.FullPath())
+ glog.V(4).Infof("dir ReadDirAll %s", dirPath)
- cacheTtl := 5 * time.Minute
-
- readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, dir.Path, "", func(entry *filer_pb.Entry, isLast bool) {
- if entry.IsDirectory {
- dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir}
+ processEachEntryFn := func(entry *filer.Entry, isLast bool) {
+ if entry.IsDirectory() {
+ dirent := fuse.Dirent{Name: entry.Name(), Type: fuse.DT_Dir, Inode: dirPath.Child(entry.Name()).AsInode()}
ret = append(ret, dirent)
} else {
- dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File}
+ dirent := fuse.Dirent{Name: entry.Name(), Type: findFileType(uint16(entry.Attr.Mode)), Inode: dirPath.Child(entry.Name()).AsInode()}
ret = append(ret, dirent)
}
- dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl)
+ }
+
+ if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
+ glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
+ return nil, fuse.EIO
+ }
+ listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
+ processEachEntryFn(entry, false)
+ return true
})
- if readErr != nil {
- glog.V(0).Infof("list %s: %v", dir.Path, err)
- return ret, fuse.EIO
+ if listErr != nil {
+ glog.Errorf("list meta cache: %v", listErr)
+ return nil, fuse.EIO
}
+ return
+}
- return ret, err
+func findFileType(mode uint16) fuse.DirentType {
+ switch mode & (syscall.S_IFMT & 0xffff) {
+ case syscall.S_IFSOCK:
+ return fuse.DT_Socket
+ case syscall.S_IFLNK:
+ return fuse.DT_Link
+ case syscall.S_IFREG:
+ return fuse.DT_File
+ case syscall.S_IFBLK:
+ return fuse.DT_Block
+ case syscall.S_IFDIR:
+ return fuse.DT_Dir
+ case syscall.S_IFCHR:
+ return fuse.DT_Char
+ case syscall.S_IFIFO:
+ return fuse.DT_FIFO
+ }
+ return fuse.DT_File
}
func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
if !req.Dir {
- return dir.removeOneFile(ctx, req)
+ return dir.removeOneFile(req)
}
- return dir.removeFolder(ctx, req)
+ return dir.removeFolder(req)
}
-func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error {
+func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
- entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name))
+ dirFullPath := dir.FullPath()
+ filePath := util.NewFullPath(dirFullPath, req.Name)
+ entry, err := filer_pb.GetEntry(dir.wfs, filePath)
if err != nil {
return err
}
- dir.wfs.deleteFileChunks(ctx, entry.Chunks)
-
- dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name))
-
- return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ // first, ensure the filer store can correctly delete
+ glog.V(3).Infof("remove file: %v", req)
+ isDeleteData := entry != nil && entry.HardLinkCounter <= 1
+ err = filer_pb.Remove(dir.wfs, dirFullPath, req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
+ if err != nil {
+ glog.V(3).Infof("not found remove file %s: %v", filePath, err)
+ return fuse.ENOENT
+ }
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- IsDeleteData: false,
- }
+ // then, delete meta cache and fsNode cache
+ if err = dir.wfs.metaCache.DeleteEntry(context.Background(), filePath); err != nil {
+ glog.V(3).Infof("local DeleteEntry %s: %v", filePath, err)
+ return fuse.ESTALE
+ }
- glog.V(3).Infof("remove file: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err)
- return fuse.ENOENT
- }
+ // remove current file handle if any
+ dir.wfs.handlesLock.Lock()
+ defer dir.wfs.handlesLock.Unlock()
+ inodeId := filePath.AsInode()
+ delete(dir.wfs.handles, inodeId)
- return nil
- })
+ return nil
}
-func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error {
-
- dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name))
+func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
- return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- IsDeleteData: true,
+ dirFullPath := dir.FullPath()
+ glog.V(3).Infof("remove directory entry: %v", req)
+ ignoreRecursiveErr := true // ignore recursive deletion errors since the OS should manage it
+ err := filer_pb.Remove(dir.wfs, dirFullPath, req.Name, true, true, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
+ if err != nil {
+ glog.V(0).Infof("remove %s/%s: %v", dirFullPath, req.Name, err)
+ if strings.Contains(err.Error(), "non-empty") {
+ return fuse.EEXIST
}
+ return fuse.ENOENT
+ }
- glog.V(3).Infof("remove directory entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err)
- return fuse.ENOENT
- }
+ t := util.NewFullPath(dirFullPath, req.Name)
+ dir.wfs.metaCache.DeleteEntry(context.Background(), t)
- return nil
- })
+ return nil
}
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
+
+ entry, err := dir.maybeLoadEntry()
+ if err != nil {
return err
}
- glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle)
if req.Valid.Mode() {
- dir.entry.Attributes.FileMode = uint32(req.Mode)
+ entry.Attributes.FileMode = uint32(req.Mode)
}
if req.Valid.Uid() {
- dir.entry.Attributes.Uid = req.Uid
+ entry.Attributes.Uid = req.Uid
}
if req.Valid.Gid() {
- dir.entry.Attributes.Gid = req.Gid
+ entry.Attributes.Gid = req.Gid
}
if req.Valid.Mtime() {
- dir.entry.Attributes.Mtime = req.Mtime.Unix()
+ entry.Attributes.Mtime = req.Mtime.Unix()
}
- dir.wfs.listDirectoryEntriesCache.Delete(dir.Path)
-
- return dir.saveEntry(ctx)
+ return dir.saveEntry(entry)
}
func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name)
+ glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ entry, err := dir.maybeLoadEntry()
+ if err != nil {
return err
}
- if err := setxattr(dir.entry, req); err != nil {
+ if err := setxattr(entry, req); err != nil {
return err
}
- dir.wfs.listDirectoryEntriesCache.Delete(dir.Path)
-
- return dir.saveEntry(ctx)
+ return dir.saveEntry(entry)
}
func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name)
+ glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ entry, err := dir.maybeLoadEntry()
+ if err != nil {
return err
}
- if err := removexattr(dir.entry, req); err != nil {
+ if err := removexattr(entry, req); err != nil {
return err
}
- dir.wfs.listDirectoryEntriesCache.Delete(dir.Path)
-
- return dir.saveEntry(ctx)
+ return dir.saveEntry(entry)
}
func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(4).Infof("dir Listxattr %s", dir.Path)
+ glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ entry, err := dir.maybeLoadEntry()
+ if err != nil {
return err
}
- if err := listxattr(dir.entry, req, resp); err != nil {
+ if err := listxattr(entry, req, resp); err != nil {
return err
}
@@ -383,39 +539,66 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
}
-func (dir *Dir) maybeLoadEntry(ctx context.Context) error {
- if dir.entry == nil {
- parentDirPath, name := filer2.FullPath(dir.Path).DirAndName()
- entry, err := dir.wfs.maybeLoadEntry(ctx, parentDirPath, name)
- if err != nil {
- return err
- }
- if entry == nil {
- return fuse.ENOENT
- }
- dir.entry = entry
- }
- return nil
+func (dir *Dir) Forget() {
+ glog.V(4).Infof("Forget dir %s", dir.FullPath())
}
-func (dir *Dir) saveEntry(ctx context.Context) error {
+func (dir *Dir) maybeLoadEntry() (*filer_pb.Entry, error) {
+ parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
+ return dir.wfs.maybeLoadEntry(parentDirPath, name)
+}
+
+func (dir *Dir) saveEntry(entry *filer_pb.Entry) error {
- parentDir, name := filer2.FullPath(dir.Path).DirAndName()
+ parentDir, name := util.FullPath(dir.FullPath()).DirAndName()
- return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(entry)
request := &filer_pb.UpdateEntryRequest{
- Directory: parentDir,
- Entry: dir.entry,
+ Directory: parentDir,
+ Entry: entry,
+ Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("save dir entry: %v", request)
- _, err := client.UpdateEntry(ctx, request)
+ _, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+ glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}
+ if err := dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
+ glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+ return fuse.ESTALE
+ }
+
return nil
})
}
+
+func (dir *Dir) FullPath() string {
+ var parts []string
+ for p := dir; p != nil; p = p.parent {
+ if strings.HasPrefix(p.name, "/") {
+ if len(p.name) > 1 {
+ parts = append(parts, p.name[1:])
+ }
+ } else {
+ parts = append(parts, p.name)
+ }
+ }
+
+ if len(parts) == 0 {
+ return "/"
+ }
+
+ var buf bytes.Buffer
+ for i := len(parts) - 1; i >= 0; i-- {
+ buf.WriteString("/")
+ buf.WriteString(parts[i])
+ }
+ return buf.String()
+}
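
Dir.FullPath replaces the old stored Path string by walking parent pointers and emitting the collected names root-first; the root's name carries the leading "/" (or a "/"-prefixed mount path), which is why that prefix is trimmed during collection. A stripped-down, runnable copy of the same walk, using a toy node type in place of Dir:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

type node struct {
	name   string
	parent *node
}

// fullPath mirrors Dir.FullPath: collect names leaf-to-root, then
// write them out in reverse with "/" separators.
func fullPath(d *node) string {
	var parts []string
	for p := d; p != nil; p = p.parent {
		if strings.HasPrefix(p.name, "/") {
			if len(p.name) > 1 {
				parts = append(parts, p.name[1:])
			}
		} else {
			parts = append(parts, p.name)
		}
	}
	if len(parts) == 0 {
		return "/"
	}
	var buf bytes.Buffer
	for i := len(parts) - 1; i >= 0; i-- {
		buf.WriteString("/")
		buf.WriteString(parts[i])
	}
	return buf.String()
}

func main() {
	root := &node{name: "/"}
	docs := &node{name: "docs", parent: root}
	fmt.Println(fullPath(&node{name: "a.txt", parent: docs})) // /docs/a.txt
	fmt.Println(fullPath(root))                               // /
}
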
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index 8e60872d3..acdcd2de4 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -2,25 +2,110 @@ package filesys
import (
"context"
+ "github.com/chrislusf/seaweedfs/weed/util"
"os"
"syscall"
"time"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
+var _ = fs.NodeLinker(&Dir{})
var _ = fs.NodeSymlinker(&Dir{})
var _ = fs.NodeReadlinker(&File{})
+const (
+ HARD_LINK_MARKER = '\x01'
+)
+
+func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
+
+ oldFile, ok := old.(*File)
+ if !ok {
+ glog.Errorf("old node is not a file: %+v", old)
+ return nil, fuse.EIO
+ }
+
+ glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
+
+ oldEntry, err := oldFile.maybeLoadEntry(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if oldEntry == nil {
+ return nil, fuse.EIO
+ }
+
+ // update old file to hardlink mode
+ if len(oldEntry.HardLinkId) == 0 {
+ oldEntry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER)
+ oldEntry.HardLinkCounter = 1
+ }
+ oldEntry.HardLinkCounter++
+ updateOldEntryRequest := &filer_pb.UpdateEntryRequest{
+ Directory: oldFile.dir.FullPath(),
+ Entry: oldEntry,
+ Signatures: []int32{dir.wfs.signature},
+ }
+
+ // create the new entry sharing the old entry's attributes, chunks, and hard link id
+ request := &filer_pb.CreateEntryRequest{
+ Directory: dir.FullPath(),
+ Entry: &filer_pb.Entry{
+ Name: req.NewName,
+ IsDirectory: false,
+ Attributes: oldEntry.Attributes,
+ Chunks: oldEntry.Chunks,
+ Extended: oldEntry.Extended,
+ HardLinkId: oldEntry.HardLinkId,
+ HardLinkCounter: oldEntry.HardLinkCounter,
+ },
+ Signatures: []int32{dir.wfs.signature},
+ }
+
+ // apply changes to the filer, and also apply to local metaCache
+ err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
+ if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil {
+ glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ return fuse.EIO
+ }
+ dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry))
+
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+ return fuse.EIO
+ }
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, fuse.EIO
+ }
+
+ // create new file node
+ newNode := dir.newFile(req.NewName)
+ newFile := newNode.(*File)
+
+ return newFile, err
+
+}
+
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
- glog.V(3).Infof("Symlink: %v/%v to %v", dir.Path, req.NewName, req.Target)
+ glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dir.FullPath(),
Entry: &filer_pb.Entry{
Name: req.NewName,
IsDirectory: false,
@@ -33,17 +118,25 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
SymlinkTarget: req.Target,
},
},
+ Signatures: []int32{dir.wfs.signature},
}
- err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err)
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
+
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+
return nil
})
- symlink := dir.newFile(req.NewName, request.Entry)
+ symlink := dir.newFile(req.NewName)
return symlink, err
@@ -51,16 +144,17 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return "", err
}
- if os.FileMode(file.entry.Attributes.FileMode)&os.ModeSymlink == 0 {
+ if os.FileMode(entry.Attributes.FileMode)&os.ModeSymlink == 0 {
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.Path, file.Name, file.entry.Attributes.SymlinkTarget)
+ glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
- return file.entry.Attributes.SymlinkTarget, nil
+ return entry.Attributes.SymlinkTarget, nil
}
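
The Link flow above upgrades a plain file to hard-link mode on the first link (a random 16-byte id plus the marker byte, counter initialized to 1), bumps the shared counter, and creates a second entry referencing the same id and chunks; removeOneFile then only deletes chunk data when HardLinkCounter <= 1. A minimal sketch of that bookkeeping with a pared-down entry type, not the real filer_pb.Entry:

package main

import (
	"crypto/rand"
	"fmt"
)

const hardLinkMarker = '\x01' // mirrors HARD_LINK_MARKER above

type entry struct {
	hardLinkId      []byte
	hardLinkCounter int32
}

// link upgrades old to hard-link mode if needed, bumps the counter,
// and returns a new entry sharing the same id and counter value.
func link(old *entry) *entry {
	if len(old.hardLinkId) == 0 {
		id := make([]byte, 16)
		rand.Read(id)
		old.hardLinkId = append(id, hardLinkMarker)
		old.hardLinkCounter = 1
	}
	old.hardLinkCounter++
	return &entry{hardLinkId: old.hardLinkId, hardLinkCounter: old.hardLinkCounter}
}

func main() {
	f := &entry{}
	l1 := link(f)
	fmt.Println(f.hardLinkCounter, l1.hardLinkCounter) // 2 2
}
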
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index e72a15758..b07710d17 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -2,32 +2,90 @@ package filesys
import (
"context"
- "fmt"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
newDir := newDirectory.(*Dir)
- return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
+ oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
+
+ glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
+
+ // find local old entry
+ oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
+ if err != nil {
+ glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
+ return fuse.ENOENT
+ }
+
+ // update remote filer
+ err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
request := &filer_pb.AtomicRenameEntryRequest{
- OldDirectory: dir.Path,
+ OldDirectory: dir.FullPath(),
OldName: req.OldName,
- NewDirectory: newDir.Path,
+ NewDirectory: newDir.FullPath(),
NewName: req.NewName,
}
_, err := client.AtomicRenameEntry(ctx, request)
if err != nil {
- return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err)
+ glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
+ return fuse.EXDEV
}
return nil
})
+ if err != nil {
+ glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
+ return fuse.EIO
+ }
+
+ // TODO: replicate renaming logic on filer
+ if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
+ glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
+ return fuse.EIO
+ }
+ oldEntry.FullPath = newPath
+ if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
+ glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
+ return fuse.EIO
+ }
+
+ oldFsNode := NodeWithId(oldPath.AsInode())
+ newFsNode := NodeWithId(newPath.AsInode())
+ dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
+ if file, ok := internalNode.(*File); ok {
+ glog.V(4).Infof("internal node %s", file.Name)
+ file.Name = req.NewName
+ file.id = uint64(newFsNode)
+ }
+ })
+
+ // change file handle
+ dir.wfs.handlesLock.Lock()
+ defer dir.wfs.handlesLock.Unlock()
+ inodeId := oldPath.AsInode()
+ existingHandle, found := dir.wfs.handles[inodeId]
+ glog.V(4).Infof("has open filehandle %s: %v", oldPath, found)
+ if !found || existingHandle == nil {
+ return nil
+ }
+ glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath)
+ delete(dir.wfs.handles, inodeId)
+ dir.wfs.handles[newPath.AsInode()] = existingHandle
+ return nil
}
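
After the filer performs AtomicRenameEntry, Rename patches local state: the meta-cache entry moves from the old path to the new one, and any open handle keyed by the old inode is rekeyed. A toy version of that move using plain maps; the inode values are stand-ins for util.FullPath.AsInode():

package main

import "fmt"

func main() {
	cache := map[string]string{"/a/old.txt": "entry-data"}
	handles := map[uint64]string{100: "open-handle"}

	oldPath, newPath := "/a/old.txt", "/b/new.txt"
	oldInode, newInode := uint64(100), uint64(200)

	// move the cached entry under its new path
	entry := cache[oldPath]
	delete(cache, oldPath)
	cache[newPath] = entry

	// rekey any open handle so writes keep flowing to the same file
	if h, found := handles[oldInode]; found {
		delete(handles, oldInode)
		handles[newInode] = h
	}
	fmt.Println(cache, handles)
}
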
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 35d8f249a..8888cff96 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -2,214 +2,117 @@ package filesys
import (
"bytes"
- "context"
- "fmt"
+ "io"
"sync"
- "sync/atomic"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/security"
)
type ContinuousDirtyPages struct {
- hasData bool
- Offset int64
- Size int64
- Data []byte
- f *File
- lock sync.Mutex
+ intervals *ContinuousIntervals
+ f *File
+ writeWaitGroup sync.WaitGroup
+ chunkAddLock sync.Mutex
+ lastErr error
+ collection string
+ replication string
}
func newDirtyPages(file *File) *ContinuousDirtyPages {
- return &ContinuousDirtyPages{
- Data: nil,
- f: file,
+ dirtyPages := &ContinuousDirtyPages{
+ intervals: &ContinuousIntervals{},
+ f: file,
}
+ return dirtyPages
}
-func (pages *ContinuousDirtyPages) releaseResource() {
- if pages.Data != nil {
- pages.f.wfs.bufPool.Put(pages.Data)
- pages.Data = nil
- atomic.AddInt32(&counter, -1)
- glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter)
- }
-}
-
-var counter = int32(0)
+func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {
-func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
-
- pages.lock.Lock()
- defer pages.lock.Unlock()
-
- var chunk *filer_pb.FileChunk
+ glog.V(4).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data)))
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
- return pages.flushAndSave(ctx, offset, data)
+ pages.flushAndSave(offset, data)
}
- if pages.Data == nil {
- pages.Data = pages.f.wfs.bufPool.Get().([]byte)
- atomic.AddInt32(&counter, 1)
- glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter)
- }
+ pages.intervals.AddInterval(data, offset)
- if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) ||
- pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) {
- // if the data is out of range,
- // or buffer is full if adding new data,
- // flush current buffer and add new data
-
- // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size)
-
- if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
- if chunk != nil {
- glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
- chunks = append(chunks, chunk)
- }
- } else {
- glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
- return
- }
- pages.Offset = offset
- copy(pages.Data, data)
- pages.Size = int64(len(data))
- return
+ if pages.intervals.TotalSize() >= pages.f.wfs.option.ChunkSizeLimit {
+ pages.saveExistingLargestPageToStorage()
}
- if offset != pages.Offset+pages.Size {
- // when this happens, debug shows the data overlapping with existing data is empty
- // the data is not just append
- if offset == pages.Offset && int(pages.Size) < len(data) {
- // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size)
- copy(pages.Data[pages.Size:], data[pages.Size:])
- } else {
- if pages.Size != 0 {
- glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data)))
- }
- return pages.flushAndSave(ctx, offset, data)
- }
- } else {
- copy(pages.Data[offset-pages.Offset:], data)
- }
-
- pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset)
-
return
}
-func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
-
- var chunk *filer_pb.FileChunk
+func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) {
// flush existing
- if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
- if chunk != nil {
- glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
- chunks = append(chunks, chunk)
- }
- } else {
- glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
- return
- }
- pages.Size = 0
- pages.Offset = 0
+ pages.saveExistingPagesToStorage()
// flush the new page
- if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {
- if chunk != nil {
- glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
- chunks = append(chunks, chunk)
- }
- } else {
- glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
- return
- }
+ pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data)))
return
}
-func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() {
+ for pages.saveExistingLargestPageToStorage() {
+ }
+}
- pages.lock.Lock()
- defer pages.lock.Unlock()
+func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedData bool) {
- if pages.Size == 0 {
- return nil, nil
+ maxList := pages.intervals.RemoveLargestIntervalLinkedList()
+ if maxList == nil {
+ return false
}
- if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
- pages.Size = 0
- pages.Offset = 0
- if chunk != nil {
- glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
- }
+ entry := pages.f.getEntry()
+ if entry == nil {
+ return false
}
- return
-}
-func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {
+ fileSize := int64(entry.Attributes.FileSize)
- if pages.Size == 0 {
- return nil, nil
+ chunkSize := min(maxList.Size(), fileSize-maxList.Offset())
+ if chunkSize == 0 {
+ return false
}
- return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)
-}
-
-func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {
+ pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize)
- var fileId, host string
- var auth security.EncodedJwt
+ return true
+}
- if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) {
- request := &filer_pb.AssignVolumeRequest{
- Count: 1,
- Replication: pages.f.wfs.option.Replication,
- Collection: pages.f.wfs.option.Collection,
- TtlSec: pages.f.wfs.option.TtlSec,
- DataCenter: pages.f.wfs.option.DataCenter,
- }
+ mtime := time.Now().UnixNano()
+ pages.writeWaitGroup.Add(1)
+ writer := func() {
+ defer pages.writeWaitGroup.Done()
- resp, err := client.AssignVolume(ctx, request)
+ reader = io.LimitReader(reader, size)
+ chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
if err != nil {
- glog.V(0).Infof("assign volume failure %v: %v", request, err)
- return err
+ glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
+ pages.lastErr = err
+ return
}
-
- fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
-
- return nil
- }); err != nil {
- return nil, fmt.Errorf("filerGrpcAddress assign volume: %v", err)
+ chunk.Mtime = mtime
+ pages.collection, pages.replication = collection, replication
+ pages.chunkAddLock.Lock()
+ defer pages.chunkAddLock.Unlock()
+ pages.f.addChunks([]*filer_pb.FileChunk{chunk})
+ glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
}
- fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
- bufReader := bytes.NewReader(buf)
- uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "", nil, auth)
- if err != nil {
- glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err)
- return nil, fmt.Errorf("upload data: %v", err)
- }
- if uploadResult.Error != "" {
- glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err)
- return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
+ if pages.f.wfs.concurrentWriters != nil {
+ pages.f.wfs.concurrentWriters.Execute(writer)
+ } else {
+ go writer()
}
-
- return &filer_pb.FileChunk{
- FileId: fileId,
- Offset: offset,
- Size: uint64(len(buf)),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- }, nil
-
}
func max(x, y int64) int64 {
@@ -218,3 +121,13 @@ func max(x, y int64) int64 {
}
return y
}
+func min(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func (pages *ContinuousDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
+ return pages.intervals.ReadDataAt(data, startOffset)
+}
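
saveToStorage above registers each flush with a WaitGroup before the goroutine (or pooled writer) starts, so a later flush or release can wait for every in-flight upload, while the chunk list and last error are guarded by a lock. A minimal sketch of that pattern with the actual upload stubbed out:

package main

import (
	"fmt"
	"sync"
)

type flusher struct {
	wg      sync.WaitGroup
	mu      sync.Mutex
	chunks  []int64
	lastErr error
}

// save registers before spawning, so Wait cannot miss a writer that
// has been scheduled but not yet started.
func (f *flusher) save(offset int64) {
	f.wg.Add(1)
	go func() {
		defer f.wg.Done()
		// the real code uploads the interval here; on failure it
		// records f.lastErr instead of appending a chunk
		f.mu.Lock()
		f.chunks = append(f.chunks, offset)
		f.mu.Unlock()
	}()
}

func main() {
	f := &flusher{}
	for _, off := range []int64{0, 1024, 2048} {
		f.save(off)
	}
	f.wg.Wait()
	fmt.Println(len(f.chunks), f.lastErr) // 3 <nil>
}
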
diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go
new file mode 100644
index 000000000..1404bf78c
--- /dev/null
+++ b/weed/filesys/dirty_page_interval.go
@@ -0,0 +1,223 @@
+package filesys
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type IntervalNode struct {
+ Data []byte
+ Offset int64
+ Size int64
+ Next *IntervalNode
+}
+
+type IntervalLinkedList struct {
+ Head *IntervalNode
+ Tail *IntervalNode
+}
+
+type ContinuousIntervals struct {
+ lists []*IntervalLinkedList
+}
+
+func (list *IntervalLinkedList) Offset() int64 {
+ return list.Head.Offset
+}
+func (list *IntervalLinkedList) Size() int64 {
+ return list.Tail.Offset + list.Tail.Size - list.Head.Offset
+}
+func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) {
+ // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
+ list.Tail.Next = node
+ list.Tail = node
+}
+func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) {
+ // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
+ node.Next = list.Head
+ list.Head = node
+}
+
+func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) {
+ t := list.Head
+ for {
+
+ nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
+ if nodeStart < nodeStop {
+ // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
+ copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset])
+ }
+
+ if t.Next == nil {
+ break
+ }
+ t = t.Next
+ }
+}
+
+func (c *ContinuousIntervals) TotalSize() (total int64) {
+ for _, list := range c.lists {
+ total += list.Size()
+ }
+ return
+}
+
+func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList {
+ var nodes []*IntervalNode
+ for t := list.Head; t != nil; t = t.Next {
+ nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
+ if nodeStart >= nodeStop {
+ // skip non overlapping IntervalNode
+ continue
+ }
+ nodes = append(nodes, &IntervalNode{
+ Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset],
+ Offset: nodeStart,
+ Size: nodeStop - nodeStart,
+ Next: nil,
+ })
+ }
+ for i := 1; i < len(nodes); i++ {
+ nodes[i-1].Next = nodes[i]
+ }
+ return &IntervalLinkedList{
+ Head: nodes[0],
+ Tail: nodes[len(nodes)-1],
+ }
+}
+
+func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) {
+
+ interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))}
+
+ // append to the tail and return
+ if len(c.lists) == 1 {
+ lastSpan := c.lists[0]
+ if lastSpan.Tail.Offset+lastSpan.Tail.Size == offset {
+ lastSpan.addNodeToTail(interval)
+ return
+ }
+ }
+
+ var newLists []*IntervalLinkedList
+ for _, list := range c.lists {
+ // if list is to the left of new interval, add to the new list
+ if list.Tail.Offset+list.Tail.Size <= interval.Offset {
+ newLists = append(newLists, list)
+ }
+ // if list is to the right of new interval, add to the new list
+ if interval.Offset+interval.Size <= list.Head.Offset {
+ newLists = append(newLists, list)
+ }
+ // if the new interval overwrites the right part of the list
+ if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size {
+ // create a new list of the left part of existing list
+ newLists = append(newLists, subList(list, list.Offset(), interval.Offset))
+ }
+ // if the new interval overwrites the left part of the list
+ if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size {
+ // create a new list of the right part of existing list
+ newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size))
+ }
+ // skip anything that is fully overwritten by the new interval
+ }
+
+ c.lists = newLists
+ // add the new interval to the lists, connecting neighbor lists
+ var prevList, nextList *IntervalLinkedList
+
+ for _, list := range c.lists {
+ if list.Head.Offset == interval.Offset+interval.Size {
+ nextList = list
+ break
+ }
+ }
+
+ for _, list := range c.lists {
+ if list.Head.Offset+list.Size() == offset {
+ list.addNodeToTail(interval)
+ prevList = list
+ break
+ }
+ }
+
+ if prevList != nil && nextList != nil {
+ // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
+ prevList.Tail.Next = nextList.Head
+ prevList.Tail = nextList.Tail
+ c.removeList(nextList)
+ } else if nextList != nil {
+ // head-prepend was not handled in the loop above, so do it here
+ nextList.addNodeToHead(interval)
+ }
+ if prevList == nil && nextList == nil {
+ c.lists = append(c.lists, &IntervalLinkedList{
+ Head: interval,
+ Tail: interval,
+ })
+ }
+
+ return
+}
+
+func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList {
+ var maxSize int64
+ maxIndex := -1
+ for k, list := range c.lists {
+ if maxSize <= list.Size() {
+ maxSize = list.Size()
+ maxIndex = k
+ }
+ }
+ if maxSize <= 0 {
+ return nil
+ }
+
+ t := c.lists[maxIndex]
+ c.lists = append(c.lists[0:maxIndex], c.lists[maxIndex+1:]...)
+ return t
+
+}
+
+func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) {
+ index := -1
+ for k, list := range c.lists {
+ if list.Offset() == target.Offset() {
+ index = k
+ }
+ }
+ if index < 0 {
+ return
+ }
+
+ c.lists = append(c.lists[0:index], c.lists[index+1:]...)
+
+}
+
+func (c *ContinuousIntervals) ReadDataAt(data []byte, startOffset int64) (maxStop int64) {
+ for _, list := range c.lists {
+ start := max(startOffset, list.Offset())
+ stop := min(startOffset+int64(len(data)), list.Offset()+list.Size())
+ if start < stop {
+ list.ReadData(data[start-startOffset:], start, stop)
+ maxStop = max(maxStop, stop)
+ }
+ }
+ return
+}
+
+func (l *IntervalLinkedList) ToReader() io.Reader {
+ var readers []io.Reader
+ t := l.Head
+ readers = append(readers, util.NewBytesReader(t.Data))
+ for t.Next != nil {
+ t = t.Next
+ readers = append(readers, bytes.NewReader(t.Data))
+ }
+ if len(readers) == 1 {
+ return readers[0]
+ }
+ return io.MultiReader(readers...)
+}
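
A sketch of how the new type behaves end to end, written as a testable example that would sit in a _test.go file beside this one (the placement is an assumption, not part of the change set): later writes win over earlier ones, and ReadDataAt reports how far valid data extends into the buffer.

package filesys

import "fmt"

func ExampleContinuousIntervals() {
	c := &ContinuousIntervals{}
	c.AddInterval([]byte{1, 1, 1, 1}, 0) // write [0,4)
	c.AddInterval([]byte{2, 2}, 1)       // overwrite [1,3)

	buf := make([]byte, 4)
	maxStop := c.ReadDataAt(buf, 0)
	fmt.Println(buf, maxStop)
	// Output: [1 2 2 1] 4
}
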
diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go
new file mode 100644
index 000000000..d02ad27fd
--- /dev/null
+++ b/weed/filesys/dirty_page_interval_test.go
@@ -0,0 +1,113 @@
+package filesys
+
+import (
+ "bytes"
+ "math/rand"
+ "testing"
+)
+
+func TestContinuousIntervals_AddIntervalAppend(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 25, 25, 25
+ c.AddInterval(getBytes(25, 3), 0)
+ // _, _, 23, 23, 23, 23
+ c.AddInterval(getBytes(23, 4), 2)
+
+ expectedData(t, c, 0, 25, 25, 23, 23, 23, 23)
+
+}
+
+func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 25, 25, 25, 25, 25
+ c.AddInterval(getBytes(25, 5), 0)
+ // _, _, 23, 23
+ c.AddInterval(getBytes(23, 2), 2)
+
+ expectedData(t, c, 0, 25, 25, 23, 23, 25)
+
+}
+
+func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 1,
+ c.AddInterval(getBytes(1, 1), 0)
+ // _, 2,
+ c.AddInterval(getBytes(2, 1), 1)
+ // _, _, 3, 3, 3
+ c.AddInterval(getBytes(3, 3), 2)
+ // _, _, _, 4, 4, 4
+ c.AddInterval(getBytes(4, 3), 3)
+
+ expectedData(t, c, 0, 1, 2, 3, 4, 4, 4)
+
+}
+
+func TestContinuousIntervals_RealCase1(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ // 25,
+ c.AddInterval(getBytes(25, 1), 0)
+ // _, _, _, _, 23, 23
+ c.AddInterval(getBytes(23, 2), 4)
+ // _, _, _, 24, 24, 24, 24
+ c.AddInterval(getBytes(24, 4), 3)
+
+ // _, 22, 22
+ c.AddInterval(getBytes(22, 2), 1)
+
+ expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24)
+
+}
+
+func TestRandomWrites(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
+ data := make([]byte, 1024)
+
+ for i := 0; i < 1024; i++ {
+
+ start, stop := rand.Intn(len(data)), rand.Intn(len(data))
+ if start > stop {
+ start, stop = stop, start
+ }
+
+ rand.Read(data[start : stop+1])
+
+ c.AddInterval(data[start:stop+1], int64(start))
+
+ expectedData(t, c, 0, data...)
+
+ }
+
+}
+
+func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) {
+ start, stop := int64(offset), int64(offset+len(data))
+ for _, list := range c.lists {
+ nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size())
+ if nodeStart < nodeStop {
+ buf := make([]byte, nodeStop-nodeStart)
+ list.ReadData(buf, nodeStart, nodeStop)
+ if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 {
+ t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf)
+ }
+ }
+ }
+}
+
+func getBytes(content byte, length int) []byte {
+ data := make([]byte, length)
+ for i := 0; i < length; i++ {
+ data[i] = content
+ }
+ return data
+}
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index afe78ee0f..bb57988cd 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -3,20 +3,22 @@ package filesys
import (
"context"
"os"
- "path/filepath"
"sort"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const blockSize = 512
var _ = fs.Node(&File{})
+var _ = fs.NodeIdentifier(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.NodeSetattrer(&File{})
@@ -24,35 +26,56 @@ var _ = fs.NodeGetxattrer(&File{})
var _ = fs.NodeSetxattrer(&File{})
var _ = fs.NodeRemovexattrer(&File{})
var _ = fs.NodeListxattrer(&File{})
+var _ = fs.NodeForgetter(&File{})
type File struct {
- Name string
- dir *Dir
- wfs *WFS
- entry *filer_pb.Entry
- entryViewCache []filer2.VisibleInterval
- isOpen bool
+ Name string
+ dir *Dir
+ wfs *WFS
+ entry *filer_pb.Entry
+ isOpen int
+ dirtyMetadata bool
+ id uint64
+}
+
+func (file *File) fullpath() util.FullPath {
+ return util.NewFullPath(file.dir.FullPath(), file.Name)
}
-func (file *File) fullpath() string {
- return filepath.Join(file.dir.Path, file.Name)
+func (file *File) Id() uint64 {
+ return file.id
}
-func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
+func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
- glog.V(4).Infof("file Attr %s", file.fullpath())
+ glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr)
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- attr.Mode = os.FileMode(file.entry.Attributes.FileMode)
- attr.Size = filer2.TotalSize(file.entry.Chunks)
- attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0)
- attr.Gid = file.entry.Attributes.Gid
- attr.Uid = file.entry.Attributes.Uid
+ if entry == nil {
+ return fuse.ENOENT
+ }
+
+ attr.Inode = file.Id()
+ attr.Valid = time.Second
+ attr.Mode = os.FileMode(entry.Attributes.FileMode)
+ attr.Size = filer.FileSize(entry)
+ if file.isOpen > 0 {
+ attr.Size = entry.Attributes.FileSize
+ glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
+ }
+ attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
+ attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
+ attr.Gid = entry.Attributes.Gid
+ attr.Uid = entry.Attributes.Uid
attr.Blocks = attr.Size/blockSize + 1
attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit)
+ if entry.HardLinkCounter > 0 {
+ attr.Nlink = uint32(entry.HardLinkCounter)
+ }
return nil
@@ -62,24 +85,23 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
glog.V(4).Infof("file Getxattr %s", file.fullpath())
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- return getxattr(file.entry, req, resp)
+ return getxattr(entry, req, resp)
}
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
- file.isOpen = true
-
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
resp.Handle = fuse.HandleID(handle.handle)
- glog.V(3).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
+ glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
return handle, nil
@@ -87,48 +109,89 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- if err := file.maybeLoadEntry(ctx); err != nil {
+ glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
+
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
+ if file.isOpen > 0 {
+ file.wfs.handlesLock.Lock()
+ fileHandle := file.wfs.handles[file.Id()]
+ file.wfs.handlesLock.Unlock()
+
+ if fileHandle != nil {
+ fileHandle.Lock()
+ defer fileHandle.Unlock()
+ }
+ }
- glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes)
if req.Valid.Size() {
- glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size)
- if req.Size == 0 {
+ glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(entry.Chunks))
+ if req.Size < filer.FileSize(entry) {
// fmt.Printf("truncate %v \n", fullPath)
- file.entry.Chunks = nil
- file.entryViewCache = nil
+ var chunks []*filer_pb.FileChunk
+ var truncatedChunks []*filer_pb.FileChunk
+ for _, chunk := range entry.Chunks {
+ int64Size := int64(chunk.Size)
+ if chunk.Offset+int64Size > int64(req.Size) {
+ // this chunk is truncated
+ int64Size = int64(req.Size) - chunk.Offset
+ if int64Size > 0 {
+ chunks = append(chunks, chunk)
+ glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
+ chunk.Size = uint64(int64Size)
+ } else {
+ glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
+ truncatedChunks = append(truncatedChunks, chunk)
+ }
+ }
+ }
+ entry.Chunks = chunks
}
- file.entry.Attributes.FileSize = req.Size
+ entry.Attributes.FileSize = req.Size
+ file.dirtyMetadata = true
}
+
if req.Valid.Mode() {
- file.entry.Attributes.FileMode = uint32(req.Mode)
+ entry.Attributes.FileMode = uint32(req.Mode)
+ file.dirtyMetadata = true
}
if req.Valid.Uid() {
- file.entry.Attributes.Uid = req.Uid
+ entry.Attributes.Uid = req.Uid
+ file.dirtyMetadata = true
}
if req.Valid.Gid() {
- file.entry.Attributes.Gid = req.Gid
+ entry.Attributes.Gid = req.Gid
+ file.dirtyMetadata = true
}
if req.Valid.Crtime() {
- file.entry.Attributes.Crtime = req.Crtime.Unix()
+ entry.Attributes.Crtime = req.Crtime.Unix()
+ file.dirtyMetadata = true
}
if req.Valid.Mtime() {
- file.entry.Attributes.Mtime = req.Mtime.Unix()
+ entry.Attributes.Mtime = req.Mtime.Unix()
+ file.dirtyMetadata = true
+ }
+
+ if req.Valid.Handle() {
+ // fmt.Printf("file handle => %d\n", req.Handle)
}
- if file.isOpen {
+ if file.isOpen > 0 {
return nil
}
- file.wfs.listDirectoryEntriesCache.Delete(file.fullpath())
+ if !file.dirtyMetadata {
+ return nil
+ }
- return file.saveEntry(ctx)
+ return file.saveEntry(entry)
}
@@ -136,17 +199,16 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- if err := setxattr(file.entry, req); err != nil {
+ if err := setxattr(entry, req); err != nil {
return err
}
- file.wfs.listDirectoryEntriesCache.Delete(file.fullpath())
-
- return file.saveEntry(ctx)
+ return file.saveEntry(entry)
}
@@ -154,17 +216,16 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- if err := removexattr(file.entry, req); err != nil {
+ if err := removexattr(entry, req); err != nil {
return err
}
- file.wfs.listDirectoryEntriesCache.Delete(file.fullpath())
-
- return file.saveEntry(ctx)
+ return file.saveEntry(entry)
}
@@ -172,11 +233,12 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
glog.V(4).Infof("file Listxattr %s", file.fullpath())
- if err := file.maybeLoadEntry(ctx); err != nil {
+ entry, err := file.maybeLoadEntry(ctx)
+ if err != nil {
return err
}
- if err := listxattr(file.entry, req, resp); err != nil {
+ if err := listxattr(entry, req, resp); err != nil {
return err
}
@@ -187,69 +249,112 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req)
+ glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
return nil
}
-func (file *File) maybeLoadEntry(ctx context.Context) error {
- if file.entry == nil || !file.isOpen {
- entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name)
- if err != nil {
- return err
- }
- if entry != nil {
- file.setEntry(entry)
+func (file *File) Forget() {
+ t := util.NewFullPath(file.dir.FullPath(), file.Name)
+ glog.V(4).Infof("Forget file %s", t)
+ file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode()))
+}
+
+func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {
+
+ file.wfs.handlesLock.Lock()
+ handle, found := file.wfs.handles[file.Id()]
+ file.wfs.handlesLock.Unlock()
+ entry = file.entry
+ if found {
+ glog.V(4).Infof("maybeLoadEntry found opened file %s/%s: %v %v", file.dir.FullPath(), file.Name, handle.f.entry, entry)
+ entry = handle.f.entry
+ }
+
+ if entry != nil {
+ if len(entry.HardLinkId) == 0 {
+ // hard links are the only entries that must always be reloaded
+ return entry, nil
}
}
- return nil
+ entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
+ if err != nil {
+ glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ return entry, err
+ }
+ if entry != nil {
+ // file.entry = entry
+ } else {
+ glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
+ }
+ return entry, nil
}
-func (file *File) addChunk(chunk *filer_pb.FileChunk) {
- if chunk != nil {
- file.addChunks([]*filer_pb.FileChunk{chunk})
+func lessThan(a, b *filer_pb.FileChunk) bool {
+ if a.Mtime == b.Mtime {
+ return a.Fid.FileKey < b.Fid.FileKey
}
+ return a.Mtime < b.Mtime
}
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
- sort.Slice(chunks, func(i, j int) bool {
- return chunks[i].Mtime < chunks[j].Mtime
- })
+ // find the earliest incoming chunk
+ newChunks := chunks
+ earliestChunk := newChunks[0]
+ for i := 1; i < len(newChunks); i++ {
+ if lessThan(newChunks[i], earliestChunk) {
+ earliestChunk = newChunks[i]
+ }
+ }
- var newVisibles []filer2.VisibleInterval
- for _, chunk := range chunks {
- newVisibles = filer2.MergeIntoVisibles(file.entryViewCache, newVisibles, chunk)
- t := file.entryViewCache[:0]
- file.entryViewCache = newVisibles
- newVisibles = t
+ entry := file.getEntry()
+ if entry == nil {
+ return
}
- glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
+ // pick out-of-order chunks from existing chunks
+ for _, chunk := range entry.Chunks {
+ if lessThan(earliestChunk, chunk) {
+ chunks = append(chunks, chunk)
+ }
+ }
- file.entry.Chunks = append(file.entry.Chunks, chunks...)
-}
+ // sort incoming chunks
+ sort.Slice(chunks, func(i, j int) bool {
+ return lessThan(chunks[i], chunks[j])
+ })
-func (file *File) setEntry(entry *filer_pb.Entry) {
- file.entry = entry
- file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks)
+ glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks))
+
+ entry.Chunks = append(entry.Chunks, newChunks...)
}
-func (file *File) saveEntry(ctx context.Context) error {
- return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+func (file *File) saveEntry(entry *filer_pb.Entry) error {
+ return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ file.wfs.mapPbIdFromLocalToFiler(entry)
+ defer file.wfs.mapPbIdFromFilerToLocal(entry)
request := &filer_pb.UpdateEntryRequest{
- Directory: file.dir.Path,
- Entry: file.entry,
+ Directory: file.dir.FullPath(),
+ Entry: entry,
+ Signatures: []int32{file.wfs.signature},
}
- glog.V(1).Infof("save file entry: %v", request)
- _, err := client.UpdateEntry(ctx, request)
+ glog.V(4).Infof("save file entry: %v", request)
+ _, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err)
+ glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}
+ file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+
return nil
})
}
+
+func (file *File) getEntry() *filer_pb.Entry {
+ return file.entry
+}
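addChunks keeps chunks ordered by modification time, with the file key as a deterministic tie-breaker, and merges in any existing chunks that sort after the earliest incoming one. A standalone sketch of the comparator-plus-sort pattern (the Chunk struct here is hypothetical, not the filer_pb type):

    package main

    import (
    	"fmt"
    	"sort"
    )

    type Chunk struct {
    	Mtime   int64
    	FileKey uint64
    }

    // lessThan orders by Mtime and falls back to FileKey on ties,
    // so the ordering is total and stable across runs.
    func lessThan(a, b Chunk) bool {
    	if a.Mtime == b.Mtime {
    		return a.FileKey < b.FileKey
    	}
    	return a.Mtime < b.Mtime
    }

    func main() {
    	chunks := []Chunk{{2, 7}, {1, 9}, {2, 3}}
    	sort.Slice(chunks, func(i, j int) bool { return lessThan(chunks[i], chunks[j]) })
    	fmt.Println(chunks) // [{1 9} {2 3} {2 7}]
    }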
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 101f5c056..27ffab6e1 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -3,39 +3,51 @@ package filesys
import (
"context"
"fmt"
- "mime"
- "path"
+ "io"
+ "math"
+ "net/http"
+ "os"
+ "sync"
"time"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gabriel-vasile/mimetype"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type FileHandle struct {
// cache file has been written to
- dirtyPages *ContinuousDirtyPages
- contentType string
- dirtyMetadata bool
- handle uint64
+ dirtyPages *ContinuousDirtyPages
+ entryViewCache []filer.VisibleInterval
+ reader io.ReaderAt
+ contentType string
+ handle uint64
+ sync.Mutex
f *File
RequestId fuse.RequestID // unique ID for request
NodeId fuse.NodeID // file or directory the request is about
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
+
}
func newFileHandle(file *File, uid, gid uint32) *FileHandle {
- return &FileHandle{
+ fh := &FileHandle{
f: file,
dirtyPages: newDirtyPages(file),
Uid: uid,
Gid: gid,
}
+ entry := fh.f.getEntry()
+ if entry != nil {
+ entry.Attributes.FileSize = filer.FileSize(entry)
+ }
+
+ return fh
}
var _ = fs.Handle(&FileHandle{})
@@ -48,134 +60,263 @@ var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
- glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size))
+ glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
+ fh.Lock()
+ defer fh.Unlock()
- // this value should come from the filer instead of the old f
- if len(fh.f.entry.Chunks) == 0 {
- glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name)
+ if req.Size <= 0 {
return nil
}
- buff := make([]byte, req.Size)
-
- if fh.f.entryViewCache == nil {
- fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks)
+ buff := resp.Data[:cap(resp.Data)]
+ if req.Size > cap(resp.Data) {
+ // should not happen
+ buff = make([]byte, req.Size)
}
- chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size)
-
- totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset)
+ totalRead, err := fh.readFromChunks(buff, req.Offset)
+ if err == nil || err == io.EOF {
+ maxStop := fh.readFromDirtyPages(buff, req.Offset)
+ totalRead = max(maxStop-req.Offset, totalRead)
+ }
- resp.Data = buff[:totalRead]
+ if err == io.EOF {
+ err = nil
+ }
if err != nil {
- glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+ glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
+ return fuse.EIO
+ }
+
+ if totalRead > int64(len(buff)) {
+ glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
+ totalRead = min(int64(len(buff)), totalRead)
+ }
+ if err == nil {
+ resp.Data = buff[:totalRead]
}
return err
}
-// Write to the file handle
-func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
+func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) {
+ maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset)
+ return
+}
- // write the request to volume servers
+func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
- glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)))
+ entry := fh.f.getEntry()
+ if entry == nil {
+ return 0, io.EOF
+ }
- chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
- if err != nil {
- glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
- return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err)
+ fileSize := int64(filer.FileSize(entry))
+ fileFullPath := fh.f.fullpath()
+
+ if fileSize == 0 {
+ glog.V(1).Infof("empty fh %v", fileFullPath)
+ return 0, io.EOF
}
- resp.Size = len(req.Data)
+ if offset+int64(len(buff)) <= int64(len(entry.Content)) {
+ totalRead := copy(buff, entry.Content[offset:])
+ glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead)
+ return int64(totalRead), nil
+ }
- if req.Offset == 0 {
- // detect mime type
- detectedMIME := mimetype.Detect(req.Data)
- fh.contentType = detectedMIME.String()
- if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() {
- fh.contentType = mime.TypeByExtension(ext)
+ var chunkResolveErr error
+ if fh.entryViewCache == nil {
+ fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
+ if chunkResolveErr != nil {
+ return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
}
+ fh.reader = nil
+ }
+
+ reader := fh.reader
+ if reader == nil {
+ chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64)
+ reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize)
+ }
+ fh.reader = reader
- fh.dirtyMetadata = true
+ totalRead, err := reader.ReadAt(buff, offset)
+
+ if err != nil && err != io.EOF {
+ glog.Errorf("file handle read %s: %v", fileFullPath, err)
}
- if len(chunks) > 0 {
+ glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err)
+
+ return int64(totalRead), err
+}
- fh.f.addChunks(chunks)
+// Write to the file handle
+func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
+
+ fh.Lock()
+ defer fh.Unlock()
+
+ // write the request to volume servers
+ data := req.Data
+ if len(data) <= 512 {
+ // fuse message cacheable size
+ data = make([]byte, len(req.Data))
+ copy(data, req.Data)
+ }
- fh.dirtyMetadata = true
+ entry := fh.f.getEntry()
+ if entry == nil {
+ return fuse.EIO
}
+ entry.Content = nil
+ entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(entry.Attributes.FileSize)))
+ glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
+
+ fh.dirtyPages.AddPage(req.Offset, data)
+
+ resp.Size = len(data)
+
+ if req.Offset == 0 {
+ // detect mime type
+ fh.contentType = http.DetectContentType(data)
+ fh.f.dirtyMetadata = true
+ }
+
+ fh.f.dirtyMetadata = true
+
return nil
}
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
- glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle)
+ glog.V(4).Infof("Release %v fh %d open=%d", fh.f.fullpath(), fh.handle, fh.f.isOpen)
+
+ fh.Lock()
+ defer fh.Unlock()
- fh.dirtyPages.releaseResource()
+ fh.f.isOpen--
- fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
+ if fh.f.isOpen <= 0 {
+ fh.f.entry = nil
+ fh.entryViewCache = nil
+ fh.reader = nil
- fh.f.isOpen = false
+ fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
+ }
+
+ if fh.f.isOpen < 0 {
+ glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
+ fh.f.isOpen = 0
+ return nil
+ }
return nil
}
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
- // fflush works at fh level
- // send the data to the OS
- glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
- chunk, err := fh.dirtyPages.FlushToStorage(ctx)
- if err != nil {
- glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
- return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
+ glog.V(4).Infof("Flush %v fh %d", fh.f.fullpath(), fh.handle)
+
+ fh.Lock()
+ defer fh.Unlock()
+
+ if err := fh.doFlush(ctx, req.Header); err != nil {
+ glog.Errorf("Flush doFlush %s: %v", fh.f.Name, err)
+ return err
}
- fh.f.addChunk(chunk)
+ glog.V(4).Infof("Flush %v fh %d success", fh.f.fullpath(), fh.handle)
+ return nil
+}
+
+func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
+ // flush works at fh level
+ // send the data to the OS
+ glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
+
+ fh.dirtyPages.saveExistingPagesToStorage()
+
+ fh.dirtyPages.writeWaitGroup.Wait()
- if !fh.dirtyMetadata {
+ if fh.dirtyPages.lastErr != nil {
+ glog.Errorf("%v doFlush last err: %v", fh.f.fullpath(), fh.dirtyPages.lastErr)
+ return fuse.EIO
+ }
+
+ if !fh.f.dirtyMetadata {
return nil
}
- return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err := fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ entry := fh.f.getEntry()
+ if entry == nil {
+ return nil
+ }
- if fh.f.entry.Attributes != nil {
- fh.f.entry.Attributes.Mime = fh.contentType
- fh.f.entry.Attributes.Uid = req.Uid
- fh.f.entry.Attributes.Gid = req.Gid
- fh.f.entry.Attributes.Mtime = time.Now().Unix()
- fh.f.entry.Attributes.Crtime = time.Now().Unix()
- fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask)
+ if entry.Attributes != nil {
+ entry.Attributes.Mime = fh.contentType
+ if entry.Attributes.Uid == 0 {
+ entry.Attributes.Uid = header.Uid
+ }
+ if entry.Attributes.Gid == 0 {
+ entry.Attributes.Gid = header.Gid
+ }
+ if entry.Attributes.Crtime == 0 {
+ entry.Attributes.Crtime = time.Now().Unix()
+ }
+ entry.Attributes.Mtime = time.Now().Unix()
+ entry.Attributes.FileMode = uint32(os.FileMode(entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask)
+ entry.Attributes.Collection = fh.dirtyPages.collection
+ entry.Attributes.Replication = fh.dirtyPages.replication
}
request := &filer_pb.CreateEntryRequest{
- Directory: fh.f.dir.Path,
- Entry: fh.f.entry,
+ Directory: fh.f.dir.FullPath(),
+ Entry: entry,
+ Signatures: []int32{fh.f.wfs.signature},
}
- glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks))
- for i, chunk := range fh.f.entry.Chunks {
- glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(entry.Chunks))
+ for i, chunk := range entry.Chunks {
+ glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
- chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks)
- fh.f.entry.Chunks = chunks
- // fh.f.entryViewCache = nil
+ manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks)
- if _, err := client.CreateEntry(ctx, request); err != nil {
- glog.Errorf("update fh: %v", err)
- return fmt.Errorf("update fh: %v", err)
+ chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks)
+ chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
+ if manifestErr != nil {
+ // not fatal: fall back to the plain (unmanifested) chunk list
+ glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
}
+ entry.Chunks = append(chunks, manifestChunks...)
- fh.f.wfs.deleteFileChunks(ctx, garbages)
- for i, chunk := range garbages {
- glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
+ fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry)
+ defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
+
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+ return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
+ fh.f.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+
return nil
})
+
+ if err == nil {
+ fh.f.dirtyMetadata = false
+ }
+
+ if err != nil {
+ glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
+ return fuse.EIO
+ }
+
+ return nil
}
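Two details in the handle code above are easy to miss: Write copies req.Data when it is 512 bytes or smaller, presumably because small FUSE messages may have their buffer reused after the handler returns, and Release only drops the cached entry and reader once the open count reaches zero. A minimal sketch of the defensive-copy idiom, under that buffer-reuse assumption:

    package main

    import "fmt"

    // stash queues a caller-provided buffer for asynchronous processing.
    // Copying decouples the queued bytes from the caller's reusable buffer.
    func stash(queue *[][]byte, data []byte) {
    	owned := make([]byte, len(data))
    	copy(owned, data)
    	*queue = append(*queue, owned)
    }

    func main() {
    	var queue [][]byte
    	buf := []byte("first")
    	stash(&queue, buf)
    	copy(buf, "XXXXX")            // caller reuses its buffer
    	fmt.Println(string(queue[0])) // still "first"
    }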
diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go
new file mode 100644
index 000000000..6b1012090
--- /dev/null
+++ b/weed/filesys/fscache.go
@@ -0,0 +1,213 @@
+package filesys
+
+import (
+ "sync"
+
+ "github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type FsCache struct {
+ root *FsNode
+ sync.RWMutex
+}
+type FsNode struct {
+ parent *FsNode
+ node fs.Node
+ name string
+ childrenLock sync.RWMutex
+ children map[string]*FsNode
+}
+
+func newFsCache(root fs.Node) *FsCache {
+ return &FsCache{
+ root: &FsNode{
+ node: root,
+ },
+ }
+}
+
+func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
+
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.doGetFsNode(path)
+}
+
+func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return nil
+ }
+ }
+ return t.node
+}
+
+func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.doSetFsNode(path, node)
+}
+
+func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.ensureChild(p)
+ }
+ t.node = node
+}
+
+func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.doGetFsNode(path)
+ if t != nil {
+ return t
+ }
+ t = genNodeFn()
+ c.doSetFsNode(path, t)
+ return t
+}
+
+func (c *FsCache) DeleteFsNode(path util.FullPath) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return
+ }
+ }
+ if t.parent != nil {
+ t.parent.disconnectChild(t)
+ }
+ t.deleteSelf()
+}
+
+// oldPath and newPath are full path including the new name
+func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
+
+ c.Lock()
+ defer c.Unlock()
+
+ // find old node
+ src := c.root
+ for _, p := range oldPath.Split() {
+ src = src.findChild(p)
+ if src == nil {
+ return src
+ }
+ }
+ if src.parent != nil {
+ src.parent.disconnectChild(src)
+ }
+
+ // find new node
+ target := c.root
+ for _, p := range newPath.Split() {
+ target = target.ensureChild(p)
+ }
+ parent := target.parent
+ if dir, ok := src.node.(*Dir); ok {
+ dir.name = target.name // target is only a placeholder node carrying the new name
+ }
+ if f, ok := src.node.(*File); ok {
+ f.Name = target.name
+ entry := f.getEntry()
+ if entry != nil {
+ entry.Name = f.Name
+ }
+ }
+ parent.disconnectChild(target)
+
+ target.deleteSelf()
+
+ src.name = target.name
+ src.connectToParent(parent)
+
+ return src
+}
+
+func (n *FsNode) connectToParent(parent *FsNode) {
+ n.parent = parent
+ oldNode := parent.findChild(n.name)
+ if oldNode != nil {
+ oldNode.deleteSelf()
+ }
+ if dir, ok := n.node.(*Dir); ok {
+ if parent.node != nil {
+ dir.parent = parent.node.(*Dir)
+ }
+ }
+ if f, ok := n.node.(*File); ok {
+ if parent.node != nil {
+ f.dir = parent.node.(*Dir)
+ }
+ }
+ n.childrenLock.Lock()
+ parent.children[n.name] = n
+ n.childrenLock.Unlock()
+}
+
+func (n *FsNode) findChild(name string) *FsNode {
+ n.childrenLock.RLock()
+ defer n.childrenLock.RUnlock()
+
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ return nil
+}
+
+func (n *FsNode) ensureChild(name string) *FsNode {
+ n.childrenLock.Lock()
+ defer n.childrenLock.Unlock()
+
+ if n.children == nil {
+ n.children = make(map[string]*FsNode)
+ }
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ t := &FsNode{
+ parent: n,
+ node: nil,
+ name: name,
+ children: nil,
+ }
+ n.children[name] = t
+ return t
+}
+
+func (n *FsNode) disconnectChild(child *FsNode) {
+ n.childrenLock.Lock()
+ delete(n.children, child.name)
+ n.childrenLock.Unlock()
+ child.parent = nil
+}
+
+func (n *FsNode) deleteSelf() {
+ n.childrenLock.Lock()
+ for _, child := range n.children {
+ child.deleteSelf()
+ }
+ n.children = nil
+ n.childrenLock.Unlock()
+
+ n.node = nil
+ n.parent = nil
+
+}
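FsCache is a path trie behind a coarse RWMutex, and EnsureFsNode is its get-or-create operation: the generator runs under the lock, so two concurrent lookups of the same path share one node. A reduced sketch of that locking shape (the map-based cache is illustrative, not the trie above):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type cache struct {
    	mu    sync.Mutex
    	nodes map[string]string
    }

    // getOrCreate holds the lock across lookup and creation so the
    // generator never runs twice for the same path.
    func (c *cache) getOrCreate(path string, gen func() string) string {
    	c.mu.Lock()
    	defer c.mu.Unlock()
    	if n, ok := c.nodes[path]; ok {
    		return n
    	}
    	n := gen()
    	c.nodes[path] = n
    	return n
    }

    func main() {
    	c := &cache{nodes: map[string]string{}}
    	fmt.Println(c.getOrCreate("/a/b/c", func() string { return "file-c" }))
    	fmt.Println(c.getOrCreate("/a/b/c", func() string { return "never called" }))
    }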
diff --git a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go
new file mode 100644
index 000000000..1152eb32e
--- /dev/null
+++ b/weed/filesys/fscache_test.go
@@ -0,0 +1,115 @@
+package filesys
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestPathSplit(t *testing.T) {
+ parts := util.FullPath("/").Split()
+ if len(parts) != 0 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+ parts = util.FullPath("/readme.md").Split()
+ if len(parts) != 1 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+}
+
+func TestFsCache(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ x := cache.GetFsNode(util.FullPath("/y/x"))
+ if x != nil {
+ t.Errorf("wrong node!")
+ }
+
+ p := util.FullPath("/a/b/c")
+ cache.SetFsNode(p, &File{Name: "cc"})
+ tNode := cache.GetFsNode(p)
+ tFile := tNode.(*File)
+ if tFile.Name != "cc" {
+ t.Errorf("expecting a FsNode")
+ }
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ b := cache.GetFsNode(util.FullPath("/a/b"))
+ if b != nil {
+ t.Errorf("unexpected node!")
+ }
+
+ a := cache.GetFsNode(util.FullPath("/a"))
+ if a == nil {
+ t.Errorf("missing node!")
+ }
+
+ cache.DeleteFsNode(util.FullPath("/a"))
+ if b != nil {
+ t.Errorf("unexpected node!")
+ }
+
+ a = cache.GetFsNode(util.FullPath("/a"))
+ if a != nil {
+ t.Errorf("wrong DeleteFsNode!")
+ }
+
+ z := cache.GetFsNode(util.FullPath("/z"))
+ if z == nil {
+ t.Errorf("missing node!")
+ }
+
+ y := cache.GetFsNode(util.FullPath("/x/y"))
+ if y != nil {
+ t.Errorf("wrong node!")
+ }
+
+}
+
+func TestFsCacheMove(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x"))
+
+ d := cache.GetFsNode(util.FullPath("/z/x/d"))
+ if d == nil {
+ t.Errorf("unexpected nil node!")
+ }
+ if d.(*File).Name != "dd" {
+ t.Errorf("unexpected non dd node!")
+ }
+
+}
+
+func TestFsCacheMove2(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+
+ cache.Move(util.FullPath("/a/b/d"), util.FullPath("/a/b/e"))
+
+ d := cache.GetFsNode(util.FullPath("/a/b/e"))
+ if d == nil {
+ t.Errorf("unexpected nil node!")
+ }
+ if d.(*File).Name != "e" {
+ t.Errorf("unexpected node!")
+ }
+
+}
diff --git a/weed/filesys/meta_cache/cache_config.go b/weed/filesys/meta_cache/cache_config.go
new file mode 100644
index 000000000..e6593ebde
--- /dev/null
+++ b/weed/filesys/meta_cache/cache_config.go
@@ -0,0 +1,32 @@
+package meta_cache
+
+import "github.com/chrislusf/seaweedfs/weed/util"
+
+var (
+ _ = util.Configuration(&cacheConfig{})
+)
+
+// implements util.Configuration
+type cacheConfig struct {
+ dir string
+}
+
+func (c cacheConfig) GetString(key string) string {
+ return c.dir
+}
+
+func (c cacheConfig) GetBool(key string) bool {
+ panic("implement me")
+}
+
+func (c cacheConfig) GetInt(key string) int {
+ panic("implement me")
+}
+
+func (c cacheConfig) GetStringSlice(key string) []string {
+ panic("implement me")
+}
+
+func (c cacheConfig) SetDefault(key string, value interface{}) {
+ panic("implement me")
+}
diff --git a/weed/filesys/meta_cache/id_mapper.go b/weed/filesys/meta_cache/id_mapper.go
new file mode 100644
index 000000000..4a2179f31
--- /dev/null
+++ b/weed/filesys/meta_cache/id_mapper.go
@@ -0,0 +1,101 @@
+package meta_cache
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type UidGidMapper struct {
+ uidMapper *IdMapper
+ gidMapper *IdMapper
+}
+
+type IdMapper struct {
+ localToFiler map[uint32]uint32
+ filerToLocal map[uint32]uint32
+}
+
+// UidGidMapper translates local uid/gid to filer uid/gid
+// The local storage persists the same values as the filer.
+// The local->filer translation happens when the filer is updated first and the result is then saved to the meta_cache.
+// The filer->local translation happens when reading from the meta_cache.
+func NewUidGidMapper(uidPairsStr, gidPairStr string) (*UidGidMapper, error) {
+ uidMapper, err := newIdMapper(uidPairsStr)
+ if err != nil {
+ return nil, err
+ }
+ gidMapper, err := newIdMapper(gidPairStr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &UidGidMapper{
+ uidMapper: uidMapper,
+ gidMapper: gidMapper,
+ }, nil
+}
+
+func (m *UidGidMapper) LocalToFiler(uid, gid uint32) (uint32, uint32) {
+ return m.uidMapper.LocalToFiler(uid), m.gidMapper.LocalToFiler(gid)
+}
+func (m *UidGidMapper) FilerToLocal(uid, gid uint32) (uint32, uint32) {
+ return m.uidMapper.FilerToLocal(uid), m.gidMapper.FilerToLocal(gid)
+}
+
+func (m *IdMapper) LocalToFiler(id uint32) uint32 {
+ value, found := m.localToFiler[id]
+ if found {
+ return value
+ }
+ return id
+}
+func (m *IdMapper) FilerToLocal(id uint32) uint32 {
+ value, found := m.filerToLocal[id]
+ if found {
+ return value
+ }
+ return id
+}
+
+func newIdMapper(pairsStr string) (*IdMapper, error) {
+
+ localToFiler, filerToLocal, err := parseUint32Pairs(pairsStr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &IdMapper{
+ localToFiler: localToFiler,
+ filerToLocal: filerToLocal,
+ }, nil
+
+}
+
+func parseUint32Pairs(pairsStr string) (localToFiler, filerToLocal map[uint32]uint32, err error) {
+
+ if pairsStr == "" {
+ return
+ }
+
+ localToFiler = make(map[uint32]uint32)
+ filerToLocal = make(map[uint32]uint32)
+ for _, pairStr := range strings.Split(pairsStr, ",") {
+ pair := strings.Split(pairStr, ":")
+ localUidStr, filerUidStr := pair[0], pair[1]
+ localUid, localUidErr := strconv.Atoi(localUidStr)
+ if localUidErr != nil {
+ err = fmt.Errorf("failed to parse local %s: %v", localUidStr, localUidErr)
+ return
+ }
+ filerUid, filerUidErr := strconv.Atoi(filerUidStr)
+ if filerUidErr != nil {
+ err = fmt.Errorf("failed to parse remote %s: %v", filerUidStr, filerUidErr)
+ return
+ }
+ localToFiler[uint32(localUid)] = uint32(filerUid)
+ filerToLocal[uint32(filerUid)] = uint32(localUid)
+ }
+
+ return
+}
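The mapper accepts a comma-separated list of local:filer pairs, e.g. "1000:999,1001:998", and builds both translation directions in one pass. parseUint32Pairs assumes every pair contains a colon; a sketch of the same parsing with that validation made explicit (a hedged variant, not a drop-in replacement):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // parsePairs parses "local:filer,local:filer" into a forward map,
    // rejecting malformed pairs instead of panicking on a missing colon.
    func parsePairs(s string) (map[uint32]uint32, error) {
    	m := make(map[uint32]uint32)
    	if s == "" {
    		return m, nil
    	}
    	for _, pairStr := range strings.Split(s, ",") {
    		pair := strings.SplitN(pairStr, ":", 2)
    		if len(pair) != 2 {
    			return nil, fmt.Errorf("malformed pair %q", pairStr)
    		}
    		local, err := strconv.ParseUint(pair[0], 10, 32)
    		if err != nil {
    			return nil, fmt.Errorf("parse local %q: %v", pair[0], err)
    		}
    		filer, err := strconv.ParseUint(pair[1], 10, 32)
    		if err != nil {
    			return nil, fmt.Errorf("parse filer %q: %v", pair[1], err)
    		}
    		m[uint32(local)] = uint32(filer)
    	}
    	return m, nil
    }

    func main() {
    	m, err := parsePairs("1000:999,1001:998")
    	fmt.Println(m, err) // map[1000:999 1001:998] <nil>
    }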
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
new file mode 100644
index 000000000..b9d4724c9
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -0,0 +1,152 @@
+package meta_cache
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
+)
+
+// need to have logic similar to FilerStoreWrapper
+// e.g. fill fileId field for chunks
+
+type MetaCache struct {
+ localStore filer.VirtualFilerStore
+ sync.RWMutex
+ visitedBoundary *bounded_tree.BoundedTree
+ uidGidMapper *UidGidMapper
+ invalidateFunc func(util.FullPath)
+}
+
+func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMapper, invalidateFunc func(util.FullPath)) *MetaCache {
+ return &MetaCache{
+ localStore: openMetaStore(dbFolder),
+ visitedBoundary: bounded_tree.NewBoundedTree(baseDir),
+ uidGidMapper: uidGidMapper,
+ invalidateFunc: func(fullpath util.FullPath) {
+ if baseDir != "/" && strings.HasPrefix(string(fullpath), string(baseDir)) {
+ fullpath = fullpath[len(baseDir):]
+ }
+ invalidateFunc(fullpath)
+ },
+ }
+}
+
+func openMetaStore(dbFolder string) filer.VirtualFilerStore {
+
+ os.RemoveAll(dbFolder)
+ os.MkdirAll(dbFolder, 0755)
+
+ store := &leveldb.LevelDBStore{}
+ config := &cacheConfig{
+ dir: dbFolder,
+ }
+
+ if err := store.Initialize(config, ""); err != nil {
+ glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
+ }
+
+ return filer.NewFilerStoreWrapper(store)
+
+}
+
+func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.doInsertEntry(ctx, entry)
+}
+
+func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error {
+ return mc.localStore.InsertEntry(ctx, entry)
+}
+
+func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+
+ oldDir, _ := oldPath.DirAndName()
+ if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) {
+ if oldPath != "" {
+ if newEntry != nil && oldPath == newEntry.FullPath {
+ // skip the unnecessary deletion
+ // leave the update to the following InsertEntry operation
+ } else {
+ glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name())
+ if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
+ return err
+ }
+ }
+ }
+ } else {
+ // println("unknown old directory:", oldDir)
+ }
+
+ if newEntry != nil {
+ newDir, _ := newEntry.DirAndName()
+ if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
+ glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name())
+ if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.localStore.UpdateEntry(ctx, entry)
+}
+
+func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
+ mc.RLock()
+ defer mc.RUnlock()
+ entry, err = mc.localStore.FindEntry(ctx, fp)
+ if err != nil {
+ return nil, err
+ }
+ mc.mapIdFromFilerToLocal(entry)
+ return
+}
+
+func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.localStore.DeleteEntry(ctx, fp)
+}
+
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error {
+ mc.RLock()
+ defer mc.RUnlock()
+
+ if !mc.visitedBoundary.HasVisited(dirPath) {
+ return fmt.Errorf("unsynchronized dir: %v", dirPath)
+ }
+
+ _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool {
+ mc.mapIdFromFilerToLocal(entry)
+ return eachEntryFunc(entry)
+ })
+ return err
+}
+
+func (mc *MetaCache) Shutdown() {
+ mc.Lock()
+ defer mc.Unlock()
+ mc.localStore.Shutdown()
+}
+
+func (mc *MetaCache) mapIdFromFilerToLocal(entry *filer.Entry) {
+ entry.Attr.Uid, entry.Attr.Gid = mc.uidGidMapper.FilerToLocal(entry.Attr.Uid, entry.Attr.Gid)
+}
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
new file mode 100644
index 000000000..1ca3b16d5
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -0,0 +1,47 @@
+package meta_cache
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) error {
+
+ return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
+
+ glog.V(4).Infof("ReadDirAllEntries %s ...", path)
+
+ util.Retry("ReadDirAllEntries", func() error {
+ err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
+ entry := filer.FromPbEntry(string(dirPath), pbEntry)
+ if IsHiddenSystemEntry(string(dirPath), entry.Name()) {
+ return nil
+ }
+ if err := mc.doInsertEntry(context.Background(), entry); err != nil {
+ glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+ return err
+ }
+ if entry.IsDirectory() {
+ childDirectories = append(childDirectories, entry.Name())
+ }
+ return nil
+ })
+ return err
+ })
+
+ if err != nil {
+ err = fmt.Errorf("list %s: %v", dirPath, err)
+ }
+
+ return
+ })
+}
+
+func IsHiddenSystemEntry(dir, name string) bool {
+ return dir == "/" && name == "topics"
+}
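EnsureVisited gives the meta cache read-through semantics at directory granularity: the first touch of a directory pulls its full listing from the filer and marks it visited in the bounded tree, so later lookups are served locally. A stripped-down sketch of that visit-once pattern:

    package main

    import "fmt"

    // visitSet remembers which directories have been listed, so the
    // expensive fetch runs at most once per directory.
    type visitSet map[string]bool

    func (v visitSet) ensureVisited(dir string, fetch func(string) error) error {
    	if v[dir] {
    		return nil
    	}
    	if err := fetch(dir); err != nil {
    		return err
    	}
    	v[dir] = true
    	return nil
    }

    func main() {
    	visited := visitSet{}
    	fetches := 0
    	fetch := func(dir string) error { fetches++; fmt.Println("listing", dir); return nil }
    	visited.ensureVisited("/a", fetch)
    	visited.ensureVisited("/a", fetch) // already visited, no second fetch
    	fmt.Println("fetches:", fetches)   // 1
    }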
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
new file mode 100644
index 000000000..f9973f436
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -0,0 +1,86 @@
+package meta_cache
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
+
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+
+ for _, sig := range message.Signatures {
+ if sig == selfSignature && selfSignature != 0 {
+ return nil
+ }
+ }
+
+ dir := resp.Directory
+ var oldPath util.FullPath
+ var newEntry *filer.Entry
+ if message.OldEntry != nil {
+ oldPath = util.NewFullPath(dir, message.OldEntry.Name)
+ glog.V(4).Infof("deleting %v", oldPath)
+ }
+
+ if message.NewEntry != nil {
+ if message.NewParentPath != "" {
+ dir = message.NewParentPath
+ }
+ key := util.NewFullPath(dir, message.NewEntry.Name)
+ glog.V(4).Infof("creating %v", key)
+ newEntry = filer.FromPbEntry(dir, message.NewEntry)
+ }
+ err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
+ if err == nil && message.OldEntry != nil && message.NewEntry != nil {
+ key := util.NewFullPath(dir, message.NewEntry.Name)
+ mc.invalidateFunc(key)
+ }
+
+ return err
+
+ }
+
+ for {
+ err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: "mount",
+ PathPrefix: dir,
+ SinceNs: lastTsNs,
+ Signature: selfSignature,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ glog.Fatalf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.Errorf("subscribing filer meta change: %v", err)
+ }
+ time.Sleep(time.Second)
+ }
+}
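SubscribeMetaEvents is a reconnecting stream consumer: lastTsNs advances only after an event is applied, so when the stream breaks the loop resubscribes from the last applied event instead of replaying from the start. A reduced sketch of that loop shape, using a fake in-process stream:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    type event struct{ TsNs int64 }

    // fakeStream emits a few events after sinceNs, then fails,
    // standing in for a server-side subscription stream.
    func fakeStream(sinceNs int64, out chan<- event) error {
    	for i := int64(1); i <= 3; i++ {
    		out <- event{TsNs: sinceNs + i}
    	}
    	return errors.New("stream broken")
    }

    func main() {
    	lastTsNs := int64(0)
    	for attempt := 0; attempt < 2; attempt++ {
    		events := make(chan event, 8)
    		errCh := make(chan error, 1)
    		since := lastTsNs // snapshot before the goroutine starts
    		go func() {
    			errCh <- fakeStream(since, events)
    			close(events)
    		}()
    		for ev := range events {
    			lastTsNs = ev.TsNs // advance only after the event is applied
    		}
    		fmt.Println("resubscribing from", lastTsNs, "after:", <-errCh)
    		time.Sleep(10 * time.Millisecond) // brief pause before reconnecting
    	}
    }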
diff --git a/weed/filesys/unimplemented.go b/weed/filesys/unimplemented.go
new file mode 100644
index 000000000..5c2dcf0e1
--- /dev/null
+++ b/weed/filesys/unimplemented.go
@@ -0,0 +1,22 @@
+package filesys
+
+import (
+ "context"
+
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+)
+
+// https://github.com/bazil/fuse/issues/130
+
+var _ = fs.NodeAccesser(&Dir{})
+
+func (dir *Dir) Access(ctx context.Context, req *fuse.AccessRequest) error {
+ return fuse.ENOSYS
+}
+
+var _ = fs.NodeAccesser(&File{})
+
+func (file *File) Access(ctx context.Context, req *fuse.AccessRequest) error {
+ return fuse.ENOSYS
+}
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index e924783ec..42816d23d 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -3,32 +3,44 @@ package filesys
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"os"
+ "path"
"sync"
"time"
- "github.com/karlseguin/ccache"
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/seaweedfs/fuse"
+ "github.com/seaweedfs/fuse/fs"
+
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/seaweedfs/fuse"
- "github.com/seaweedfs/fuse/fs"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)
type Option struct {
+ MountDirectory string
+ FilerAddress string
FilerGrpcAddress string
GrpcDialOption grpc.DialOption
FilerMountRootPath string
Collection string
Replication string
TtlSec int32
+ DiskType types.DiskType
ChunkSizeLimit int64
+ ConcurrentWriters int
+ CacheDir string
+ CacheSizeMB int64
DataCenter string
- DirListCacheLimit int64
- EntryCacheTtl time.Duration
Umask os.FileMode
MountUid uint32
@@ -36,22 +48,36 @@ type Option struct {
MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
+
+ VolumeServerAccess string // how to access volume servers
+ Cipher bool // whether encrypt data on volume server
+ UidGidMapper *meta_cache.UidGidMapper
}
var _ = fs.FS(&WFS{})
var _ = fs.FSStatfser(&WFS{})
type WFS struct {
- option *Option
- listDirectoryEntriesCache *ccache.Cache
+ option *Option
+
+ // contains all open handles, protected by handlesLock
+ handlesLock sync.Mutex
+ handles map[uint64]*FileHandle
- // contains all open handles
- handles []*FileHandle
- pathToHandleIndex map[string]int
- pathToHandleLock sync.Mutex
- bufPool sync.Pool
+ bufPool sync.Pool
stats statsCache
+
+ root fs.Node
+ fsNodeCache *FsCache
+
+ chunkCache *chunk_cache.TieredChunkCache
+ metaCache *meta_cache.MetaCache
+ signature int32
+
+ // throttle writers
+ concurrentWriters *util.LimitedConcurrentExecutor
+ Server *fs.Server
}
type statsCache struct {
filer_pb.StatisticsResponse
@@ -60,72 +86,92 @@ type statsCache struct {
func NewSeaweedFileSystem(option *Option) *WFS {
wfs := &WFS{
- option: option,
- listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)),
- pathToHandleIndex: make(map[string]int),
+ option: option,
+ handles: make(map[uint64]*FileHandle),
bufPool: sync.Pool{
New: func() interface{} {
return make([]byte, option.ChunkSizeLimit)
},
},
+ signature: util.RandomInt32(),
+ }
+ cacheUniqueId := util.Md5String([]byte(option.MountDirectory + option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:8]
+ cacheDir := path.Join(option.CacheDir, cacheUniqueId)
+ if option.CacheSizeMB > 0 {
+ os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)
+ wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
}
- return wfs
-}
+ wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper, func(filePath util.FullPath) {
-func (wfs *WFS) Root() (fs.Node, error) {
- return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil
-}
+ fsNode := NodeWithId(filePath.AsInode())
+ if err := wfs.Server.InvalidateNodeData(fsNode); err != nil {
+ glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err)
+ }
-func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
+ dir, name := filePath.DirAndName()
+ parent := NodeWithId(util.FullPath(dir).AsInode())
+ if err := wfs.Server.InvalidateEntry(parent, name); err != nil {
+ glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
+ }
+ })
+ startTime := time.Now()
+ go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
+ grace.OnInterrupt(func() {
+ wfs.metaCache.Shutdown()
+ })
+
+ wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
+ wfs.fsNodeCache = newFsCache(wfs.root)
+
+ if wfs.option.ConcurrentWriters > 0 {
+ wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters)
+ }
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(client)
- }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
+ return wfs
+}
+func (wfs *WFS) Root() (fs.Node, error) {
+ return wfs.root, nil
}
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
- wfs.pathToHandleLock.Lock()
- defer wfs.pathToHandleLock.Unlock()
fullpath := file.fullpath()
+ glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
+
+ inodeId := file.Id()
- index, found := wfs.pathToHandleIndex[fullpath]
- if found && wfs.handles[index] != nil {
- glog.V(2).Infoln(fullpath, "found fileHandle id", index)
- return wfs.handles[index]
+ wfs.handlesLock.Lock()
+ existingHandle, found := wfs.handles[inodeId]
+ wfs.handlesLock.Unlock()
+ if found && existingHandle != nil {
+ existingHandle.f.isOpen++
+ glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen)
+ return existingHandle
}
+ entry, _ := file.maybeLoadEntry(context.Background())
+ file.entry = entry
fileHandle = newFileHandle(file, uid, gid)
- for i, h := range wfs.handles {
- if h == nil {
- wfs.handles[i] = fileHandle
- fileHandle.handle = uint64(i)
- wfs.pathToHandleIndex[fullpath] = i
- glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle)
- return
- }
- }
+ file.isOpen++
- wfs.handles = append(wfs.handles, fileHandle)
- fileHandle.handle = uint64(len(wfs.handles) - 1)
- glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle)
- wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
+ wfs.handlesLock.Lock()
+ wfs.handles[inodeId] = fileHandle
+ wfs.handlesLock.Unlock()
+ fileHandle.handle = inodeId
+ glog.V(4).Infof("Acquired new Handle %s open %d", fullpath, file.isOpen)
return
}
-func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) {
- wfs.pathToHandleLock.Lock()
- defer wfs.pathToHandleLock.Unlock()
+func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
+ wfs.handlesLock.Lock()
+ defer wfs.handlesLock.Unlock()
- glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
- delete(wfs.pathToHandleIndex, fullpath)
- if int(handleId) < len(wfs.handles) {
- wfs.handles[int(handleId)] = nil
- }
+ glog.V(4).Infof("ReleaseHandle %s id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+
+ delete(wfs.handles, uint64(handleId))
return
}
@@ -137,16 +183,17 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
if wfs.stats.lastChecked < time.Now().Unix()-20 {
- err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.StatisticsRequest{
Collection: wfs.option.Collection,
Replication: wfs.option.Replication,
Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
+ DiskType: string(wfs.option.DiskType),
}
glog.V(4).Infof("reading filer stats: %+v", request)
- resp, err := client.Statistics(ctx, request)
+ resp, err := client.Statistics(context.Background(), request)
if err != nil {
glog.V(0).Infof("reading filer stats %v: %v", request, err)
return err
@@ -191,3 +238,34 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
return nil
}
+
+func (wfs *WFS) mapPbIdFromFilerToLocal(entry *filer_pb.Entry) {
+ if entry.Attributes == nil {
+ return
+ }
+ entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.FilerToLocal(entry.Attributes.Uid, entry.Attributes.Gid)
+}
+func (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) {
+ if entry.Attributes == nil {
+ return
+ }
+ entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid)
+}
+
+func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType {
+ if wfs.option.VolumeServerAccess == "filerProxy" {
+ return func(fileId string) (targetUrls []string, err error) {
+ return []string{"http://" + wfs.option.FilerAddress + "/?proxyChunkId=" + fileId}, nil
+ }
+ }
+ return filer.LookupFn(wfs)
+
+}
+
+type NodeWithId uint64
+func (n NodeWithId) Id() uint64 {
+ return uint64(n)
+}
+func (n NodeWithId) Attr(ctx context.Context, attr *fuse.Attr) error {
+ return nil
+}
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
deleted file mode 100644
index 6e586b7df..000000000
--- a/weed/filesys/wfs_deletion.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package filesys
-
-import (
- "context"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/operation"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "google.golang.org/grpc"
-)
-
-func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) {
- if len(chunks) == 0 {
- return
- }
-
- var fileIds []string
- for _, chunk := range chunks {
- fileIds = append(fileIds, chunk.GetFileIdString())
- }
-
- wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
- deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds)
- return nil
- })
-}
-
-func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
-
- var vids []string
- for _, fileId := range fileIds {
- vids = append(vids, filer2.VolumeId(fileId))
- }
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
-
- m := make(map[string]operation.LookupResult)
-
- glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
- VolumeIds: vids,
- })
- if err != nil {
- return m, err
- }
-
- for _, vid := range vids {
- lr := operation.LookupResult{
- VolumeId: vid,
- Locations: nil,
- }
- locations := resp.LocationsMap[vid]
- for _, loc := range locations.Locations {
- lr.Locations = append(lr.Locations, operation.Location{
- Url: loc.Url,
- PublicUrl: loc.PublicUrl,
- })
- }
- m[vid] = lr
- }
-
- return m, err
- }
-
- _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
-
- return err
-}
diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go
new file mode 100644
index 000000000..671d20ba2
--- /dev/null
+++ b/weed/filesys/wfs_filer_client.go
@@ -0,0 +1,34 @@
+package filesys
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+var _ = filer_pb.FilerClient(&WFS{})
+
+func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ err := util.Retry("filer grpc "+wfs.option.FilerGrpcAddress, func() error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
+ })
+
+ if err == nil {
+ return nil
+ }
+ return err
+
+}
+
+func (wfs *WFS) AdjustedUrl(location *filer_pb.Location) string {
+ if wfs.option.VolumeServerAccess == "publicUrl" {
+ return location.PublicUrl
+ }
+ return location.Url
+}
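WithFilerClient now layers util.Retry over a cached gRPC connection, so a transient failure is retried instead of bubbling straight up to FUSE. A generic sketch of wrapping a callback in retries (the policy values are illustrative):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // withRetry runs fn up to attempts times, pausing between failures.
    func withRetry(name string, attempts int, fn func() error) error {
    	var err error
    	for i := 0; i < attempts; i++ {
    		if err = fn(); err == nil {
    			return nil
    		}
    		fmt.Printf("%s attempt %d failed: %v\n", name, i+1, err)
    		time.Sleep(50 * time.Millisecond)
    	}
    	return err
    }

    func main() {
    	calls := 0
    	err := withRetry("filer grpc", 3, func() error {
    		calls++
    		if calls < 2 {
    			return errors.New("transient")
    		}
    		return nil
    	})
    	fmt.Println("done after", calls, "calls:", err)
    }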
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
new file mode 100644
index 000000000..dbec3bebc
--- /dev/null
+++ b/weed/filesys/wfs_write.go
@@ -0,0 +1,75 @@
+package filesys
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType {
+
+ return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
+ var fileId, host string
+ var auth security.EncodedJwt
+
+ if err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: wfs.option.Replication,
+ Collection: wfs.option.Collection,
+ TtlSec: wfs.option.TtlSec,
+ DiskType: string(wfs.option.DiskType),
+ DataCenter: wfs.option.DataCenter,
+ Path: string(fullPath),
+ }
+
+ resp, err := client.AssignVolume(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ return err
+ }
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
+
+ fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
+ loc := &filer_pb.Location{
+ Url: resp.Url,
+ PublicUrl: resp.PublicUrl,
+ }
+ host = wfs.AdjustedUrl(loc)
+ collection, replication = resp.Collection, resp.Replication
+
+ return nil
+ }); err != nil {
+ return nil, "", "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
+ }
+
+ fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+ if wfs.option.VolumeServerAccess == "filerProxy" {
+ fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.option.FilerAddress, fileId)
+ }
+ uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
+ if err != nil {
+ glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
+ return nil, "", "", fmt.Errorf("upload data: %v", err)
+ }
+ if uploadResult.Error != "" {
+ glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
+ return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
+ }
+
+ wfs.chunkCache.SetChunk(fileId, data)
+
+ chunk = uploadResult.ToPbFileChunk(fileId, offset)
+ return chunk, collection, replication, nil
+ }
+}
diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go
index 3c0ba164a..92e43b675 100644
--- a/weed/filesys/xattr.go
+++ b/weed/filesys/xattr.go
@@ -2,11 +2,12 @@ package filesys
import (
"context"
- "path/filepath"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
+
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
@@ -107,36 +108,16 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis
}
-func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) {
+func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
- fullpath := filepath.Join(dir, name)
- item := wfs.listDirectoryEntriesCache.Get(fullpath)
- if item != nil && !item.Expired() {
- entry = item.Value().(*filer_pb.Entry)
- return
- }
- glog.V(3).Infof("read entry cache miss %s", fullpath)
-
- err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Name: name,
- Directory: dir,
- }
+ fullpath := util.NewFullPath(dir, name)
+ // glog.V(3).Infof("read entry cache miss %s", fullpath)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("file attr read file %v: %v", request, err)
- return fuse.ENOENT
- }
-
- entry = resp.Entry
- if entry != nil {
- wfs.listDirectoryEntriesCache.Set(fullpath, entry, wfs.option.EntryCacheTtl)
- }
-
- return nil
- })
-
- return
+ // read from async meta cache
+ meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))
+ cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath)
+ if cacheErr == filer_pb.ErrNotFound {
+ return nil, fuse.ENOENT
+ }
+ return cachedEntry.ToProtoEntry(), cacheErr
}
diff --git a/weed/ftpd/ftp_server.go b/weed/ftpd/ftp_server.go
new file mode 100644
index 000000000..4a0dca2c3
--- /dev/null
+++ b/weed/ftpd/ftp_server.go
@@ -0,0 +1,81 @@
+package ftpd
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+
+ ftpserver "github.com/fclairamb/ftpserverlib"
+ "google.golang.org/grpc"
+)
+
+type FtpServerOption struct {
+ Filer string
+ IP string
+ IpBind string
+ Port int
+ FilerGrpcAddress string
+ FtpRoot string
+ GrpcDialOption grpc.DialOption
+ PassivePortStart int
+ PassivePortStop int
+}
+
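+// SftpServer implements ftpserver.MainDriver, serving FTP backed by a
+// SeaweedFS filer.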
+type SftpServer struct {
+ option *FtpServerOption
+ ftpListener net.Listener
+}
+
+var _ = ftpserver.MainDriver(&SftpServer{})
+
+// NewFtpServer returns a new FTP server driver
+func NewFtpServer(ftpListener net.Listener, option *FtpServerOption) (*SftpServer, error) {
+ var err error
+ server := &SftpServer{
+ option: option,
+ ftpListener: ftpListener,
+ }
+ return server, err
+}
+
+// GetSettings returns some general settings around the server setup
+func (s *SftpServer) GetSettings() (*ftpserver.Settings, error) {
+ var portRange *ftpserver.PortRange
+ if s.option.PassivePortStart > 0 && s.option.PassivePortStop > s.option.PassivePortStart {
+ portRange = &ftpserver.PortRange{
+ Start: s.option.PassivePortStart,
+ End: s.option.PassivePortStop,
+ }
+ }
+
+ return &ftpserver.Settings{
+ Listener: s.ftpListener,
+ ListenAddr: fmt.Sprintf("%s:%d", s.option.IpBind, s.option.Port),
+ PublicHost: s.option.IP,
+ PassiveTransferPortRange: portRange,
+ ActiveTransferPortNon20: true,
+ IdleTimeout: -1,
+ ConnectionTimeout: 20,
+ }, nil
+}
+
+// ClientConnected is called to send the very first welcome message
+func (s *SftpServer) ClientConnected(cc ftpserver.ClientContext) (string, error) {
+ return "Welcome to SeaweedFS FTP Server", nil
+}
+
+// ClientDisconnected is called when the user disconnects, even if they never authenticated
+func (s *SftpServer) ClientDisconnected(cc ftpserver.ClientContext) {
+}
+
+// AuthUser authenticates the user and selects a handling driver
+func (s *SftpServer) AuthUser(cc ftpserver.ClientContext, username, password string) (ftpserver.ClientDriver, error) {
+ return nil, nil
+}
+
+// GetTLSConfig returns a TLS certificate to use
+// The certificate could change frequently if we use something like "Let's Encrypt"
+func (s *SftpServer) GetTLSConfig() (*tls.Config, error) {
+ return nil, errors.New("no TLS certificate configured")
+}
diff --git a/weed/glog/glog.go b/weed/glog/glog.go
index f46632f1c..352a7e185 100644
--- a/weed/glog/glog.go
+++ b/weed/glog/glog.go
@@ -74,8 +74,8 @@ import (
"bufio"
"bytes"
"errors"
- "flag"
"fmt"
+ flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"io"
stdLog "log"
"os"
@@ -398,7 +398,7 @@ type flushSyncWriter interface {
func init() {
flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", true, "log to standard error as well as files")
- flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.verbosity, "v", "log levels [0|1|2|3|4], default to 0")
flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
diff --git a/weed/glog/glog_file.go b/weed/glog/glog_file.go
index bb8e6902f..3f700d8fc 100644
--- a/weed/glog/glog_file.go
+++ b/weed/glog/glog_file.go
@@ -20,8 +20,8 @@ package glog
import (
"errors"
- "flag"
"fmt"
+ flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"os"
"os/user"
"path/filepath"
diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go
new file mode 100644
index 000000000..2e5f709f3
--- /dev/null
+++ b/weed/iamapi/iamapi_handlers.go
@@ -0,0 +1,105 @@
+package iamapi
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "strconv"
+
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+
+ "github.com/aws/aws-sdk-go/service/iam"
+)
+
+type mimeType string
+
+const (
+ mimeNone mimeType = ""
+ mimeXML mimeType = "application/xml"
+)
+
+func setCommonHeaders(w http.ResponseWriter) {
+ w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
+ w.Header().Set("Accept-Ranges", "bytes")
+}
+
+// Encodes the response into XML format.
+func encodeResponse(response interface{}) []byte {
+ var bytesBuffer bytes.Buffer
+ bytesBuffer.WriteString(xml.Header)
+ e := xml.NewEncoder(&bytesBuffer)
+ e.Encode(response)
+ return bytesBuffer.Bytes()
+}
+
+// If none of the HTTP routes match, respond with MethodNotAllowed
+func notFoundHandler(w http.ResponseWriter, r *http.Request) {
+ glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
+ writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
+}
+
+func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
+ apiError := s3err.GetAPIError(errorCode)
+ errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
+ encodedErrorResponse := encodeResponse(errorResponse)
+ writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
+}
+
+func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
+ errCode := err.Error()
+ errorResp := ErrorResponse{}
+ errorResp.Error.Type = "Sender"
+ errorResp.Error.Code = &errCode
+ if msg != nil {
+ errMsg := msg.Error()
+ errorResp.Error.Message = &errMsg
+ }
+ glog.Errorf("Response %+v", err)
+ switch errCode {
+ case iam.ErrCodeNoSuchEntityException:
+ msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
+ errorResp.Error.Message = &msg
+ writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML)
+ case iam.ErrCodeServiceFailureException:
+ writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
+ default:
+ writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
+ }
+}
+
+func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
+ return s3err.RESTErrorResponse{
+ Code: err.Code,
+ Message: err.Description,
+ Resource: resource,
+ RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
+ }
+}
+
+func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
+ setCommonHeaders(w)
+ if response != nil {
+ w.Header().Set("Content-Length", strconv.Itoa(len(response)))
+ }
+ if mType != mimeNone {
+ w.Header().Set("Content-Type", string(mType))
+ }
+ w.WriteHeader(statusCode)
+ if response != nil {
+ glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
+ _, err := w.Write(response)
+ if err != nil {
+ glog.V(0).Infof("write err: %v", err)
+ }
+ w.(http.Flusher).Flush()
+ }
+}
+
+func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
+ writeResponse(w, http.StatusOK, response, mimeXML)
+}
diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go
new file mode 100644
index 000000000..b00ada234
--- /dev/null
+++ b/weed/iamapi/iamapi_management_handlers.go
@@ -0,0 +1,449 @@
+package iamapi
+
+import (
+ "crypto/sha1"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/service/iam"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+)
+
+const (
+ charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+ charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/"
+ policyDocumentVersion = "2012-10-17"
+ StatementActionAdmin = "*"
+ StatementActionWrite = "Put*"
+ StatementActionRead = "Get*"
+ StatementActionList = "List*"
+ StatementActionTagging = "Tagging*"
+)
+
+var (
+ seededRand *rand.Rand = rand.New(
+ rand.NewSource(time.Now().UnixNano()))
+ policyDocuments = map[string]*PolicyDocument{}
+ policyLock      = sync.RWMutex{}
+ s3cfgLock       = sync.RWMutex{}
+)
+
+func MapToStatementAction(action string) string {
+ switch action {
+ case StatementActionAdmin:
+ return s3_constants.ACTION_ADMIN
+ case StatementActionWrite:
+ return s3_constants.ACTION_WRITE
+ case StatementActionRead:
+ return s3_constants.ACTION_READ
+ case StatementActionList:
+ return s3_constants.ACTION_LIST
+ case StatementActionTagging:
+ return s3_constants.ACTION_TAGGING
+ default:
+ return ""
+ }
+}
+
+func MapToIdentitiesAction(action string) string {
+ switch action {
+ case s3_constants.ACTION_ADMIN:
+ return StatementActionAdmin
+ case s3_constants.ACTION_WRITE:
+ return StatementActionWrite
+ case s3_constants.ACTION_READ:
+ return StatementActionRead
+ case s3_constants.ACTION_LIST:
+ return StatementActionList
+ case s3_constants.ACTION_TAGGING:
+ return StatementActionTagging
+ default:
+ return ""
+ }
+}
+
+type Statement struct {
+ Effect string `json:"Effect"`
+ Action []string `json:"Action"`
+ Resource []string `json:"Resource"`
+}
+
+type Policies struct {
+ Policies map[string]PolicyDocument `json:"policies"`
+}
+
+type PolicyDocument struct {
+ Version string `json:"Version"`
+ Statement []*Statement `json:"Statement"`
+}
+
+func (p PolicyDocument) String() string {
+ b, _ := json.Marshal(p)
+ return string(b)
+}
+
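+// Hash returns the hex-encoded SHA-1 digest of *s; CreatePolicy uses it to
+// derive the policy id from the policy document.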
+func Hash(s *string) string {
+ h := sha1.New()
+ h.Write([]byte(*s))
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
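+// StringWithCharset returns a random string of the given length drawn from
+// charset; CreateAccessKey uses it for access key ids (21 chars, upper-case
+// charset) and secrets (42 chars, full charset).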
+func StringWithCharset(length int, charset string) string {
+ b := make([]byte, length)
+ for i := range b {
+ b[i] = charset[seededRand.Intn(len(charset))]
+ }
+ return string(b)
+}
+
+func (iama *IamApiServer) ListUsers(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListUsersResponse) {
+ for _, ident := range s3cfg.Identities {
+ resp.ListUsersResult.Users = append(resp.ListUsersResult.Users, &iam.User{UserName: &ident.Name})
+ }
+ return resp
+}
+
+func (iama *IamApiServer) ListAccessKeys(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListAccessKeysResponse) {
+ status := iam.StatusTypeActive
+ for _, ident := range s3cfg.Identities {
+ for _, cred := range ident.Credentials {
+ resp.ListAccessKeysResult.AccessKeyMetadata = append(resp.ListAccessKeysResult.AccessKeyMetadata,
+ &iam.AccessKeyMetadata{UserName: &ident.Name, AccessKeyId: &cred.AccessKey, Status: &status},
+ )
+ }
+ }
+ return resp
+}
+
+func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateUserResponse) {
+ userName := values.Get("UserName")
+ resp.CreateUserResult.User.UserName = &userName
+ s3cfg.Identities = append(s3cfg.Identities, &iam_pb.Identity{Name: userName})
+ return resp
+}
+
+func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) {
+ for i, ident := range s3cfg.Identities {
+ if userName == ident.Name {
+ s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...)
+ return resp, nil
+ }
+ }
+ return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) {
+ for _, ident := range s3cfg.Identities {
+ if userName == ident.Name {
+ resp.GetUserResult.User = iam.User{UserName: &ident.Name}
+ return resp, nil
+ }
+ }
+ return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) {
+ if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil {
+ return PolicyDocument{}, err
+ }
+ return policyDocument, err
+}
+
+func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) {
+ policyName := values.Get("PolicyName")
+ policyDocumentString := values.Get("PolicyDocument")
+ policyDocument, err := GetPolicyDocument(&policyDocumentString)
+ if err != nil {
+ return CreatePolicyResponse{}, err
+ }
+ policyId := Hash(&policyDocumentString)
+ arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName)
+ resp.CreatePolicyResult.Policy.PolicyName = &policyName
+ resp.CreatePolicyResult.Policy.Arn = &arn
+ resp.CreatePolicyResult.Policy.PolicyId = &policyId
+ policies := Policies{}
+ policyLock.Lock()
+ defer policyLock.Unlock()
+ if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil {
+ return resp, err
+ }
+ policies.Policies[policyName] = policyDocument
+ if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) {
+ userName := values.Get("UserName")
+ policyName := values.Get("PolicyName")
+ policyDocumentString := values.Get("PolicyDocument")
+ policyDocument, err := GetPolicyDocument(&policyDocumentString)
+ if err != nil {
+ return PutUserPolicyResponse{}, err
+ }
+ policyDocuments[policyName] = &policyDocument
+ actions := GetActions(&policyDocument)
+ for _, ident := range s3cfg.Identities {
+ if userName == ident.Name {
+ ident.Actions = append(ident.Actions, actions...)
+ break
+ }
+ }
+ return resp, nil
+}
+
+func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) {
+ userName := values.Get("UserName")
+ policyName := values.Get("PolicyName")
+ for _, ident := range s3cfg.Identities {
+ if userName != ident.Name {
+ continue
+ }
+
+ resp.GetUserPolicyResult.UserName = userName
+ resp.GetUserPolicyResult.PolicyName = policyName
+ if len(ident.Actions) == 0 {
+ return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+ }
+
+ policyDocument := PolicyDocument{Version: policyDocumentVersion}
+ statements := make(map[string][]string)
+ for _, action := range ident.Actions {
+ // parse "Read:EXAMPLE-BUCKET"
+ act := strings.Split(action, ":")
+
+ resource := "*"
+ if len(act) == 2 {
+ resource = fmt.Sprintf("arn:aws:s3:::%s/*", act[1])
+ }
+ statements[resource] = append(statements[resource],
+ fmt.Sprintf("s3:%s", MapToIdentitiesAction(act[0])),
+ )
+ }
+ for resource, actions := range statements {
+ isEqAction := false
+ for i, statement := range policyDocument.Statement {
+ if reflect.DeepEqual(statement.Action, actions) {
+ policyDocument.Statement[i].Resource = append(
+ policyDocument.Statement[i].Resource, resource)
+ isEqAction = true
+ break
+ }
+ }
+ if isEqAction {
+ continue
+ }
+ policyDocumentStatement := Statement{
+ Effect: "Allow",
+ Action: actions,
+ }
+ policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource)
+ policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement)
+ }
+ resp.GetUserPolicyResult.PolicyDocument = policyDocument.String()
+ return resp, nil
+ }
+ return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) {
+ userName := values.Get("UserName")
+ for i, ident := range s3cfg.Identities {
+ if ident.Name == userName {
+ s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...)
+ return resp, nil
+ }
+ }
+ return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
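+// GetActions translates an IAM policy document into SeaweedFS identity
+// actions. For example, an Allow statement with action "s3:Get*" on resource
+// "arn:aws:s3:::my-bucket/*" becomes "Read:my-bucket".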
+func GetActions(policy *PolicyDocument) (actions []string) {
+ for _, statement := range policy.Statement {
+ if statement.Effect != "Allow" {
+ continue
+ }
+ for _, resource := range statement.Resource {
+ // Parse "arn:aws:s3:::my-bucket/shared/*"
+ res := strings.Split(resource, ":")
+ if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" {
+ glog.Infof("not match resource: %s", res)
+ continue
+ }
+ for _, action := range statement.Action {
+ // Parse "s3:Get*"
+ act := strings.Split(action, ":")
+ if len(act) != 2 || act[0] != "s3" {
+ glog.Infof("not match action: %s", act)
+ continue
+ }
+ statementAction := MapToStatementAction(act[1])
+ if res[5] == "*" {
+ actions = append(actions, statementAction)
+ continue
+ }
+ // Parse my-bucket/shared/*
+ path := strings.Split(res[5], "/")
+ if len(path) != 2 || path[1] != "*" {
+ glog.Infof("not match bucket: %s", path)
+ continue
+ }
+ actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0]))
+ }
+ }
+ }
+ return actions
+}
+
+func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) {
+ userName := values.Get("UserName")
+ status := iam.StatusTypeActive
+ accessKeyId := StringWithCharset(21, charsetUpper)
+ secretAccessKey := StringWithCharset(42, charset)
+ resp.CreateAccessKeyResult.AccessKey.AccessKeyId = &accessKeyId
+ resp.CreateAccessKeyResult.AccessKey.SecretAccessKey = &secretAccessKey
+ resp.CreateAccessKeyResult.AccessKey.UserName = &userName
+ resp.CreateAccessKeyResult.AccessKey.Status = &status
+ changed := false
+ for _, ident := range s3cfg.Identities {
+ if userName == ident.Name {
+ ident.Credentials = append(ident.Credentials,
+ &iam_pb.Credential{AccessKey: accessKeyId, SecretKey: secretAccessKey})
+ changed = true
+ break
+ }
+ }
+ if !changed {
+ s3cfg.Identities = append(s3cfg.Identities,
+ &iam_pb.Identity{Name: userName,
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: accessKeyId,
+ SecretKey: secretAccessKey,
+ },
+ },
+ },
+ )
+ }
+ return resp
+}
+
+func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp DeleteAccessKeyResponse) {
+ userName := values.Get("UserName")
+ accessKeyId := values.Get("AccessKeyId")
+ for _, ident := range s3cfg.Identities {
+ if userName == ident.Name {
+ for i, cred := range ident.Credentials {
+ if cred.AccessKey == accessKeyId {
+ ident.Credentials = append(ident.Credentials[:i], ident.Credentials[i+1:]...)
+ break
+ }
+ }
+ break
+ }
+ }
+ return resp
+}
+
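+// DoActions dispatches a form-encoded IAM action (CreateUser, PutUserPolicy,
+// CreateAccessKey, ...) against the in-memory configuration, and persists the
+// configuration back through the filer whenever the action mutated it.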
+func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
+ if err := r.ParseForm(); err != nil {
+ writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+ return
+ }
+ values := r.PostForm
+ // the package-level s3cfgLock serializes configuration reads and writes
+ s3cfgLock.RLock()
+ s3cfg := &iam_pb.S3ApiConfiguration{}
+ if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
+ writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+ return
+ }
+ s3cfgLock.RUnlock()
+
+ glog.V(4).Infof("DoActions: %+v", values)
+ var response interface{}
+ var err error
+ changed := true
+ switch r.Form.Get("Action") {
+ case "ListUsers":
+ response = iama.ListUsers(s3cfg, values)
+ changed = false
+ case "ListAccessKeys":
+ response = iama.ListAccessKeys(s3cfg, values)
+ changed = false
+ case "CreateUser":
+ response = iama.CreateUser(s3cfg, values)
+ case "GetUser":
+ userName := values.Get("UserName")
+ response, err = iama.GetUser(s3cfg, userName)
+ if err != nil {
+ writeIamErrorResponse(w, err, "user", userName, nil)
+ return
+ }
+ changed = false
+ case "DeleteUser":
+ userName := values.Get("UserName")
+ response, err = iama.DeleteUser(s3cfg, userName)
+ if err != nil {
+ writeIamErrorResponse(w, err, "user", userName, nil)
+ return
+ }
+ case "CreateAccessKey":
+ response = iama.CreateAccessKey(s3cfg, values)
+ case "DeleteAccessKey":
+ response = iama.DeleteAccessKey(s3cfg, values)
+ case "CreatePolicy":
+ response, err = iama.CreatePolicy(s3cfg, values)
+ if err != nil {
+ glog.Errorf("CreatePolicy: %+v", err)
+ writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+ return
+ }
+ case "PutUserPolicy":
+ response, err = iama.PutUserPolicy(s3cfg, values)
+ if err != nil {
+ glog.Errorf("PutUserPolicy: %+v", err)
+ writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+ return
+ }
+ case "GetUserPolicy":
+ response, err = iama.GetUserPolicy(s3cfg, values)
+ if err != nil {
+ writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+ return
+ }
+ changed = false
+ case "DeleteUserPolicy":
+ if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil {
+ writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+ }
+ default:
+ errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented)
+ errorResponse := ErrorResponse{}
+ errorResponse.Error.Code = &errNotImplemented.Code
+ errorResponse.Error.Message = &errNotImplemented.Description
+ writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML)
+ return
+ }
+ if changed {
+ s3cfgLock.Lock()
+ err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg)
+ s3cfgLock.Unlock()
+ if err != nil {
+ writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
+ return
+ }
+ }
+ writeSuccessResponseXML(w, encodeResponse(response))
+}
diff --git a/weed/iamapi/iamapi_response.go b/weed/iamapi/iamapi_response.go
new file mode 100644
index 000000000..77328b608
--- /dev/null
+++ b/weed/iamapi/iamapi_response.go
@@ -0,0 +1,103 @@
+package iamapi
+
+import (
+ "encoding/xml"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/service/iam"
+)
+
+type CommonResponse struct {
+ ResponseMetadata struct {
+ RequestId string `xml:"RequestId"`
+ } `xml:"ResponseMetadata"`
+}
+
+type ListUsersResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListUsersResponse"`
+ ListUsersResult struct {
+ Users []*iam.User `xml:"Users>member"`
+ IsTruncated bool `xml:"IsTruncated"`
+ } `xml:"ListUsersResult"`
+}
+
+type ListAccessKeysResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListAccessKeysResponse"`
+ ListAccessKeysResult struct {
+ AccessKeyMetadata []*iam.AccessKeyMetadata `xml:"AccessKeyMetadata>member"`
+ IsTruncated bool `xml:"IsTruncated"`
+ } `xml:"ListAccessKeysResult"`
+}
+
+type DeleteAccessKeyResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteAccessKeyResponse"`
+}
+
+type CreatePolicyResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreatePolicyResponse"`
+ CreatePolicyResult struct {
+ Policy iam.Policy `xml:"Policy"`
+ } `xml:"CreatePolicyResult"`
+}
+
+type CreateUserResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateUserResponse"`
+ CreateUserResult struct {
+ User iam.User `xml:"User"`
+ } `xml:"CreateUserResult"`
+}
+
+type DeleteUserResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"`
+}
+
+type GetUserResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserResponse"`
+ GetUserResult struct {
+ User iam.User `xml:"User"`
+ } `xml:"GetUserResult"`
+}
+
+type CreateAccessKeyResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateAccessKeyResponse"`
+ CreateAccessKeyResult struct {
+ AccessKey iam.AccessKey `xml:"AccessKey"`
+ } `xml:"CreateAccessKeyResult"`
+}
+
+type PutUserPolicyResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ PutUserPolicyResponse"`
+}
+
+type GetUserPolicyResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserPolicyResponse"`
+ GetUserPolicyResult struct {
+ UserName string `xml:"UserName"`
+ PolicyName string `xml:"PolicyName"`
+ PolicyDocument string `xml:"PolicyDocument"`
+ } `xml:"GetUserPolicyResult"`
+}
+
+type ErrorResponse struct {
+ CommonResponse
+ XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ErrorResponse"`
+ Error struct {
+ iam.ErrorDetails
+ Type string `xml:"Type"`
+ } `xml:"Error"`
+}
+
+func (r *CommonResponse) SetRequestId() {
+ r.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano())
+}
diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go
new file mode 100644
index 000000000..18af1a919
--- /dev/null
+++ b/weed/iamapi/iamapi_server.go
@@ -0,0 +1,149 @@
+package iamapi
+
+// https://docs.aws.amazon.com/cli/latest/reference/iam/list-roles.html
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/gorilla/mux"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+ "github.com/chrislusf/seaweedfs/weed/s3api"
+ . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+type IamS3ApiConfig interface {
+ GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+ PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+ GetPolicies(policies *Policies) (err error)
+ PutPolicies(policies *Policies) (err error)
+}
+
+type IamS3ApiConfigure struct {
+ option *IamServerOption
+ masterClient *wdclient.MasterClient
+}
+
+type IamServerOption struct {
+ Masters string
+ Filer string
+ Port int
+ FilerGrpcAddress string
+ GrpcDialOption grpc.DialOption
+}
+
+type IamApiServer struct {
+ s3ApiConfig IamS3ApiConfig
+ iam *s3api.IdentityAccessManagement
+}
+
+var s3ApiConfigure IamS3ApiConfig
+
+func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
+ s3ApiConfigure = IamS3ApiConfigure{
+ option: option,
+ masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(option.Masters, ",")),
+ }
+ s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
+ iamApiServer = &IamApiServer{
+ s3ApiConfig: s3ApiConfigure,
+ iam: s3api.NewIdentityAccessManagement(&s3Option),
+ }
+
+ iamApiServer.registerRouter(router)
+
+ return iamApiServer, nil
+}
+
+func (iama *IamApiServer) registerRouter(router *mux.Router) {
+ // API Router
+ apiRouter := router.PathPrefix("/").Subrouter()
+ // ListBuckets
+
+ // apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
+ apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
+ //
+ // NotFound
+ apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
+}
+
+func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+ var buf bytes.Buffer
+ err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if buf.Len() > 0 {
+ if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+ buf := bytes.Buffer{}
+ if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil {
+ return fmt.Errorf("S3ConfigurationToText: %s", err)
+ }
+ return pb.WithGrpcFilerClient(
+ iam.option.FilerGrpcAddress,
+ iam.option.GrpcDialOption,
+ func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()); err != nil {
+ return err
+ }
+ return nil
+ },
+ )
+}
+
+func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) {
+ var buf bytes.Buffer
+ err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if buf.Len() == 0 {
+ policies.Policies = make(map[string]PolicyDocument)
+ return nil
+ }
+ if err := json.Unmarshal(buf.Bytes(), policies); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) {
+ var b []byte
+ if b, err = json.Marshal(policies); err != nil {
+ return err
+ }
+ return pb.WithGrpcFilerClient(
+ iam.option.FilerGrpcAddress,
+ iam.option.GrpcDialOption,
+ func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil {
+ return err
+ }
+ return nil
+ },
+ )
+}
diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go
new file mode 100644
index 000000000..09aaf0ac8
--- /dev/null
+++ b/weed/iamapi/iamapi_test.go
@@ -0,0 +1,181 @@
+package iamapi
+
+import (
+ "encoding/xml"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/gorilla/mux"
+ "github.com/jinzhu/copier"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+var GetS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+var PutS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+var GetPolicies func(policies *Policies) (err error)
+var PutPolicies func(policies *Policies) (err error)
+
+var s3config = iam_pb.S3ApiConfiguration{}
+var policiesFile = Policies{Policies: make(map[string]PolicyDocument)}
+var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}}
+
+type iamS3ApiConfigureMock struct{}
+
+func (iam iamS3ApiConfigureMock) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+ _ = copier.Copy(&s3cfg.Identities, &s3config.Identities)
+ return nil
+}
+
+func (iam iamS3ApiConfigureMock) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+ _ = copier.Copy(&s3config.Identities, &s3cfg.Identities)
+ return nil
+}
+
+func (iam iamS3ApiConfigureMock) GetPolicies(policies *Policies) (err error) {
+ _ = copier.Copy(&policies, &policiesFile)
+ return nil
+}
+
+func (iam iamS3ApiConfigureMock) PutPolicies(policies *Policies) (err error) {
+ _ = copier.Copy(&policiesFile, &policies)
+ return nil
+}
+
+func TestCreateUser(t *testing.T) {
+ userName := aws.String("Test")
+ params := &iam.CreateUserInput{UserName: userName}
+ req, _ := iam.New(session.New()).CreateUserRequest(params)
+ _ = req.Build()
+ out := CreateUserResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestListUsers(t *testing.T) {
+ params := &iam.ListUsersInput{}
+ req, _ := iam.New(session.New()).ListUsersRequest(params)
+ _ = req.Build()
+ out := ListUsersResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestListAccessKeys(t *testing.T) {
+ svc := iam.New(session.New())
+ params := &iam.ListAccessKeysInput{}
+ req, _ := svc.ListAccessKeysRequest(params)
+ _ = req.Build()
+ out := ListAccessKeysResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestGetUser(t *testing.T) {
+ userName := aws.String("Test")
+ params := &iam.GetUserInput{UserName: userName}
+ req, _ := iam.New(session.New()).GetUserRequest(params)
+ _ = req.Build()
+ out := GetUserResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+// TODO: flatten statements
+func TestCreatePolicy(t *testing.T) {
+ params := &iam.CreatePolicyInput{
+ PolicyName: aws.String("S3-read-only-example-bucket"),
+ PolicyDocument: aws.String(`
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:Get*",
+ "s3:List*"
+ ],
+ "Resource": [
+ "arn:aws:s3:::EXAMPLE-BUCKET",
+ "arn:aws:s3:::EXAMPLE-BUCKET/*"
+ ]
+ }
+ ]
+ }`),
+ }
+ req, _ := iam.New(session.New()).CreatePolicyRequest(params)
+ _ = req.Build()
+ out := CreatePolicyResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestPutUserPolicy(t *testing.T) {
+ userName := aws.String("Test")
+ params := &iam.PutUserPolicyInput{
+ UserName: userName,
+ PolicyName: aws.String("S3-read-only-example-bucket"),
+ PolicyDocument: aws.String(
+ `{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:Get*",
+ "s3:List*"
+ ],
+ "Resource": [
+ "arn:aws:s3:::EXAMPLE-BUCKET",
+ "arn:aws:s3:::EXAMPLE-BUCKET/*"
+ ]
+ }
+ ]
+ }`),
+ }
+ req, _ := iam.New(session.New()).PutUserPolicyRequest(params)
+ _ = req.Build()
+ out := PutUserPolicyResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestGetUserPolicy(t *testing.T) {
+ userName := aws.String("Test")
+ params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")}
+ req, _ := iam.New(session.New()).GetUserPolicyRequest(params)
+ _ = req.Build()
+ out := GetUserPolicyResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestDeleteUser(t *testing.T) {
+ userName := aws.String("Test")
+ params := &iam.DeleteUserInput{UserName: userName}
+ req, _ := iam.New(session.New()).DeleteUserRequest(params)
+ _ = req.Build()
+ out := DeleteUserResponse{}
+ response, err := executeRequest(req.HTTPRequest, out)
+ assert.Equal(t, nil, err)
+ assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) {
+ rr := httptest.NewRecorder()
+ apiRouter := mux.NewRouter().SkipClean(true)
+ apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions)
+ apiRouter.ServeHTTP(rr, req)
+ return rr, xml.Unmarshal(rr.Body.Bytes(), &v)
+}
diff --git a/weed/images/orientation.go b/weed/images/orientation.go
index 4bff89311..a592a7d8b 100644
--- a/weed/images/orientation.go
+++ b/weed/images/orientation.go
@@ -7,7 +7,7 @@ import (
"image/jpeg"
"log"
- "github.com/rwcarlsen/goexif/exif"
+ "github.com/seaweedfs/goexif/exif"
)
//many code is copied from http://camlistore.org/pkg/images/images.go
diff --git a/weed/images/resizing.go b/weed/images/resizing.go
index ff0eff5e1..b048daa1c 100644
--- a/weed/images/resizing.go
+++ b/weed/images/resizing.go
@@ -6,10 +6,11 @@ import (
"image/gif"
"image/jpeg"
"image/png"
+ "io"
- "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/disintegration/imaging"
- "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
)
func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
}
}
} else {
+ read.Seek(0, 0)
return read, bounds.Dx(), bounds.Dy()
}
var buf bytes.Buffer
diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go
new file mode 100644
index 000000000..8e5b56fd0
--- /dev/null
+++ b/weed/messaging/broker/broker_append.go
@@ -0,0 +1,113 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
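+// appendToFile assigns a volume, uploads data as a single chunk, and appends
+// that chunk to the target file's entry through the filer.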
+func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error {
+
+ assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data)
+ if err2 != nil {
+ return err2
+ }
+
+ dir, name := util.FullPath(targetFile).DirAndName()
+
+ // append the chunk
+ if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AppendToEntryRequest{
+ Directory: dir,
+ EntryName: name,
+ Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)},
+ }
+
+ _, err := client.AppendToEntry(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("append to file %v: %v", request, err)
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("append to file %v: %v", targetFile, err)
+ }
+
+ return nil
+}
+
+func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+
+ var assignResult = &operation.AssignResult{}
+
+ // assign a volume location
+ if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: topicConfig.Replication,
+ Collection: topicConfig.Collection,
+ }
+
+ resp, err := client.AssignVolume(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ return err
+ }
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
+
+ assignResult.Auth = security.EncodedJwt(resp.Auth)
+ assignResult.Fid = resp.FileId
+ assignResult.Url = resp.Url
+ assignResult.PublicUrl = resp.PublicUrl
+ assignResult.Count = uint64(resp.Count)
+
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // upload data
+ targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+ uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
+
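+// Compile-time check that *MessageBroker implements filer_pb.FilerClient.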
+var _ = filer_pb.FilerClient(&MessageBroker{})
+
+func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) {
+
+ for _, filer := range broker.option.Filers {
+ if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil {
+ if err == io.EOF {
+ return
+ }
+ glog.V(0).Infof("fail to connect to %s: %v", filer, err)
+ } else {
+ break
+ }
+ }
+
+ return
+
+}
+
+func (broker *MessageBroker) AdjustedUrl(location *filer_pb.Location) string {
+ return location.Url
+}
diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go
new file mode 100644
index 000000000..ba141fdd0
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server.go
@@ -0,0 +1,37 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
+ panic("implement me")
+}
+
+func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) {
+ resp := &messaging_pb.DeleteTopicResponse{}
+ dir, entry := genTopicDirEntry(request.Namespace, request.Topic)
+ if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil {
+ return nil, err
+ } else if exists {
+ if err = filer_pb.Remove(broker, dir, entry, true, true, true, false, nil); err != nil {
+ return nil, err
+ }
+ }
+ return resp, nil
+}
+
+func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
+ panic("implement me")
+}
+
+func genTopicDir(namespace, topic string) string {
+ return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, namespace, topic)
+}
+
+func genTopicDirEntry(namespace, topic string) (dir, entry string) {
+ return fmt.Sprintf("%s/%s", filer.TopicsDir, namespace), topic
+}
diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go
new file mode 100644
index 000000000..3c14f3220
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_discovery.go
@@ -0,0 +1,116 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+/*
+Topic discovery:
+
+When a publisher or subscriber connects, it asks for the whole broker list and runs consistent hashing to find its broker.
+
+The broker checks its peers to see whether the topic is already hosted by another broker; if that broker is alive and acknowledged alive, it redirects the client there.
+Otherwise, it just hosts the topic itself.
+
+So if the publishers and subscribers connect around the same time, they all reach the same broker. Everyone is happy.
+If one of them connects very late, after the system topology has changed quite a bit with servers added or removed, checking peers helps.
+
+*/
+
+func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) {
+
+ t := &messaging_pb.FindBrokerResponse{}
+ var peers []string
+
+ targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition)
+
+ for _, filer := range broker.option.Filers {
+ err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{
+ Resource: targetTopicPartition,
+ })
+ if err != nil {
+ return err
+ }
+ if resp.Found && len(resp.Resources) > 0 {
+ t.Broker = resp.Resources[0].GrpcAddresses
+ return nil
+ }
+ for _, b := range resp.Resources {
+ peers = append(peers, b.GrpcAddresses)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ t.Broker = PickMember(peers, []byte(targetTopicPartition))
+
+ return t, nil
+
+}
+
+func (broker *MessageBroker) checkFilers() {
+
+ // contact a filer about masters
+ var masters []string
+ found := false
+ for !found {
+ for _, filer := range broker.option.Filers {
+ err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return err
+ }
+ masters = append(masters, resp.Masters...)
+ return nil
+ })
+ if err == nil {
+ found = true
+ break
+ }
+ glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
+ time.Sleep(time.Second)
+ }
+ }
+ glog.V(0).Infof("received master list: %s", masters)
+
+ // contact each masters for filers
+ var filers []string
+ found = false
+ for !found {
+ for _, master := range masters {
+ err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error {
+ resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
+ ClientType: "filer",
+ })
+ if err != nil {
+ return err
+ }
+
+ filers = append(filers, resp.GrpcAddresses...)
+
+ return nil
+ })
+ if err == nil {
+ found = true
+ break
+ }
+ glog.V(0).Infof("failed to list filers: %v", err)
+ time.Sleep(time.Second)
+ }
+ }
+ glog.V(0).Infof("received filer list: %s", filers)
+
+ broker.option.Filers = filers
+
+}
diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go
new file mode 100644
index 000000000..6e6b723d1
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_publish.go
@@ -0,0 +1,112 @@
+package broker
+
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {
+
+ // process initial request
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ // TODO look it up
+ topicConfig := &messaging_pb.TopicConfiguration{
+ // IsTransient: true,
+ }
+
+ // send init response
+ initResponse := &messaging_pb.PublishResponse{
+ Config: nil,
+ Redirect: nil,
+ }
+ err = stream.Send(initResponse)
+ if err != nil {
+ return err
+ }
+ if initResponse.Redirect != nil {
+ return nil
+ }
+
+ // get lock
+ tp := TopicPartition{
+ Namespace: in.Init.Namespace,
+ Topic: in.Init.Topic,
+ Partition: in.Init.Partition,
+ }
+
+ tpDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, tp.Namespace, tp.Topic)
+ md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
+ // println("chan data stored under", tpDir, "as", md5File)
+
+ if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists {
+ return fmt.Errorf("channel is already closed")
+ }
+
+ tl := broker.topicManager.RequestLock(tp, topicConfig, true)
+ defer broker.topicManager.ReleaseLock(tp, true)
+
+ md5hash := md5.New()
+ // process each message
+ for {
+ // println("recv")
+ in, err := stream.Recv()
+ // glog.V(0).Infof("recieved %v err: %v", in, err)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if in.Data == nil {
+ continue
+ }
+
+ // fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value))
+
+ data, err := proto.Marshal(in.Data)
+ if err != nil {
+ glog.Errorf("marshall error: %v\n", err)
+ continue
+ }
+
+ tl.logBuffer.AddToBuffer(in.Data.Key, data, in.Data.EventTimeNs)
+
+ if in.Data.IsClose {
+ // println("server received closing")
+ break
+ }
+
+ md5hash.Write(in.Data.Value)
+
+ }
+
+ if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
+ glog.V(0).Infof("err writing %s: %v", md5File, err)
+ }
+
+ // fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
+
+ // send the close ack
+ // println("server send ack closing")
+ if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
+ glog.V(0).Infof("err sending close response: %v", err)
+ }
+ return nil
+
+}
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
new file mode 100644
index 000000000..3021473e5
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -0,0 +1,177 @@
+package broker
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {
+
+ // process initial request
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ var processedTsNs int64
+ var messageCount int64
+ subscriberId := in.Init.SubscriberId
+
+ // TODO look it up
+ topicConfig := &messaging_pb.TopicConfiguration{
+ // IsTransient: true,
+ }
+
+ // get lock
+ tp := TopicPartition{
+ Namespace: in.Init.Namespace,
+ Topic: in.Init.Topic,
+ Partition: in.Init.Partition,
+ }
+ fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String())
+ defer func() {
+ fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs))
+ }()
+
+ lock := broker.topicManager.RequestLock(tp, topicConfig, false)
+ defer broker.topicManager.ReleaseLock(tp, false)
+
+ isConnected := true
+ go func() {
+ for isConnected {
+ if _, err := stream.Recv(); err != nil {
+ // println("disconnecting connection to", subscriberId, tp.String())
+ isConnected = false
+ lock.cond.Signal()
+ }
+ }
+ }()
+
+ lastReadTime := time.Now()
+ switch in.Init.StartPosition {
+ case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
+ lastReadTime = time.Unix(0, in.Init.TimestampNs)
+ case messaging_pb.SubscriberMessage_InitMessage_LATEST:
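+ // LATEST: leave lastReadTime at time.Now(), skipping any history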
+ case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
+ lastReadTime = time.Unix(0, 0)
+ }
+
+ // how to process each message
+ // an error returned will end the subscription
+ eachMessageFn := func(m *messaging_pb.Message) error {
+ err := stream.Send(&messaging_pb.BrokerMessage{
+ Data: m,
+ })
+ if err != nil {
+ glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
+ }
+ return err
+ }
+
+ eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
+ m := &messaging_pb.Message{}
+ if err = proto.Unmarshal(logEntry.Data, m); err != nil {
+ glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ return err
+ }
+ // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
+ if err = eachMessageFn(m); err != nil {
+ glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
+ return err
+ }
+ if m.IsClose {
+ // println("processed EOF")
+ return io.EOF
+ }
+ processedTsNs = logEntry.TsNs
+ messageCount++
+ return nil
+ }
+
+ // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
+
+ for {
+
+ if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
+ if err != io.EOF {
+ // println("stopping from persisted logs", err.Error())
+ return err
+ }
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
+ lastReadTime, err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
+ lock.Mutex.Lock()
+ lock.cond.Wait()
+ lock.Mutex.Unlock()
+ return isConnected
+ }, eachLogEntryFn)
+ if err != nil {
+ if err == log_buffer.ResumeFromDiskError {
+ continue
+ }
+ glog.Errorf("processed to %v: %v", lastReadTime, err)
+ time.Sleep(3127 * time.Millisecond)
+ if err != log_buffer.ResumeError {
+ break
+ }
+ }
+ }
+
+ return err
+
+}
+
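+// readPersistedLogBuffer replays messages already flushed to the filer.
+// Entries are organized as <topics dir>/<namespace>/<topic>/<date>/<hour-minute>
+// segment files, one file per partition, so replay walks day directories, then
+// segment files at or after startTime.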
+func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) {
+ startTime = startTime.UTC()
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ topicDir := genTopicDir(tp.Namespace, tp.Topic)
+ partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition)
+
+ return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error {
+ dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name)
+ return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error {
+ if dayEntry.Name == startDate {
+ if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 {
+ return nil
+ }
+ }
+ if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) {
+ return nil
+ }
+ // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
+ chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
+ defer chunkedFileReader.Close()
+ if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ return err
+ }
+ return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err)
+ }
+ return nil
+ }, "", false, 24*60)
+ }, startDate, true, 366)
+
+}
diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go
new file mode 100644
index 000000000..06162471c
--- /dev/null
+++ b/weed/messaging/broker/broker_server.go
@@ -0,0 +1,114 @@
+package broker
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+type MessageBrokerOption struct {
+ Filers []string
+ DefaultReplication string
+ MaxMB int
+ Ip string
+ Port int
+ Cipher bool
+}
+
+type MessageBroker struct {
+ option *MessageBrokerOption
+ grpcDialOption grpc.DialOption
+ topicManager *TopicManager
+}
+
+func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) {
+
+ messageBroker = &MessageBroker{
+ option: option,
+ grpcDialOption: grpcDialOption,
+ }
+
+ messageBroker.topicManager = NewTopicManager(messageBroker)
+
+ messageBroker.checkFilers()
+
+ go messageBroker.keepConnectedToOneFiler()
+
+ return messageBroker, nil
+}
+
+func (broker *MessageBroker) keepConnectedToOneFiler() {
+
+ for {
+ for _, filer := range broker.option.Filers {
+ broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.KeepConnected(ctx)
+ if err != nil {
+ glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+
+ initRequest := &filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }
+ for _, tp := range broker.topicManager.ListTopicPartitions() {
+ initRequest.Resources = append(initRequest.Resources, tp.String())
+ }
+ if err := stream.Send(&filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }); err != nil {
+ glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+
+ // TODO send events of adding/removing topics
+
+ glog.V(0).Infof("conntected with filer: %v", filer)
+ for {
+ if err := stream.Send(&filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }); err != nil {
+ glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+ // println("send heartbeat")
+ if _, err := stream.Recv(); err != nil {
+ glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+ // println("received reply")
+ time.Sleep(11 * time.Second)
+ // println("woke up")
+ }
+ })
+ time.Sleep(3 * time.Second)
+ }
+ }
+
+}
+
+func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithFilerClient(filer, broker.grpcDialOption, fn)
+
+}
+
+func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error {
+
+ return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error {
+ return fn(client)
+ })
+
+}
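For context, a broker is constructed with a list of filers and immediately starts the keep-connected loop above. A minimal sketch, assuming a filer on localhost:8888 and an insecure gRPC dial option (both assumptions for illustration):

package main

import (
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/messaging/broker"
)

func main() {
	opt := &broker.MessageBrokerOption{
		Filers: []string{"localhost:8888"}, // assumption: local filer
		Ip:     "127.0.0.1",
		Port:   17777,
	}
	mb, err := broker.NewMessageBroker(opt, grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	_ = mb     // the broker now re-registers with one filer every 11 seconds
	select {} // keep the process alive
}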
diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go
new file mode 100644
index 000000000..465a2a8f2
--- /dev/null
+++ b/weed/messaging/broker/consistent_distribution.go
@@ -0,0 +1,38 @@
+package broker
+
+import (
+ "github.com/buraksezer/consistent"
+ "github.com/cespare/xxhash"
+)
+
+type Member string
+
+func (m Member) String() string {
+ return string(m)
+}
+
+type hasher struct{}
+
+func (h hasher) Sum64(data []byte) uint64 {
+ return xxhash.Sum64(data)
+}
+
+func PickMember(members []string, key []byte) string {
+ cfg := consistent.Config{
+ PartitionCount: 9791,
+ ReplicationFactor: 2,
+ Load: 1.25,
+ Hasher: hasher{},
+ }
+
+ cmembers := []consistent.Member{}
+ for _, m := range members {
+ cmembers = append(cmembers, Member(m))
+ }
+
+ c := consistent.New(cmembers, cfg)
+
+ m := c.LocateKey(key)
+
+ return m.String()
+}
diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go
new file mode 100644
index 000000000..f58fe4e0e
--- /dev/null
+++ b/weed/messaging/broker/consistent_distribution_test.go
@@ -0,0 +1,32 @@
+package broker
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestPickMember(t *testing.T) {
+
+ servers := []string{
+ "s1:port",
+ "s2:port",
+ "s3:port",
+ "s5:port",
+ "s4:port",
+ }
+
+ total := 1000
+
+ distribution := make(map[string]int)
+ for i := 0; i < total; i++ {
+ tp := fmt.Sprintf("tp:%2d", i)
+ m := PickMember(servers, []byte(tp))
+ // println(tp, "=>", m)
+ distribution[m]++
+ }
+
+ for member, count := range distribution {
+ fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers)))
+ }
+
+}
diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go
new file mode 100644
index 000000000..edddca813
--- /dev/null
+++ b/weed/messaging/broker/topic_manager.go
@@ -0,0 +1,124 @@
+package broker
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+)
+
+type TopicPartition struct {
+ Namespace string
+ Topic string
+ Partition int32
+}
+
+const (
+ TopicPartitionFmt = "%s/%s_%02d"
+)
+
+func (tp *TopicPartition) String() string {
+ return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition)
+}
+
+type TopicControl struct {
+ sync.Mutex
+ cond *sync.Cond
+ subscriberCount int
+ publisherCount int
+ logBuffer *log_buffer.LogBuffer
+}
+
+type TopicManager struct {
+ sync.Mutex
+ topicControls map[TopicPartition]*TopicControl
+ broker *MessageBroker
+}
+
+func NewTopicManager(messageBroker *MessageBroker) *TopicManager {
+ return &TopicManager{
+ topicControls: make(map[TopicPartition]*TopicControl),
+ broker: messageBroker,
+ }
+}
+
+func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer {
+
+ flushFn := func(startTime, stopTime time.Time, buf []byte) {
+
+ if topicConfig.IsTransient {
+ // return
+ }
+
+ // fmt.Printf("flushing with topic config %+v\n", topicConfig)
+
+ startTime, stopTime = startTime.UTC(), stopTime.UTC()
+ targetFile := fmt.Sprintf(
+ "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
+ filer.TopicsDir, tp.Namespace, tp.Topic,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ tp.Partition,
+ )
+
+ if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil {
+ glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ }
+ }
+ logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
+ tl.cond.Broadcast()
+ })
+
+ return logBuffer
+}
+
+func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl {
+ tm.Lock()
+ defer tm.Unlock()
+
+ tc, found := tm.topicControls[partition]
+ if !found {
+ tc = &TopicControl{}
+ tc.cond = sync.NewCond(&tc.Mutex)
+ tm.topicControls[partition] = tc
+ tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig)
+ }
+ if isPublisher {
+ tc.publisherCount++
+ } else {
+ tc.subscriberCount++
+ }
+ return tc
+}
+
+func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) {
+ tm.Lock()
+ defer tm.Unlock()
+
+ lock, found := tm.topicControls[partition]
+ if !found {
+ return
+ }
+ if isPublisher {
+ lock.publisherCount--
+ } else {
+ lock.subscriberCount--
+ }
+ if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
+ delete(tm.topicControls, partition)
+ lock.logBuffer.Shutdown()
+ }
+}
+
+func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) {
+ tm.Lock()
+ defer tm.Unlock()
+
+ for k := range tm.topicControls {
+ tps = append(tps, k)
+ }
+ return
+}
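RequestLock and ReleaseLock implement per-partition reference counting: the first caller creates the shared log buffer, and the last publisher or subscriber to leave shuts it down, flushing any buffered data. A sketch of the intended pairing, assuming code inside package broker with tm and topicConfig in scope:

	tp := TopicPartition{Namespace: "chan", Topic: "events", Partition: 0}
	tc := tm.RequestLock(tp, topicConfig, true) // register as a publisher
	defer tm.ReleaseLock(tp, true)              // the last release shuts the log buffer down
	_ = tc.logBuffer                            // writes go through the shared buffer, flushed once per minute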
diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go
new file mode 100644
index 000000000..a75678815
--- /dev/null
+++ b/weed/messaging/msgclient/chan_config.go
@@ -0,0 +1,5 @@
+package msgclient
+
+func (mc *MessagingClient) DeleteChannel(chanName string) error {
+ return mc.DeleteTopic("chan", chanName)
+}
diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go
new file mode 100644
index 000000000..9bc88f7c0
--- /dev/null
+++ b/weed/messaging/msgclient/chan_pub.go
@@ -0,0 +1,76 @@
+package msgclient
+
+import (
+ "crypto/md5"
+ "hash"
+ "io"
+ "log"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type PubChannel struct {
+ client messaging_pb.SeaweedMessaging_PublishClient
+ grpcConnection *grpc.ClientConn
+ md5hash hash.Hash
+}
+
+func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) {
+ tp := broker.TopicPartition{
+ Namespace: "chan",
+ Topic: chanName,
+ Partition: 0,
+ }
+ grpcConnection, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ pc, err := setupPublisherClient(grpcConnection, tp)
+ if err != nil {
+ return nil, err
+ }
+ return &PubChannel{
+ client: pc,
+ grpcConnection: grpcConnection,
+ md5hash: md5.New(),
+ }, nil
+}
+
+func (pc *PubChannel) Publish(m []byte) error {
+ err := pc.client.Send(&messaging_pb.PublishRequest{
+ Data: &messaging_pb.Message{
+ Value: m,
+ },
+ })
+ if err == nil {
+ pc.md5hash.Write(m)
+ }
+ return err
+}
+func (pc *PubChannel) Close() error {
+
+ // println("send closing")
+ if err := pc.client.Send(&messaging_pb.PublishRequest{
+ Data: &messaging_pb.Message{
+ IsClose: true,
+ },
+ }); err != nil {
+ log.Printf("err send close: %v", err)
+ }
+ // println("receive closing")
+ if _, err := pc.client.Recv(); err != nil && err != io.EOF {
+ log.Printf("err receive close: %v", err)
+ }
+ // println("close connection")
+ if err := pc.grpcConnection.Close(); err != nil {
+ log.Printf("err connection close: %v", err)
+ }
+ return nil
+}
+
+func (pc *PubChannel) Md5() []byte {
+ return pc.md5hash.Sum(nil)
+}
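Together with NewMessagingClient (defined in client.go below), the pub channel gives a Go-channel-like API over a topic in the reserved "chan" namespace. A usage sketch, assuming a broker at localhost:17777:

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/messaging/msgclient"
)

func main() {
	mc := msgclient.NewMessagingClient("localhost:17777") // assumption: local broker
	pc, err := mc.NewPubChannel("events")
	if err != nil {
		log.Fatal(err)
	}
	if err := pc.Publish([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	pc.Close() // sends IsClose, drains the ack, closes the gRPC connection
	fmt.Printf("md5 of published payloads: %x\n", pc.Md5())
}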
diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go
new file mode 100644
index 000000000..213ff4666
--- /dev/null
+++ b/weed/messaging/msgclient/chan_sub.go
@@ -0,0 +1,85 @@
+package msgclient
+
+import (
+ "context"
+ "crypto/md5"
+ "hash"
+ "io"
+ "log"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type SubChannel struct {
+ ch chan []byte
+ stream messaging_pb.SeaweedMessaging_SubscribeClient
+ md5hash hash.Hash
+ cancel context.CancelFunc
+}
+
+func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) {
+ tp := broker.TopicPartition{
+ Namespace: "chan",
+ Topic: chanName,
+ Partition: 0,
+ }
+ grpcConnection, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ sc, err := setupSubscriberClient(ctx, grpcConnection, tp, subscriberId, time.Unix(0, 0))
+ if err != nil {
+ return nil, err
+ }
+
+ t := &SubChannel{
+ ch: make(chan []byte),
+ stream: sc,
+ md5hash: md5.New(),
+ cancel: cancel,
+ }
+
+ go func() {
+ for {
+ resp, subErr := t.stream.Recv()
+ if subErr == io.EOF {
+ return
+ }
+ if subErr != nil {
+ log.Printf("fail to receive from netchan %s: %v", chanName, subErr)
+ return
+ }
+ if resp.Data == nil {
+ // this could be a heartbeat from the broker
+ continue
+ }
+ if resp.Data.IsClose {
+ t.stream.Send(&messaging_pb.SubscriberMessage{
+ IsClose: true,
+ })
+ close(t.ch)
+ cancel()
+ return
+ }
+ t.ch <- resp.Data.Value
+ t.md5hash.Write(resp.Data.Value)
+ }
+ }()
+
+ return t, nil
+}
+
+func (sc *SubChannel) Channel() chan []byte {
+ return sc.ch
+}
+
+func (sc *SubChannel) Md5() []byte {
+ return sc.md5hash.Sum(nil)
+}
+
+func (sc *SubChannel) Cancel() {
+ sc.cancel()
+}
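The subscribe side mirrors the pub channel: messages are pumped into an unbuffered Go channel, heartbeats (empty Data) are skipped, and an IsClose marker closes the channel. A usage sketch under the same localhost:17777 assumption:

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/messaging/msgclient"
)

func main() {
	mc := msgclient.NewMessagingClient("localhost:17777") // assumption: local broker
	sc, err := mc.NewSubChannel("subscriber-1", "events")
	if err != nil {
		log.Fatal(err)
	}
	// The channel closes when the publisher sends IsClose.
	for m := range sc.Channel() {
		fmt.Println(string(m))
	}
	fmt.Printf("md5 of received payloads: %x\n", sc.Md5())
}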
diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go
new file mode 100644
index 000000000..4d7ef2b8e
--- /dev/null
+++ b/weed/messaging/msgclient/client.go
@@ -0,0 +1,55 @@
+package msgclient
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type MessagingClient struct {
+ bootstrapBrokers []string
+ grpcConnections map[broker.TopicPartition]*grpc.ClientConn
+ grpcDialOption grpc.DialOption
+}
+
+func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient {
+ return &MessagingClient{
+ bootstrapBrokers: bootstrapBrokers,
+ grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn),
+ grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"),
+ }
+}
+
+func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) {
+
+ for _, broker := range mc.bootstrapBrokers {
+ grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption)
+ if err != nil {
+ log.Printf("dial broker %s: %v", broker, err)
+ continue
+ }
+ defer grpcConnection.Close()
+
+ resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(),
+ &messaging_pb.FindBrokerRequest{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Parition: tp.Partition,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ targetBroker := resp.Broker
+ return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption)
+ }
+ return nil, fmt.Errorf("no broker found for %+v", tp)
+}
diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go
new file mode 100644
index 000000000..2b9eba1a8
--- /dev/null
+++ b/weed/messaging/msgclient/config.go
@@ -0,0 +1,63 @@
+package msgclient
+
+import (
+ "context"
+ "log"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error {
+
+ return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error {
+ _, err := client.ConfigureTopic(context.Background(),
+ &messaging_pb.ConfigureTopicRequest{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Configuration: &messaging_pb.TopicConfiguration{
+ PartitionCount: 0,
+ Collection: "",
+ Replication: "",
+ IsTransient: false,
+ Partitoning: 0,
+ },
+ })
+ return err
+ })
+
+}
+
+func (mc *MessagingClient) DeleteTopic(namespace, topic string) error {
+
+ return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error {
+ _, err := client.DeleteTopic(context.Background(),
+ &messaging_pb.DeleteTopicRequest{
+ Namespace: namespace,
+ Topic: topic,
+ })
+ return err
+ })
+}
+
+func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error {
+
+ var lastErr error
+ for _, broker := range mc.bootstrapBrokers {
+ grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption)
+ if err != nil {
+ log.Printf("dial broker %s: %v", broker, err)
+ continue
+ }
+ defer grpcConnection.Close()
+
+ err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection))
+ if err == nil {
+ return nil
+ }
+ lastErr = err
+ }
+
+ return lastErr
+}
diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go
new file mode 100644
index 000000000..1aa483ff8
--- /dev/null
+++ b/weed/messaging/msgclient/publisher.go
@@ -0,0 +1,118 @@
+package msgclient
+
+import (
+ "context"
+
+ "github.com/OneOfOne/xxhash"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type Publisher struct {
+ publishClients []messaging_pb.SeaweedMessaging_PublishClient
+ topicConfiguration *messaging_pb.TopicConfiguration
+ messageCount uint64
+ publisherId string
+}
+
+func (mc *MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) {
+ // read topic configuration
+ topicConfiguration := &messaging_pb.TopicConfiguration{
+ PartitionCount: 4,
+ }
+ publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount)
+ for i := 0; i < int(topicConfiguration.PartitionCount); i++ {
+ tp := broker.TopicPartition{
+ Namespace: namespace,
+ Topic: topic,
+ Partition: int32(i),
+ }
+ grpcClientConn, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ client, err := setupPublisherClient(grpcClientConn, tp)
+ if err != nil {
+ return nil, err
+ }
+ publishClients[i] = client
+ }
+ return &Publisher{
+ publishClients: publishClients,
+ topicConfiguration: topicConfiguration,
+ }, nil
+}
+
+func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) {
+
+ stream, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background())
+ if err != nil {
+ return nil, err
+ }
+
+ // send init message
+ err = stream.Send(&messaging_pb.PublishRequest{
+ Init: &messaging_pb.PublishRequest_InitMessage{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Partition: tp.Partition,
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // process init response
+ initResponse, err := stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ if initResponse.Redirect != nil {
+ // TODO follow redirection
+ }
+ if initResponse.Config != nil {
+ // TODO: apply the topic configuration returned by the broker
+ }
+
+ // set up a loop to process control messages
+ doneChan := make(chan error, 1)
+ go func() {
+ for {
+ in, err := stream.Recv()
+ if err != nil {
+ doneChan <- err
+ return
+ }
+ if in.Redirect != nil {
+ // TODO: follow the redirection to another broker
+ }
+ if in.Config != nil {
+ // TODO: apply the updated topic configuration
+ }
+ }
+ }()
+
+ return stream, nil
+
+}
+
+func (p *Publisher) Publish(m *messaging_pb.Message) error {
+ hashValue := p.messageCount
+ p.messageCount++
+ if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash {
+ if m.Key != nil {
+ hashValue = xxhash.Checksum64(m.Key)
+ }
+ } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash {
+ hashValue = xxhash.Checksum64(m.Key)
+ } else {
+ // round robin
+ }
+
+ idx := int(hashValue) % len(p.publishClients)
+ if idx < 0 {
+ idx += len(p.publishClients)
+ }
+ return p.publishClients[idx].Send(&messaging_pb.PublishRequest{
+ Data: m,
+ })
+}
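Publish routes each message to one of the partition streams: round robin by message count by default, or by xxhash of the key under KeyHash/NonNullKeyHash, so a given key consistently lands on the same partition. A sketch, assuming mc is the MessagingClient from client.go below and log/messaging_pb are imported:

	p, err := mc.NewPublisher("pub-1", "ns", "events")
	if err != nil {
		log.Fatal(err)
	}
	// Default is round robin by message count; with KeyHash partitioning the
	// same key always maps (via xxhash) to the same one of the 4 partitions.
	err = p.Publish(&messaging_pb.Message{
		Key:   []byte("user-42"),
		Value: []byte(`{"event":"login"}`),
	})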
diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go
new file mode 100644
index 000000000..6c7dc1ab7
--- /dev/null
+++ b/weed/messaging/msgclient/subscriber.go
@@ -0,0 +1,120 @@
+package msgclient
+
+import (
+ "context"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "google.golang.org/grpc"
+)
+
+type Subscriber struct {
+ subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient
+ subscriberCancels []context.CancelFunc
+ subscriberId string
+}
+
+func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) {
+ // read topic configuration
+ topicConfiguration := &messaging_pb.TopicConfiguration{
+ PartitionCount: 4,
+ }
+ subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount)
+ subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount)
+
+ for i := 0; i < int(topicConfiguration.PartitionCount); i++ {
+ if partitionId >= 0 && i != partitionId {
+ continue
+ }
+ tp := broker.TopicPartition{
+ Namespace: namespace,
+ Topic: topic,
+ Partition: int32(i),
+ }
+ grpcClientConn, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime)
+ if err != nil {
+ return nil, err
+ }
+ subscriberClients[i] = client
+ subscriberCancels[i] = cancel
+ }
+
+ return &Subscriber{
+ subscriberClients: subscriberClients,
+ subscriberCancels: subscriberCancels,
+ subscriberId: subscriberId,
+ }, nil
+}
+
+func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) {
+ stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx)
+ if err != nil {
+ return
+ }
+
+ // send init message
+ err = stream.Send(&messaging_pb.SubscriberMessage{
+ Init: &messaging_pb.SubscriberMessage_InitMessage{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Partition: tp.Partition,
+ StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP,
+ TimestampNs: startTime.UnixNano(),
+ SubscriberId: subscriberId,
+ },
+ })
+ if err != nil {
+ return
+ }
+
+ return stream, nil
+}
+
+func doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error {
+ for {
+ resp, listenErr := subscriberClient.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ println(listenErr.Error())
+ return listenErr
+ }
+ if resp.Data == nil {
+ // this could be a heartbeat from the broker
+ continue
+ }
+ processFn(resp.Data)
+ }
+}
+
+// Subscribe starts goroutines to process the messages
+func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) {
+ var wg sync.WaitGroup
+ for i := 0; i < len(s.subscriberClients); i++ {
+ if s.subscriberClients[i] != nil {
+ wg.Add(1)
+ go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) {
+ defer wg.Done()
+ doSubscribe(subscriberClient, processFn)
+ }(s.subscriberClients[i])
+ }
+ }
+ wg.Wait()
+}
+
+func (s *Subscriber) Shutdown() {
+ for i := 0; i < len(s.subscriberClients); i++ {
+ if s.subscriberCancels[i] != nil {
+ s.subscriberCancels[i]()
+ }
+ }
+}
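NewSubscriber opens one stream per partition (or a single one when partitionId >= 0), and Subscribe fans the process function out across them, blocking until every stream ends. A sketch, continuing with mc from the client examples and assuming fmt/log/time imports:

	s, err := mc.NewSubscriber("sub-1", "ns", "events", -1, time.Unix(0, 0)) // -1: all partitions, from the beginning
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		time.Sleep(time.Minute)
		s.Shutdown() // cancels every per-partition context
	}()
	s.Subscribe(func(m *messaging_pb.Message) {
		fmt.Printf("got: %s\n", m.Value)
	})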
diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go
index 4c1302abb..d881049dd 100644
--- a/weed/notification/aws_sqs/aws_sqs_pub.go
+++ b/weed/notification/aws_sqs/aws_sqs_pub.go
@@ -27,14 +27,14 @@ func (k *AwsSqsPub) GetName() string {
return "aws_sqs"
}
-func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) {
- glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region"))
- glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name"))
+func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) {
+ glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+ glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
- configuration.GetString("aws_access_key_id"),
- configuration.GetString("aws_secret_access_key"),
- configuration.GetString("region"),
- configuration.GetString("sqs_queue_name"),
+ configuration.GetString(prefix+"aws_access_key_id"),
+ configuration.GetString(prefix+"aws_secret_access_key"),
+ configuration.GetString(prefix+"region"),
+ configuration.GetString(prefix+"sqs_queue_name"),
)
}
diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go
index 7f8765cc3..541a453e9 100644
--- a/weed/notification/configuration.go
+++ b/weed/notification/configuration.go
@@ -4,14 +4,13 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
- "github.com/spf13/viper"
)
type MessageQueue interface {
// GetName gets the name to locate the configuration in filer.toml file
GetName() string
// Initialize initializes the file store
- Initialize(configuration util.Configuration) error
+ Initialize(configuration util.Configuration, prefix string) error
SendMessage(key string, message proto.Message) error
}
@@ -21,7 +20,7 @@ var (
Queue MessageQueue
)
-func LoadConfiguration(config *viper.Viper) {
+func LoadConfiguration(config *util.ViperProxy, prefix string) {
if config == nil {
return
@@ -30,9 +29,8 @@ func LoadConfiguration(config *viper.Viper) {
validateOneEnabledQueue(config)
for _, queue := range MessageQueues {
- if config.GetBool(queue.GetName() + ".enabled") {
- viperSub := config.Sub(queue.GetName())
- if err := queue.Initialize(viperSub); err != nil {
+ if config.GetBool(prefix + queue.GetName() + ".enabled") {
+ if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification for %s: %+v",
queue.GetName(), err)
}
@@ -44,7 +42,7 @@ func LoadConfiguration(config *viper.Viper) {
}
-func validateOneEnabledQueue(config *viper.Viper) {
+func validateOneEnabledQueue(config *util.ViperProxy) {
enabledQueue := ""
for _, queue := range MessageQueues {
if config.GetBool(queue.GetName() + ".enabled") {
diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
index ebf44ea6f..01c4d901f 100644
--- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
+++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
@@ -17,23 +17,34 @@ package gocdk_pub_sub
import (
"context"
"fmt"
+ "github.com/golang/protobuf/proto"
+ "github.com/streadway/amqp"
+ "gocloud.dev/pubsub"
+ _ "gocloud.dev/pubsub/awssnssqs"
+ "gocloud.dev/pubsub/rabbitpubsub"
+ "net/url"
+ "path"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/golang/protobuf/proto"
- "gocloud.dev/pubsub"
- _ "gocloud.dev/pubsub/awssnssqs"
// _ "gocloud.dev/pubsub/azuresb"
_ "gocloud.dev/pubsub/gcppubsub"
_ "gocloud.dev/pubsub/natspubsub"
_ "gocloud.dev/pubsub/rabbitpubsub"
+ "os"
)
func init() {
notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{})
}
+func getPath(rawUrl string) string {
+ parsedUrl, _ := url.Parse(rawUrl)
+ return path.Join(parsedUrl.Host, parsedUrl.Path)
+}
+
type GoCDKPubSub struct {
topicURL string
topic *pubsub.Topic
@@ -43,14 +54,37 @@ func (k *GoCDKPubSub) GetName() string {
return "gocdk_pub_sub"
}
-func (k *GoCDKPubSub) Initialize(config util.Configuration) error {
- k.topicURL = config.GetString("topic_url")
+func (k *GoCDKPubSub) doReconnect() {
+ var conn *amqp.Connection
+ if k.topic.As(&conn) {
+ go func() {
+ <-conn.NotifyClose(make(chan *amqp.Error))
+ conn.Close()
+ k.topic.Shutdown(context.Background())
+ for {
+ glog.Info("Try reconnect")
+ conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
+ if err == nil {
+ k.topic = rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil)
+ k.doReconnect()
+ break
+ }
+ glog.Error(err)
+ time.Sleep(time.Second)
+ }
+ }()
+ }
+}
+
+func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
+ k.topicURL = configuration.GetString(prefix + "topic_url")
glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
topic, err := pubsub.OpenTopic(context.Background(), k.topicURL)
if err != nil {
glog.Fatalf("Failed to open topic: %v", err)
}
k.topic = topic
+ k.doReconnect()
return nil
}
@@ -59,8 +93,7 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error {
if err != nil {
return err
}
- ctx := context.Background()
- err = k.topic.Send(ctx, &pubsub.Message{
+ err = k.topic.Send(context.Background(), &pubsub.Message{
Body: bytes,
Metadata: map[string]string{"key": key},
})
diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go
index 7b26bfe38..363a86eb6 100644
--- a/weed/notification/google_pub_sub/google_pub_sub.go
+++ b/weed/notification/google_pub_sub/google_pub_sub.go
@@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string {
return "google_pub_sub"
}
-func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) {
- glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id"))
- glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic"))
+func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) {
+ glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+ glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetString("google_application_credentials"),
- configuration.GetString("project_id"),
- configuration.GetString("topic"),
+ configuration.GetString(prefix+"google_application_credentials"),
+ configuration.GetString(prefix+"project_id"),
+ configuration.GetString(prefix+"topic"),
)
}
diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go
index fd545722b..8d83b5892 100644
--- a/weed/notification/kafka/kafka_queue.go
+++ b/weed/notification/kafka/kafka_queue.go
@@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string {
return "kafka"
}
-func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) {
- glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts"))
- glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic"))
+func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
+ glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+ glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetStringSlice("hosts"),
- configuration.GetString("topic"),
+ configuration.GetStringSlice(prefix+"hosts"),
+ configuration.GetString(prefix+"topic"),
)
}
diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go
index dcc038dfc..1ca4786a1 100644
--- a/weed/notification/log/log_queue.go
+++ b/weed/notification/log/log_queue.go
@@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string {
return "log"
}
-func (k *LogQueue) Initialize(configuration util.Configuration) (err error) {
+func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
return nil
}
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index 2dfa44483..ffd3e4938 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -3,11 +3,14 @@ package operation
import (
"context"
"fmt"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
- "strings"
)
type VolumeAssignRequest struct {
@@ -15,6 +18,7 @@ type VolumeAssignRequest struct {
Replication string
Collection string
Ttl string
+ DiskType string
DataCenter string
Rack string
DataNode string
@@ -30,7 +34,7 @@ type AssignResult struct {
Auth security.EncodedJwt `json:"auth,omitempty"`
}
-func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
+func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
var requests []*VolumeAssignRequest
requests = append(requests, primaryRequest)
@@ -44,17 +48,18 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum
continue
}
- lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
+ lastError = WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.AssignRequest{
- Count: primaryRequest.Count,
- Replication: primaryRequest.Replication,
- Collection: primaryRequest.Collection,
- Ttl: primaryRequest.Ttl,
- DataCenter: primaryRequest.DataCenter,
- Rack: primaryRequest.Rack,
- DataNode: primaryRequest.DataNode,
- WritableVolumeCount: primaryRequest.WritableVolumeCount,
+ Count: request.Count,
+ Replication: request.Replication,
+ Collection: request.Collection,
+ Ttl: request.Ttl,
+ DiskType: request.DiskType,
+ DataCenter: request.DataCenter,
+ Rack: request.Rack,
+ DataNode: request.DataNode,
+ WritableVolumeCount: request.WritableVolumeCount,
}
resp, grpcErr := masterClient.Assign(context.Background(), req)
if grpcErr != nil {
@@ -81,6 +86,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum
continue
}
+ break
}
return ret, lastError
@@ -99,3 +105,44 @@ func LookupJwt(master string, fileId string) security.EncodedJwt {
return security.EncodedJwt(tokenStr)
}
+
+type StorageOption struct {
+ Replication string
+ DiskType string
+ Collection string
+ DataCenter string
+ Rack string
+ TtlSeconds int32
+ Fsync bool
+ VolumeGrowthCount uint32
+}
+
+func (so *StorageOption) TtlString() string {
+ return needle.SecondsToTTL(so.TtlSeconds)
+}
+
+func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, altRequest *VolumeAssignRequest) {
+ ar = &VolumeAssignRequest{
+ Count: uint64(count),
+ Replication: so.Replication,
+ Collection: so.Collection,
+ Ttl: so.TtlString(),
+ DiskType: so.DiskType,
+ DataCenter: so.DataCenter,
+ Rack: so.Rack,
+ WritableVolumeCount: so.VolumeGrowthCount,
+ }
+ if so.DataCenter != "" || so.Rack != "" {
+ altRequest = &VolumeAssignRequest{
+ Count: uint64(count),
+ Replication: so.Replication,
+ Collection: so.Collection,
+ Ttl: so.TtlString(),
+ DiskType: so.DiskType,
+ DataCenter: "",
+ Rack: "",
+ WritableVolumeCount: so.VolumeGrowthCount,
+ }
+ }
+ return
+}
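StorageOption centralizes the per-request placement parameters, and ToAssignRequests derives a primary request plus a location-free fallback whenever a data center or rack was pinned. A sketch, assuming a master at localhost:9333 and an insecure dial option:

package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	so := &operation.StorageOption{
		Replication:       "001",
		Collection:        "pictures",
		DataCenter:        "dc1",
		TtlSeconds:        3600,
		VolumeGrowthCount: 2,
	}
	primary, alt := so.ToAssignRequests(1)                // alt drops the dc/rack pin as a fallback
	masterFn := func() string { return "localhost:9333" } // assumption: local master
	result, err := operation.Assign(masterFn, grpc.WithInsecure(), primary, alt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assigned", result.Fid, "on", result.Url)
}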
diff --git a/weed/operation/buffer_pool.go b/weed/operation/buffer_pool.go
new file mode 100644
index 000000000..9cbe4787f
--- /dev/null
+++ b/weed/operation/buffer_pool.go
@@ -0,0 +1,24 @@
+package operation
+
+import (
+ "github.com/valyala/bytebufferpool"
+ "sync/atomic"
+)
+
+var bufferCounter int64
+
+func GetBuffer() *bytebufferpool.ByteBuffer {
+ defer func() {
+ atomic.AddInt64(&bufferCounter, 1)
+ // println("+", bufferCounter)
+ }()
+ return bytebufferpool.Get()
+}
+
+func PutBuffer(buf *bytebufferpool.ByteBuffer) {
+ defer func() {
+ atomic.AddInt64(&bufferCounter, -1)
+ // println("-", bufferCounter)
+ }()
+ bytebufferpool.Put(buf)
+}
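The pool wraps valyala/bytebufferpool and keeps a balance counter for leak debugging, so every GetBuffer must be paired with a PutBuffer. A minimal sketch:

	buf := operation.GetBuffer()
	defer operation.PutBuffer(buf) // return to the pool; the counter goes back to zero
	buf.Write([]byte("chunk payload"))
	fmt.Println(buf.Len()) // 13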
diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go
index 295204dd8..8506e0518 100644
--- a/weed/operation/chunked_file.go
+++ b/weed/operation/chunked_file.go
@@ -8,11 +8,10 @@ import (
"io/ioutil"
"net/http"
"sort"
+ "sync"
"google.golang.org/grpc"
- "sync"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -41,23 +40,24 @@ type ChunkManifest struct {
// seekable chunked file reader
type ChunkedFileReader struct {
- Manifest *ChunkManifest
- Master string
- pos int64
- pr *io.PipeReader
- pw *io.PipeWriter
- mutex sync.Mutex
+ totalSize int64
+ chunkList []*ChunkInfo
+ master string
+ pos int64
+ pr *io.PipeReader
+ pw *io.PipeWriter
+ mutex sync.Mutex
}
func (s ChunkList) Len() int { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
- if isGzipped {
+func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) {
+ if isCompressed {
var err error
- if buffer, err = util.UnGzipData(buffer); err != nil {
- return nil, err
+ if buffer, err = util.DecompressData(buffer); err != nil {
+ glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
}
}
cm := ChunkManifest{}
@@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
return json.Marshal(cm)
}
-func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error {
+func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
var fileIds []string
for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid)
}
- results, err := DeleteFiles(master, grpcDialOption, fileIds)
+ results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
@@ -126,16 +126,29 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64,
return io.Copy(w, resp.Body)
}
+func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader {
+ var totalSize int64
+ for _, chunk := range chunkList {
+ totalSize += chunk.Size
+ }
+ sort.Sort(ChunkList(chunkList))
+ return &ChunkedFileReader{
+ totalSize: totalSize,
+ chunkList: chunkList,
+ master: master,
+ }
+}
+
func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
- case 0:
- case 1:
+ case io.SeekStart:
+ case io.SeekCurrent:
offset += cf.pos
- case 2:
- offset = cf.Manifest.Size - offset
+ case io.SeekEnd:
+ offset = cf.totalSize + offset
}
- if offset > cf.Manifest.Size {
+ if offset > cf.totalSize {
err = ErrInvalidRange
}
if cf.pos != offset {
@@ -146,10 +159,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
}
func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
- cm := cf.Manifest
chunkIndex := -1
chunkStartOffset := int64(0)
- for i, ci := range cm.Chunks {
+ for i, ci := range cf.chunkList {
if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
chunkIndex = i
chunkStartOffset = cf.pos - ci.Offset
@@ -159,10 +171,12 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
if chunkIndex < 0 {
return n, ErrInvalidRange
}
- for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ {
- ci := cm.Chunks[chunkIndex]
+ for ; chunkIndex < len(cf.chunkList); chunkIndex++ {
+ ci := cf.chunkList[chunkIndex]
// if we need read date from local volume server first?
- fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid)
+ fileUrl, lookupError := LookupFileId(func() string {
+ return cf.master
+ }, ci.Fid)
if lookupError != nil {
return n, lookupError
}
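After this refactor the reader carries a plain chunk list and total size instead of the whole manifest, and Seek uses the named io.Seek* constants. A sketch of reading from an offset, with hypothetical file ids and a local master assumed:

	chunks := []*operation.ChunkInfo{
		{Fid: "3,01637037d6", Offset: 0, Size: 1024}, // hypothetical fids
		{Fid: "4,02a7f2e1c0", Offset: 1024, Size: 512},
	}
	reader := operation.NewChunkedFileReader(chunks, "localhost:9333")
	if _, err := reader.Seek(1024, io.SeekStart); err == nil {
		io.Copy(os.Stdout, reader) // streams only the second chunk
	}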
diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go
index 358399324..8f87882b1 100644
--- a/weed/operation/delete_content.go
+++ b/weed/operation/delete_content.go
@@ -4,12 +4,12 @@ import (
"context"
"errors"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"google.golang.org/grpc"
"net/http"
"strings"
"sync"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
type DeleteResult struct {
@@ -28,10 +28,18 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
}
// DeleteFiles batch deletes a list of fileIds
-func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
-
- lookupFunc := func(vids []string) (map[string]LookupResult, error) {
- return LookupVolumeIds(master, grpcDialOption, vids)
+func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
+
+ lookupFunc := func(vids []string) (results map[string]LookupResult, err error) {
+ results, err = LookupVolumeIds(masterFn, grpcDialOption, vids)
+ if err == nil && usePublicUrl {
+ for _, result := range results {
+ for _, loc := range result.Locations {
+ loc.Url = loc.PublicUrl
+ }
+ }
+ }
+ return
}
return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
@@ -92,9 +100,9 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
go func(server string, fidList []string) {
defer wg.Done()
- if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil {
+ if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil {
err = deleteErr
- } else {
+ } else if deleteResults != nil {
resultChan <- deleteResults
}
@@ -107,18 +115,17 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
ret = append(ret, result...)
}
- glog.V(1).Infof("deleted %d items", len(ret))
-
return ret, err
}
// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
-func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
+func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) {
err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
req := &volume_server_pb.BatchDeleteRequest{
- FileIds: fileIds,
+ FileIds: fileIds,
+ SkipCookieCheck: !includeCookie,
}
resp, err := volumeServerClient.BatchDelete(context.Background(), req)
diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go
index f6b2b69e9..025a65b38 100644
--- a/weed/operation/grpc_client.go
+++ b/weed/operation/grpc_client.go
@@ -1,27 +1,27 @@
package operation
import (
- "context"
"fmt"
+ "strconv"
+ "strings"
+
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
- "strconv"
- "strings"
)
func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
- ctx := context.Background()
-
grpcAddress, err := toVolumeServerGrpcAddress(volumeServer)
if err != nil {
- return err
+ return fmt.Errorf("failed to parse volume server %v: %v", volumeServer, err)
}
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := volume_server_pb.NewVolumeServerClient(grpcConnection)
return fn(client)
}, grpcAddress, grpcDialOption)
@@ -40,16 +40,28 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error {
- ctx := context.Background()
-
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer)
+ masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer)
if parseErr != nil {
- return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr)
+ return fmt.Errorf("failed to parse master %v: %v", masterServer, parseErr)
}
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := master_pb.NewSeaweedClient(grpcConnection)
return fn(client)
}, masterGrpcAddress, grpcDialOption)
}
+
+func WithFilerServerClient(filerServer string, grpcDialOption grpc.DialOption, fn func(masterClient filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(filerServer)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse filer %v: %v", filerGrpcAddress, parseErr)
+ }
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, filerGrpcAddress, grpcDialOption)
+
+}
diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go
index d0773e7fd..0372e47b0 100644
--- a/weed/operation/lookup.go
+++ b/weed/operation/lookup.go
@@ -33,10 +33,10 @@ var (
vc VidCache // caching of volume locations, re-check if after 10 minutes
)
-func Lookup(server string, vid string) (ret *LookupResult, err error) {
+func Lookup(masterFn GetMasterFn, vid string) (ret *LookupResult, err error) {
locations, cache_err := vc.Get(vid)
if cache_err != nil {
- if ret, err = do_lookup(server, vid); err == nil {
+ if ret, err = do_lookup(masterFn, vid); err == nil {
vc.Set(vid, ret.Locations, 10*time.Minute)
}
} else {
@@ -45,9 +45,10 @@ func Lookup(server string, vid string) (ret *LookupResult, err error) {
return
}
-func do_lookup(server string, vid string) (*LookupResult, error) {
+func do_lookup(masterFn GetMasterFn, vid string) (*LookupResult, error) {
values := make(url.Values)
values.Add("volumeId", vid)
+ server := masterFn()
jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values)
if err != nil {
return nil, err
@@ -63,12 +64,12 @@ func do_lookup(server string, vid string) (*LookupResult, error) {
return &ret, nil
}
-func LookupFileId(server string, fileId string) (fullUrl string, err error) {
+func LookupFileId(masterFn GetMasterFn, fileId string) (fullUrl string, err error) {
parts := strings.Split(fileId, ",")
if len(parts) != 2 {
return "", errors.New("Invalid fileId " + fileId)
}
- lookup, lookupError := Lookup(server, parts[0])
+ lookup, lookupError := Lookup(masterFn, parts[0])
if lookupError != nil {
return "", lookupError
}
@@ -79,7 +80,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) {
}
// LookupVolumeIds find volume locations by cache and actual lookup
-func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
+func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
ret := make(map[string]LookupResult)
var unknown_vids []string
@@ -99,7 +100,7 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin
//only query unknown_vids
- err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
+ err := WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupVolumeRequest{
VolumeIds: unknown_vids,
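All lookup helpers now take a GetMasterFn instead of a fixed master address, so callers that track a changing master leader can pass a closure. A sketch with a hypothetical file id:

	masterFn := func() string { return "localhost:9333" } // assumption: local master
	fullUrl, err := operation.LookupFileId(masterFn, "3,01637037d6")
	if err == nil {
		fmt.Println(fullUrl) // e.g. http://127.0.0.1:8080/3,01637037d6
	}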
diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go
new file mode 100644
index 000000000..202374e1b
--- /dev/null
+++ b/weed/operation/needle_parse_test.go
@@ -0,0 +1,131 @@
+package operation
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type MockClient struct {
+ needleHandling func(n *needle.Needle, originalSize int, e error)
+}
+
+func (m *MockClient) Do(req *http.Request) (*http.Response, error) {
+ n, originalSize, _, err := needle.CreateNeedleFromRequest(req, false, 1024*1024)
+ if m.needleHandling != nil {
+ m.needleHandling(n, originalSize, err)
+ }
+ return &http.Response{
+ StatusCode: http.StatusNoContent,
+ }, io.EOF
+}
+
+/*
+
+The mime type is always the value passed in.
+
+Whether to compress depends on content detection, the file name extension, and the compression ratio.
+
+If the content is already compressed, the original content size needs to be known.
+
+*/
+
+func TestCreateNeedleFromRequest(t *testing.T) {
+ mc := &MockClient{}
+ tmp := HttpClient
+ HttpClient = mc
+ defer func() {
+ HttpClient = tmp
+ }()
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
+ assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip")
+ fmt.Printf("needle: %v, originalSize: %d\n", n, originalSize)
+ }
+ uploadResult, err, data := Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader([]byte(textContent)), false, "", nil, "")
+ if len(data) != len(textContent) {
+ t.Errorf("data actual %d expected %d", len(data), len(textContent))
+ }
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ }
+ fmt.Printf("uploadResult: %+v\n", uploadResult)
+ }
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
+ assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip")
+ fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize)
+ }
+ gzippedData, _ := util.GzipData([]byte(textContent))
+ Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(gzippedData), true, "text/plain", nil, "")
+ }
+
+ /*
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
+ assert.Equal(t, true, util.IsZstdContent(n.Data), "this should be zstd")
+ fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize)
+ }
+ zstdData, _ := util.ZstdData([]byte(textContent))
+ Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), true, "text/plain", nil, "")
+ }
+
+ {
+ mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ assert.Equal(t, nil, err, "upload: %v", err)
+ assert.Equal(t, "application/zstd", string(n.Mime), "mime detection failed: %v", string(n.Mime))
+ assert.Equal(t, false, n.IsCompressed(), "this should not be compressed")
+ assert.Equal(t, true, util.IsZstdContent(n.Data), "this should still be zstd")
+ fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize)
+ }
+ zstdData, _ := util.ZstdData([]byte(textContent))
+ Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), false, "application/zstd", nil, "")
+ }
+ */
+
+}
+
+var textContent = `Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+`
diff --git a/weed/operation/stats.go b/weed/operation/stats.go
deleted file mode 100644
index b69a33750..000000000
--- a/weed/operation/stats.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package operation
-
-import (
- "context"
- "google.golang.org/grpc"
-
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
-)
-
-func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) {
-
- err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
-
- grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req)
- if grpcErr != nil {
- return grpcErr
- }
-
- resp = grpcResponse
-
- return nil
-
- })
-
- return
-}
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 62f067430..87c5e4279 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -1,8 +1,6 @@
package operation
import (
- "bytes"
- "google.golang.org/grpc"
"io"
"mime"
"net/url"
@@ -11,6 +9,8 @@ import (
"strconv"
"strings"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
)
@@ -25,20 +25,23 @@ type FilePart struct {
Collection string
DataCenter string
Ttl string
+ DiskType string
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
+ Fsync bool
}
type SubmitResult struct {
FileName string `json:"fileName,omitempty"`
- FileUrl string `json:"fileUrl,omitempty"`
+ FileUrl string `json:"url,omitempty"`
Fid string `json:"fid,omitempty"`
Size uint32 `json:"size,omitempty"`
Error string `json:"error,omitempty"`
}
-func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart,
- replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) {
+type GetMasterFn func() string
+
+func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
results[index].FileName = file.FileName
@@ -49,10 +52,11 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
Collection: collection,
DataCenter: dataCenter,
Ttl: ttl,
+ DiskType: diskType,
}
- ret, err := Assign(master, grpcDialOption, ar)
+ ret, err := Assign(masterFn, grpcDialOption, ar)
if err != nil {
- for index, _ := range files {
+ for index := range files {
results[index].Error = err.Error()
}
return results, err
@@ -63,10 +67,15 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
file.Fid = file.Fid + "_" + strconv.Itoa(index)
}
file.Server = ret.Url
+ if usePublicUrl {
+ file.Server = ret.PublicUrl
+ }
file.Replication = replication
file.Collection = collection
file.DataCenter = dataCenter
- results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption)
+ file.Ttl = ttl
+ file.DiskType = diskType
+ results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption)
if err != nil {
results[index].Error = err.Error()
}
@@ -109,11 +118,14 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil
}
-func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
+func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
fileUrl := "http://" + fi.Server + "/" + fi.Fid
if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
}
+ if fi.Fsync {
+ fileUrl += "?fsync=true"
+ }
if closer, ok := fi.Reader.(io.Closer); ok {
defer closer.Close()
}
@@ -136,8 +148,9 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
+ DiskType: fi.DiskType,
}
- ret, err = Assign(master, grpcDialOption, ar)
+ ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
return
}
@@ -149,11 +162,12 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
+ DiskType: fi.DiskType,
}
- ret, err = Assign(master, grpcDialOption, ar)
+ ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master, grpcDialOption)
+ cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
return
}
id = ret.Fid
@@ -164,14 +178,17 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
}
}
fileUrl := "http://" + ret.Url + "/" + id
+ if usePublicUrl {
+ fileUrl = "http://" + ret.PublicUrl + "/" + id
+ }
count, e := upload_one_chunk(
baseName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(fi.Reader, chunkSize),
- master, fileUrl,
+ masterFn, fileUrl,
ret.Auth)
if e != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master, grpcDialOption)
+ cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
return 0, e
}
cm.Chunks = append(cm.Chunks,
@@ -186,10 +203,10 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master, grpcDialOption)
+ cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
}
} else {
- ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt)
+ ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt)
if e != nil {
return 0, e
}
@@ -198,12 +215,11 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
return
}
-func upload_one_chunk(filename string, reader io.Reader, master,
+func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
- uploadResult, uploadError := Upload(fileUrl, filename, reader, false,
- "", nil, jwt)
+ uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt)
if uploadError != nil {
return 0, uploadError
}
@@ -215,12 +231,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
if e != nil {
return e
}
- bufReader := bytes.NewReader(buf)
glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
u, _ := url.Parse(fileUrl)
q := u.Query()
q.Set("cm", "true")
u.RawQuery = q.Encode()
- _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt)
+ _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt)
return e
}
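SubmitFiles threads the same GetMasterFn through assignment, upload, and chunk cleanup, and now takes disk type and public-URL parameters. A sketch of a single small upload, assuming a local master and an insecure dial option; FilePart fields beyond those shown in this diff (Reader, FileName, FileSize) are assumptions:

	parts := []operation.FilePart{{
		Reader:   strings.NewReader("hello world"),
		FileName: "hello.txt",
		FileSize: 11,
	}}
	masterFn := func() string { return "localhost:9333" }
	results, err := operation.SubmitFiles(masterFn, grpc.WithInsecure(), parts,
		"000" /* replication */, "" /* collection */, "" /* dataCenter */,
		"" /* ttl */, "" /* diskType */, 4 /* maxMB */, false /* usePublicUrl */)
	if err == nil && len(results) > 0 {
		fmt.Println(results[0].FileUrl)
	}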
diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go
index b53f18ce1..045948274 100644
--- a/weed/operation/tail_volume.go
+++ b/weed/operation/tail_volume.go
@@ -5,14 +5,15 @@ import (
"fmt"
"io"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
- "google.golang.org/grpc"
)
-func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
+func TailVolume(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
// find volume location, replication, ttl info
- lookup, err := Lookup(master, vid.String())
+ lookup, err := Lookup(masterFn, vid.String())
if err != nil {
return fmt.Errorf("look up volume %d: %v", vid, err)
}
@@ -27,8 +28,10 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume
func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error {
return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
- stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
VolumeId: uint32(vid),
SinceNs: sinceNs,
IdleTimeoutSeconds: uint32(idleTimeoutSeconds),
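
The cancellable context matters for a server-streaming RPC: once the closure returns, `defer cancel()` tells the volume server to stop sending, instead of leaving the stream open until the connection dies. A self-contained sketch of the same pattern (the `recv` callback stands in for the generated stream's `Recv` method):

package main

import (
	"context"
	"errors"
	"fmt"
)

// consume mimics the stream-receive loop: it stops either when recv fails
// or when the caller cancels the context.
func consume(ctx context.Context, recv func() (string, error)) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		msg, err := recv()
		if err != nil {
			return err
		}
		fmt.Println("received", msg)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // mirrors the change in TailVolumeFromSource

	n := 0
	err := consume(ctx, func() (string, error) {
		n++
		if n > 3 {
			return "", errors.New("idle timeout") // stand-in for io.EOF / server timeout
		}
		return fmt.Sprintf("needle-%d", n), nil
	})
	fmt.Println("stream ended:", err)
}
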
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index c387d0230..944186eeb 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -2,10 +2,7 @@ package operation
import (
"bytes"
- "compress/flate"
- "compress/gzip"
"encoding/json"
- "errors"
"fmt"
"io"
"io/ioutil"
@@ -15,73 +12,188 @@ import (
"net/textproto"
"path/filepath"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
type UploadResult struct {
- Name string `json:"name,omitempty"`
- Size uint32 `json:"size,omitempty"`
- Error string `json:"error,omitempty"`
- ETag string `json:"eTag,omitempty"`
+ Name string `json:"name,omitempty"`
+ Size uint32 `json:"size,omitempty"`
+ Error string `json:"error,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ CipherKey []byte `json:"cipherKey,omitempty"`
+ Mime string `json:"mime,omitempty"`
+ Gzip uint32 `json:"gzip,omitempty"`
+ ContentMd5 string `json:"contentMd5,omitempty"`
+ RetryCount int `json:"-"`
+}
+
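+// ToPbFileChunk packages this upload result as a filer_pb.FileChunk at the
+// given file offset, stamping the current time as the chunk's Mtime.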
+func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk {
+ fid, _ := filer_pb.ToFileIdObject(fileId)
+ return &filer_pb.FileChunk{
+ FileId: fileId,
+ Offset: offset,
+ Size: uint64(uploadResult.Size),
+ Mtime: time.Now().UnixNano(),
+ ETag: uploadResult.ETag,
+ CipherKey: uploadResult.CipherKey,
+ IsCompressed: uploadResult.Gzip > 0,
+ Fid: fid,
+ }
+}
+
+// HTTPClient interface for testing
+type HTTPClient interface {
+ Do(req *http.Request) (*http.Response, error)
}
var (
- client *http.Client
+ HttpClient HTTPClient
)
func init() {
- client = &http.Client{Transport: &http.Transport{
+ HttpClient = &http.Client{Transport: &http.Transport{
+ MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}
-var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
+var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
// UploadData sends a POST request to a volume server to upload the given bytes, retrying transient failures
-func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) {
- if compressionLevel < 1 {
- compressionLevel = 1
- }
- if compressionLevel > 9 {
- compressionLevel = 9
- }
- return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt)
+func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ uploadResult, err = retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
+ return
}
// Upload sends a POST request to a volume server to upload the content, compressing it when beneficial
-func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
- return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt)
+func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) {
+ uploadResult, err, data = doUpload(uploadUrl, filename, cipher, reader, isInputCompressed, mtype, pairMap, jwt)
+ return
+}
+
+func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) {
+ bytesReader, ok := reader.(*util.BytesReader)
+ if ok {
+ data = bytesReader.Bytes
+ } else {
+ data, err = ioutil.ReadAll(reader)
+ if err != nil {
+ err = fmt.Errorf("read input: %v", err)
+ return
+ }
+ }
+ uploadResult, uploadErr := retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
+ return uploadResult, uploadErr, data
}
-func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) {
- contentIsGzipped := isGzipped
+func retriedUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ for i := 0; i < 3; i++ {
+ uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt)
+ if err == nil {
+ uploadResult.RetryCount = i
+ return
+ } else {
+ glog.Warningf("uploading to %s: %v", uploadUrl, err)
+ }
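+ // linear backoff between attempts: 237ms, then 474ms, then 711ms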
+ time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
+ }
+ return
+}
+
+func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ contentIsGzipped := isInputCompressed
shouldGzipNow := false
- if !isGzipped {
- if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped {
+ if !isInputCompressed {
+ if mtype == "" {
+ mtype = http.DetectContentType(data)
+ // println("detect1 mimetype to", mtype)
+ if mtype == "application/octet-stream" {
+ mtype = ""
+ }
+ }
+ if shouldBeCompressed, iAmSure := util.IsCompressableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeCompressed {
shouldGzipNow = true
+ } else if !iAmSure && mtype == "" && len(data) > 16*1024 {
+ var compressed []byte
+ compressed, err = util.GzipData(data[0:128])
+ shouldGzipNow = len(compressed)*10 < 128*9 // only compress when the 128-byte sample shrinks below 90% of its size
+ }
+ }
+
+ // gzip if possible; note the extra compressed copy can briefly double memory use
+ clearDataLen := len(data)
+ clearData := data
+ if shouldGzipNow && !cipher {
+ compressed, compressErr := util.GzipData(data)
+ // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed))
+ if compressErr == nil {
+ data = compressed
contentIsGzipped = true
}
+ } else if isInputCompressed {
+ // just to get the clear data length
+ clearData, err = util.DecompressData(data)
+ if err == nil {
+ clearDataLen = len(clearData)
+ }
}
- return upload_content(uploadUrl, func(w io.Writer) (err error) {
- if shouldGzipNow {
- gzWriter, _ := gzip.NewWriterLevel(w, compression)
- _, err = io.Copy(gzWriter, reader)
- gzWriter.Close()
- } else {
- _, err = io.Copy(w, reader)
+
+ if cipher {
+ // encrypt(gzip(data))
+
+ // encrypt
+ cipherKey := util.GenCipherKey()
+ encryptedData, encryptionErr := util.Encrypt(clearData, cipherKey)
+ if encryptionErr != nil {
+ err = fmt.Errorf("encrypt input: %v", encryptionErr)
+ return
}
- return
- }, filename, contentIsGzipped, mtype, pairMap, jwt)
+
+ // upload data
+ uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
+ _, err = w.Write(encryptedData)
+ return
+ }, "", false, len(encryptedData), "", nil, jwt)
+ if uploadResult == nil {
+ return
+ }
+ uploadResult.Name = filename
+ uploadResult.Mime = mtype
+ uploadResult.CipherKey = cipherKey
+ uploadResult.Size = uint32(clearDataLen)
+ } else {
+ // upload data
+ uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
+ _, err = w.Write(data)
+ return
+ }, filename, contentIsGzipped, len(data), mtype, pairMap, jwt)
+ if uploadResult == nil {
+ return
+ }
+ uploadResult.Size = uint32(clearDataLen)
+ if contentIsGzipped {
+ uploadResult.Gzip = 1
+ }
+ }
+
+ return uploadResult, err
}
-func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
- body_buf := bytes.NewBufferString("")
- body_writer := multipart.NewWriter(body_buf)
+func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, originalDataSize int, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
+ buf := GetBuffer()
+ defer PutBuffer(buf)
+ body_writer := multipart.NewWriter(buf)
h := make(textproto.MIMEHeader)
h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, fileNameEscaper.Replace(filename)))
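+ // tag the multipart part header with the target URL, usable as an idempotency token for retried uploads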
+ h.Set("Idempotency-Key", uploadUrl)
if mtype == "" {
mtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename)))
}
@@ -107,10 +219,10 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
return nil, err
}
- req, postErr := http.NewRequest("POST", uploadUrl, body_buf)
+ req, postErr := http.NewRequest("POST", uploadUrl, bytes.NewReader(buf.Bytes()))
if postErr != nil {
- glog.V(0).Infoln("failing to upload to", uploadUrl, postErr.Error())
- return nil, postErr
+ glog.V(1).Infof("create upload request %s: %v", uploadUrl, postErr)
+ return nil, fmt.Errorf("create upload request %s: %v", uploadUrl, postErr)
}
req.Header.Set("Content-Type", content_type)
for k, v := range pairMap {
@@ -119,27 +231,42 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
if jwt != "" {
req.Header.Set("Authorization", "BEARER "+string(jwt))
}
- resp, post_err := client.Do(req)
+ // print("+")
+ resp, post_err := HttpClient.Do(req)
if post_err != nil {
- glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error())
- return nil, post_err
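+ // note: this immediate retry reuses the same *http.Request, whose body reader may already
+ // have been consumed; the outer loop in retriedUploadData rebuilds the request and remains
+ // the reliable retry path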
+ if strings.Contains(post_err.Error(), "connection reset by peer") ||
+ strings.Contains(post_err.Error(), "use of closed network connection") {
+ resp, post_err = HttpClient.Do(req)
+ }
}
- defer resp.Body.Close()
+ if post_err != nil {
+ return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
+ }
+ // print("-")
+ defer util.CloseResponse(resp)
+
+ var ret UploadResult
etag := getEtag(resp)
+ if resp.StatusCode == http.StatusNoContent {
+ ret.ETag = etag
+ return &ret, nil
+ }
+
resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil {
- return nil, ra_err
+ return nil, fmt.Errorf("read response body %v: %v", uploadUrl, ra_err)
}
- var ret UploadResult
+
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
- glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body))
- return nil, unmarshal_err
+ glog.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
+ return nil, fmt.Errorf("unmarshal %v: %v", uploadUrl, unmarshal_err)
}
if ret.Error != "" {
- return nil, errors.New(ret.Error)
+ return nil, fmt.Errorf("unmarshalled error %v: %v", uploadUrl, ret.Error)
}
ret.ETag = etag
+ ret.ContentMd5 = resp.Header.Get("Content-MD5")
return &ret, nil
}
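
With this rewrite, `Upload`/`UploadData` buffer the whole payload, pick compression (and optionally encryption) internally, and retry transient failures via `retriedUploadData`. A hedged usage sketch; the volume-server URL and fid are placeholders and would normally come from an `Assign` call:

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	data := []byte(`{"hello":"world"}`)
	result, err := operation.UploadData(
		"http://localhost:8080/3,01637037d6", // placeholder fileUrl from Assign
		"hello.json",                         // filename
		false,                                // cipher: no client-side encryption
		data,                                 // raw bytes; gzip is decided internally
		false,                                // isInputCompressed
		"application/json",                   // mtype
		nil,                                  // extra header pairs
		"",                                   // jwt
	)
	if err != nil {
		log.Fatalf("upload: %v", err)
	}
	fmt.Printf("size=%d etag=%s retries=%d\n", result.Size, result.ETag, result.RetryCount)
}
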
diff --git a/weed/pb/Makefile b/weed/pb/Makefile
index c50410574..d2618937b 100644
--- a/weed/pb/Makefile
+++ b/weed/pb/Makefile
@@ -3,8 +3,10 @@ all: gen
.PHONY : gen
gen:
- protoc master.proto --go_out=plugins=grpc:./master_pb
- protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb
- protoc filer.proto --go_out=plugins=grpc:./filer_pb
+ protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative
+ protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative
+ protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative
+ protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative
+ protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative
# protoc filer.proto --java_out=../../other/java/client/src/main/java
cp filer.proto ../../other/java/client/src/main/proto
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index ef847cbe7..ac4c9a0e7 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
package filer_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "FilerProto";
@@ -21,6 +22,9 @@ service SeaweedFiler {
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
+ rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
+ }
+
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
@@ -33,6 +37,9 @@ service SeaweedFiler {
rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
}
+ rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
+ }
+
rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
}
@@ -42,6 +49,24 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
+ rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
+ }
+
+ rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
+ }
+
+ rpc KvGet (KvGetRequest) returns (KvGetResponse) {
+ }
+
+ rpc KvPut (KvPutRequest) returns (KvPutResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -73,6 +98,9 @@ message Entry {
repeated FileChunk chunks = 3;
FuseAttributes attributes = 4;
map<string, bytes> extended = 5;
+ bytes hard_link_id = 7;
+ int32 hard_link_counter = 8; // only exists in hard link meta data
+ bytes content = 9; // if not empty, the file content
}
message FullEntry {
@@ -85,6 +113,8 @@ message EventNotification {
Entry new_entry = 2;
bool delete_chunks = 3;
string new_parent_path = 4;
+ bool is_from_other_cluster = 5;
+ repeated int32 signatures = 6;
}
message FileChunk {
@@ -96,6 +126,13 @@ message FileChunk {
string source_file_id = 6; // to be deprecated
FileId fid = 7;
FileId source_fid = 8;
+ bytes cipher_key = 9;
+ bool is_compressed = 10;
+ bool is_chunk_manifest = 11; // content is a list of FileChunks
+}
+
+message FileChunkManifest {
+ repeated FileChunk chunks = 1;
}
message FileId {
@@ -118,23 +155,39 @@ message FuseAttributes {
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
+ bytes md5 = 14;
+ string disk_type = 15;
}
message CreateEntryRequest {
string directory = 1;
Entry entry = 2;
+ bool o_excl = 3;
+ bool is_from_other_cluster = 4;
+ repeated int32 signatures = 5;
}
message CreateEntryResponse {
+ string error = 1;
}
message UpdateEntryRequest {
string directory = 1;
Entry entry = 2;
+ bool is_from_other_cluster = 3;
+ repeated int32 signatures = 4;
}
message UpdateEntryResponse {
}
+message AppendToEntryRequest {
+ string directory = 1;
+ string entry_name = 2;
+ repeated FileChunk chunks = 3;
+}
+message AppendToEntryResponse {
+}
+
message DeleteEntryRequest {
string directory = 1;
string name = 2;
@@ -142,9 +195,12 @@ message DeleteEntryRequest {
bool is_delete_data = 4;
bool is_recursive = 5;
bool ignore_recursive_error = 6;
+ bool is_from_other_cluster = 7;
+ repeated int32 signatures = 8;
}
message DeleteEntryResponse {
+ string error = 1;
}
message AtomicRenameEntryRequest {
@@ -163,6 +219,9 @@ message AssignVolumeRequest {
string replication = 3;
int32 ttl_sec = 4;
string data_center = 5;
+ string path = 6;
+ string rack = 7;
+ string disk_type = 8;
}
message AssignVolumeResponse {
@@ -171,6 +230,9 @@ message AssignVolumeResponse {
string public_url = 3;
int32 count = 4;
string auth = 5;
+ string collection = 6;
+ string replication = 7;
+ string error = 8;
}
message LookupVolumeRequest {
@@ -189,6 +251,16 @@ message LookupVolumeResponse {
map<string, Locations> locations_map = 1;
}
+message Collection {
+ string name = 1;
+}
+message CollectionListRequest {
+ bool include_normal_volumes = 1;
+ bool include_ec_volumes = 2;
+}
+message CollectionListResponse {
+ repeated Collection collections = 1;
+}
message DeleteCollectionRequest {
string collection = 1;
}
@@ -200,11 +272,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
+ string disk_type = 4;
}
message StatisticsResponse {
- string replication = 1;
- string collection = 2;
- string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -217,4 +287,80 @@ message GetFilerConfigurationResponse {
string replication = 2;
string collection = 3;
uint32 max_mb = 4;
+ string dir_buckets = 5;
+ bool cipher = 7;
+ int32 signature = 8;
+ string metrics_address = 9;
+ int32 metrics_interval_sec = 10;
+}
+
+message SubscribeMetadataRequest {
+ string client_name = 1;
+ string path_prefix = 2;
+ int64 since_ns = 3;
+ int32 signature = 4;
+}
+message SubscribeMetadataResponse {
+ string directory = 1;
+ EventNotification event_notification = 2;
+ int64 ts_ns = 3;
+}
+
+message LogEntry {
+ int64 ts_ns = 1;
+ int32 partition_key_hash = 2;
+ bytes data = 3;
+}
+
+message KeepConnectedRequest {
+ string name = 1;
+ uint32 grpc_port = 2;
+ repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+ string resource = 1;
+}
+message LocateBrokerResponse {
+ bool found = 1;
+ // if found, send the exact address
+ // if not found, send the full list of existing brokers
+ message Resource {
+ string grpc_addresses = 1;
+ int32 resource_count = 2;
+ }
+ repeated Resource resources = 2;
+}
+
+// Key-Value operations
+message KvGetRequest {
+ bytes key = 1;
+}
+message KvGetResponse {
+ bytes value = 1;
+ string error = 2;
+}
+message KvPutRequest {
+ bytes key = 1;
+ bytes value = 2;
+}
+message KvPutResponse {
+ string error = 1;
+}
+
+// path-based configurations
+message FilerConf {
+ int32 version = 1;
+ message PathConf {
+ string location_prefix = 1;
+ string collection = 2;
+ string replication = 3;
+ string ttl = 4;
+ string disk_type = 5;
+ bool fsync = 6;
+ uint32 volume_growth_count = 7;
+ }
+ repeated PathConf locations = 2;
}
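
The new `KvGet`/`KvPut` RPCs give the filer a small key-value facility; note that both responses report failures through an in-band `error` field as well as through the gRPC status. A sketch of a round-trip against the generated stubs (the filer gRPC address is a placeholder):

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure()) // placeholder filer gRPC address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := filer_pb.NewSeaweedFilerClient(conn)
	ctx := context.Background()

	putResp, err := client.KvPut(ctx, &filer_pb.KvPutRequest{
		Key:   []byte("config/example"),
		Value: []byte("v1"),
	})
	if err != nil {
		log.Fatalf("put rpc: %v", err)
	}
	if putResp.Error != "" {
		log.Fatalf("put: %s", putResp.Error) // in-band error field
	}

	getResp, err := client.KvGet(ctx, &filer_pb.KvGetRequest{Key: []byte("config/example")})
	if err != nil {
		log.Fatalf("get rpc: %v", err)
	}
	if getResp.Error != "" {
		log.Fatalf("get: %s", getResp.Error)
	}
	log.Printf("value=%s", getResp.Value)
}
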
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index c8214aa94..902c39514 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -1,1064 +1,4535 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.12.3
// source: filer.proto
-// DO NOT EDIT!
-
-/*
-Package filer_pb is a generated protocol buffer package.
-
-It is generated from these files:
- filer.proto
-
-It has these top-level messages:
- LookupDirectoryEntryRequest
- LookupDirectoryEntryResponse
- ListEntriesRequest
- ListEntriesResponse
- Entry
- FullEntry
- EventNotification
- FileChunk
- FileId
- FuseAttributes
- CreateEntryRequest
- CreateEntryResponse
- UpdateEntryRequest
- UpdateEntryResponse
- DeleteEntryRequest
- DeleteEntryResponse
- AtomicRenameEntryRequest
- AtomicRenameEntryResponse
- AssignVolumeRequest
- AssignVolumeResponse
- LookupVolumeRequest
- Locations
- Location
- LookupVolumeResponse
- DeleteCollectionRequest
- DeleteCollectionResponse
- StatisticsRequest
- StatisticsResponse
- GetFilerConfigurationRequest
- GetFilerConfigurationResponse
-*/
-package filer_pb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package filer_pb
import (
- context "golang.org/x/net/context"
+ context "context"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
type LookupDirectoryEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *LookupDirectoryEntryRequest) Reset() {
+ *x = LookupDirectoryEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupDirectoryEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupDirectoryEntryRequest) ProtoMessage() {}
+
+func (x *LookupDirectoryEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupDirectoryEntryRequest) Reset() { *m = LookupDirectoryEntryRequest{} }
-func (m *LookupDirectoryEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupDirectoryEntryRequest) ProtoMessage() {}
-func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+// Deprecated: Use LookupDirectoryEntryRequest.ProtoReflect.Descriptor instead.
+func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{0}
+}
-func (m *LookupDirectoryEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *LookupDirectoryEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *LookupDirectoryEntryRequest) GetName() string {
- if m != nil {
- return m.Name
+func (x *LookupDirectoryEntryRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
type LookupDirectoryEntryResponse struct {
- Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *LookupDirectoryEntryResponse) Reset() {
+ *x = LookupDirectoryEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupDirectoryEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupDirectoryEntryResponse) ProtoMessage() {}
+
+func (x *LookupDirectoryEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupDirectoryEntryResponse) Reset() { *m = LookupDirectoryEntryResponse{} }
-func (m *LookupDirectoryEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupDirectoryEntryResponse) ProtoMessage() {}
-func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+// Deprecated: Use LookupDirectoryEntryResponse.ProtoReflect.Descriptor instead.
+func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{1}
+}
-func (m *LookupDirectoryEntryResponse) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *LookupDirectoryEntryResponse) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
return nil
}
type ListEntriesRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Prefix string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"`
- StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName" json:"startFromFileName,omitempty"`
- InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom" json:"inclusiveStartFrom,omitempty"`
- Limit uint32 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"`
+ InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"`
+ Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *ListEntriesRequest) Reset() {
+ *x = ListEntriesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListEntriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListEntriesRequest) ProtoMessage() {}
+
+func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} }
-func (m *ListEntriesRequest) String() string { return proto.CompactTextString(m) }
-func (*ListEntriesRequest) ProtoMessage() {}
-func (*ListEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead.
+func (*ListEntriesRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{2}
+}
-func (m *ListEntriesRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *ListEntriesRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *ListEntriesRequest) GetPrefix() string {
- if m != nil {
- return m.Prefix
+func (x *ListEntriesRequest) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
}
return ""
}
-func (m *ListEntriesRequest) GetStartFromFileName() string {
- if m != nil {
- return m.StartFromFileName
+func (x *ListEntriesRequest) GetStartFromFileName() string {
+ if x != nil {
+ return x.StartFromFileName
}
return ""
}
-func (m *ListEntriesRequest) GetInclusiveStartFrom() bool {
- if m != nil {
- return m.InclusiveStartFrom
+func (x *ListEntriesRequest) GetInclusiveStartFrom() bool {
+ if x != nil {
+ return x.InclusiveStartFrom
}
return false
}
-func (m *ListEntriesRequest) GetLimit() uint32 {
- if m != nil {
- return m.Limit
+func (x *ListEntriesRequest) GetLimit() uint32 {
+ if x != nil {
+ return x.Limit
}
return 0
}
type ListEntriesResponse struct {
- Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *ListEntriesResponse) Reset() {
+ *x = ListEntriesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListEntriesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListEntriesResponse) ProtoMessage() {}
+
+func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} }
-func (m *ListEntriesResponse) String() string { return proto.CompactTextString(m) }
-func (*ListEntriesResponse) ProtoMessage() {}
-func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead.
+func (*ListEntriesResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{3}
+}
-func (m *ListEntriesResponse) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *ListEntriesResponse) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
return nil
}
type Entry struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
- Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
- Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"`
- Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"`
+ Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"`
+ Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ HardLinkId []byte `protobuf:"bytes,7,opt,name=hard_link_id,json=hardLinkId,proto3" json:"hard_link_id,omitempty"`
+ HardLinkCounter int32 `protobuf:"varint,8,opt,name=hard_link_counter,json=hardLinkCounter,proto3" json:"hard_link_counter,omitempty"` // only exists in hard link meta data
+ Content []byte `protobuf:"bytes,9,opt,name=content,proto3" json:"content,omitempty"` // if not empty, the file content
+}
+
+func (x *Entry) Reset() {
+ *x = Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Entry) ProtoMessage() {}
+
+func (x *Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *Entry) Reset() { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage() {}
-func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+// Deprecated: Use Entry.ProtoReflect.Descriptor instead.
+func (*Entry) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{4}
+}
-func (m *Entry) GetName() string {
- if m != nil {
- return m.Name
+func (x *Entry) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
-func (m *Entry) GetIsDirectory() bool {
- if m != nil {
- return m.IsDirectory
+func (x *Entry) GetIsDirectory() bool {
+ if x != nil {
+ return x.IsDirectory
}
return false
}
-func (m *Entry) GetChunks() []*FileChunk {
- if m != nil {
- return m.Chunks
+func (x *Entry) GetChunks() []*FileChunk {
+ if x != nil {
+ return x.Chunks
}
return nil
}
-func (m *Entry) GetAttributes() *FuseAttributes {
- if m != nil {
- return m.Attributes
+func (x *Entry) GetAttributes() *FuseAttributes {
+ if x != nil {
+ return x.Attributes
}
return nil
}
-func (m *Entry) GetExtended() map[string][]byte {
- if m != nil {
- return m.Extended
+func (x *Entry) GetExtended() map[string][]byte {
+ if x != nil {
+ return x.Extended
}
return nil
}
-type FullEntry struct {
- Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
- Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+func (x *Entry) GetHardLinkId() []byte {
+ if x != nil {
+ return x.HardLinkId
+ }
+ return nil
}
-func (m *FullEntry) Reset() { *m = FullEntry{} }
-func (m *FullEntry) String() string { return proto.CompactTextString(m) }
-func (*FullEntry) ProtoMessage() {}
-func (*FullEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *FullEntry) GetDir() string {
- if m != nil {
- return m.Dir
+func (x *Entry) GetHardLinkCounter() int32 {
+ if x != nil {
+ return x.HardLinkCounter
}
- return ""
+ return 0
}
-func (m *FullEntry) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *Entry) GetContent() []byte {
+ if x != nil {
+ return x.Content
}
return nil
}
-type EventNotification struct {
- OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"`
- NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"`
- DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"`
- NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath" json:"new_parent_path,omitempty"`
-}
-
-func (m *EventNotification) Reset() { *m = EventNotification{} }
-func (m *EventNotification) String() string { return proto.CompactTextString(m) }
-func (*EventNotification) ProtoMessage() {}
-func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+type FullEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *EventNotification) GetOldEntry() *Entry {
- if m != nil {
- return m.OldEntry
- }
- return nil
+ Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
}
-func (m *EventNotification) GetNewEntry() *Entry {
- if m != nil {
- return m.NewEntry
+func (x *FullEntry) Reset() {
+ *x = FullEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (m *EventNotification) GetDeleteChunks() bool {
- if m != nil {
- return m.DeleteChunks
- }
- return false
+func (x *FullEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *EventNotification) GetNewParentPath() string {
- if m != nil {
- return m.NewParentPath
+func (*FullEntry) ProtoMessage() {}
+
+func (x *FullEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-type FileChunk struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"`
- Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
- Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"`
- ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"`
- SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"`
- Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"`
- SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"`
+// Deprecated: Use FullEntry.ProtoReflect.Descriptor instead.
+func (*FullEntry) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{5}
}
-func (m *FileChunk) Reset() { *m = FileChunk{} }
-func (m *FileChunk) String() string { return proto.CompactTextString(m) }
-func (*FileChunk) ProtoMessage() {}
-func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-func (m *FileChunk) GetFileId() string {
- if m != nil {
- return m.FileId
+func (x *FullEntry) GetDir() string {
+ if x != nil {
+ return x.Dir
}
return ""
}
-func (m *FileChunk) GetOffset() int64 {
- if m != nil {
- return m.Offset
+func (x *FullEntry) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
- return 0
+ return nil
}
-func (m *FileChunk) GetSize() uint64 {
- if m != nil {
- return m.Size
- }
- return 0
+type EventNotification struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"`
+ NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"`
+ DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"`
+ NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
+ Signatures []int32 `protobuf:"varint,6,rep,packed,name=signatures,proto3" json:"signatures,omitempty"`
}
-func (m *FileChunk) GetMtime() int64 {
- if m != nil {
- return m.Mtime
+func (x *EventNotification) Reset() {
+ *x = EventNotification{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *FileChunk) GetETag() string {
- if m != nil {
- return m.ETag
- }
- return ""
+func (x *EventNotification) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *FileChunk) GetSourceFileId() string {
- if m != nil {
- return m.SourceFileId
+func (*EventNotification) ProtoMessage() {}
+
+func (x *EventNotification) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EventNotification.ProtoReflect.Descriptor instead.
+func (*EventNotification) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{6}
}
-func (m *FileChunk) GetFid() *FileId {
- if m != nil {
- return m.Fid
+func (x *EventNotification) GetOldEntry() *Entry {
+ if x != nil {
+ return x.OldEntry
}
return nil
}
-func (m *FileChunk) GetSourceFid() *FileId {
- if m != nil {
- return m.SourceFid
+func (x *EventNotification) GetNewEntry() *Entry {
+ if x != nil {
+ return x.NewEntry
}
return nil
}
-type FileId struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"`
- Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie" json:"cookie,omitempty"`
+func (x *EventNotification) GetDeleteChunks() bool {
+ if x != nil {
+ return x.DeleteChunks
+ }
+ return false
}
-func (m *FileId) Reset() { *m = FileId{} }
-func (m *FileId) String() string { return proto.CompactTextString(m) }
-func (*FileId) ProtoMessage() {}
-func (*FileId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *FileId) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *EventNotification) GetNewParentPath() string {
+ if x != nil {
+ return x.NewParentPath
}
- return 0
+ return ""
}
-func (m *FileId) GetFileKey() uint64 {
- if m != nil {
- return m.FileKey
+func (x *EventNotification) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
}
- return 0
+ return false
}
-func (m *FileId) GetCookie() uint32 {
- if m != nil {
- return m.Cookie
+func (x *EventNotification) GetSignatures() []int32 {
+ if x != nil {
+ return x.Signatures
}
- return 0
+ return nil
}
-type FuseAttributes struct {
- FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
- Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"`
- FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode" json:"file_mode,omitempty"`
- Uid uint32 `protobuf:"varint,4,opt,name=uid" json:"uid,omitempty"`
- Gid uint32 `protobuf:"varint,5,opt,name=gid" json:"gid,omitempty"`
- Crtime int64 `protobuf:"varint,6,opt,name=crtime" json:"crtime,omitempty"`
- Mime string `protobuf:"bytes,7,opt,name=mime" json:"mime,omitempty"`
- Replication string `protobuf:"bytes,8,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,9,opt,name=collection" json:"collection,omitempty"`
- TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"`
- UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"`
- GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"`
- SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"`
-}
-
-func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
-func (m *FuseAttributes) String() string { return proto.CompactTextString(m) }
-func (*FuseAttributes) ProtoMessage() {}
-func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *FuseAttributes) GetFileSize() uint64 {
- if m != nil {
- return m.FileSize
- }
- return 0
+type FileChunk struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated
+ Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+ Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"`
+ ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"`
+ SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated
+ Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"`
+ SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"`
+ CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"`
+ IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"`
+ IsChunkManifest bool `protobuf:"varint,11,opt,name=is_chunk_manifest,json=isChunkManifest,proto3" json:"is_chunk_manifest,omitempty"` // content is a list of FileChunks
}
-func (m *FuseAttributes) GetMtime() int64 {
- if m != nil {
- return m.Mtime
+func (x *FileChunk) Reset() {
+ *x = FileChunk{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *FuseAttributes) GetFileMode() uint32 {
- if m != nil {
- return m.FileMode
+func (x *FileChunk) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileChunk) ProtoMessage() {}
+
+func (x *FileChunk) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *FuseAttributes) GetUid() uint32 {
- if m != nil {
- return m.Uid
+// Deprecated: Use FileChunk.ProtoReflect.Descriptor instead.
+func (*FileChunk) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *FileChunk) GetFileId() string {
+ if x != nil {
+ return x.FileId
}
- return 0
+ return ""
}
-func (m *FuseAttributes) GetGid() uint32 {
- if m != nil {
- return m.Gid
+func (x *FileChunk) GetOffset() int64 {
+ if x != nil {
+ return x.Offset
}
return 0
}
-func (m *FuseAttributes) GetCrtime() int64 {
- if m != nil {
- return m.Crtime
+func (x *FileChunk) GetSize() uint64 {
+ if x != nil {
+ return x.Size
}
return 0
}
-func (m *FuseAttributes) GetMime() string {
- if m != nil {
- return m.Mime
+func (x *FileChunk) GetMtime() int64 {
+ if x != nil {
+ return x.Mtime
}
- return ""
+ return 0
}
-func (m *FuseAttributes) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *FileChunk) GetETag() string {
+ if x != nil {
+ return x.ETag
}
return ""
}
-func (m *FuseAttributes) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *FileChunk) GetSourceFileId() string {
+ if x != nil {
+ return x.SourceFileId
}
return ""
}
-func (m *FuseAttributes) GetTtlSec() int32 {
- if m != nil {
- return m.TtlSec
+func (x *FileChunk) GetFid() *FileId {
+ if x != nil {
+ return x.Fid
}
- return 0
+ return nil
}
-func (m *FuseAttributes) GetUserName() string {
- if m != nil {
- return m.UserName
+func (x *FileChunk) GetSourceFid() *FileId {
+ if x != nil {
+ return x.SourceFid
}
- return ""
+ return nil
}
-func (m *FuseAttributes) GetGroupName() []string {
- if m != nil {
- return m.GroupName
+func (x *FileChunk) GetCipherKey() []byte {
+ if x != nil {
+ return x.CipherKey
}
return nil
}
-func (m *FuseAttributes) GetSymlinkTarget() string {
- if m != nil {
- return m.SymlinkTarget
+func (x *FileChunk) GetIsCompressed() bool {
+ if x != nil {
+ return x.IsCompressed
}
- return ""
+ return false
}
-type CreateEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+func (x *FileChunk) GetIsChunkManifest() bool {
+ if x != nil {
+ return x.IsChunkManifest
+ }
+ return false
}
-func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} }
-func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*CreateEntryRequest) ProtoMessage() {}
-func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+type FileChunkManifest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *CreateEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
- }
- return ""
+ Chunks []*FileChunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"`
}
-func (m *CreateEntryRequest) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *FileChunkManifest) Reset() {
+ *x = FileChunkManifest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type CreateEntryResponse struct {
+func (x *FileChunkManifest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} }
-func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*CreateEntryResponse) ProtoMessage() {}
-func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*FileChunkManifest) ProtoMessage() {}
-type UpdateEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+func (x *FileChunkManifest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} }
-func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*UpdateEntryRequest) ProtoMessage() {}
-func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *UpdateEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
- }
- return ""
+// Deprecated: Use FileChunkManifest.ProtoReflect.Descriptor instead.
+func (*FileChunkManifest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{8}
}
-func (m *UpdateEntryRequest) GetEntry() *Entry {
- if m != nil {
- return m.Entry
+func (x *FileChunkManifest) GetChunks() []*FileChunk {
+ if x != nil {
+ return x.Chunks
}
return nil
}
-type UpdateEntryResponse struct {
+type FileId struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"`
+ Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"`
}
-func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} }
-func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*UpdateEntryResponse) ProtoMessage() {}
-func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (x *FileId) Reset() {
+ *x = FileId{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type DeleteEntryRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
- // bool is_directory = 3;
- IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"`
- IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"`
- IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError" json:"ignore_recursive_error,omitempty"`
+func (x *FileId) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
-func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteEntryRequest) ProtoMessage() {}
-func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*FileId) ProtoMessage() {}
-func (m *DeleteEntryRequest) GetDirectory() string {
- if m != nil {
- return m.Directory
+func (x *FileId) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *DeleteEntryRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
+// Deprecated: Use FileId.ProtoReflect.Descriptor instead.
+func (*FileId) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{9}
}
-func (m *DeleteEntryRequest) GetIsDeleteData() bool {
- if m != nil {
- return m.IsDeleteData
+func (x *FileId) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
- return false
+ return 0
}
-func (m *DeleteEntryRequest) GetIsRecursive() bool {
- if m != nil {
- return m.IsRecursive
+func (x *FileId) GetFileKey() uint64 {
+ if x != nil {
+ return x.FileKey
}
- return false
+ return 0
}
-func (m *DeleteEntryRequest) GetIgnoreRecursiveError() bool {
- if m != nil {
- return m.IgnoreRecursiveError
+func (x *FileId) GetCookie() uint32 {
+ if x != nil {
+ return x.Cookie
}
- return false
+ return 0
}
-type DeleteEntryResponse struct {
+type FuseAttributes struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+ Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds
+ FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"`
+ Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"`
+ Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"`
+ Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds
+ Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"`
+ Replication string `protobuf:"bytes,8,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,9,opt,name=collection,proto3" json:"collection,omitempty"`
+ TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"`
+ UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs
+ GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs
+ SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"`
+ Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"`
+ DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
}
-func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
-func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteEntryResponse) ProtoMessage() {}
-func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (x *FuseAttributes) Reset() {
+ *x = FuseAttributes{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type AtomicRenameEntryRequest struct {
- OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"`
- OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"`
- NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory" json:"new_directory,omitempty"`
- NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName" json:"new_name,omitempty"`
+func (x *FuseAttributes) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} }
-func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) }
-func (*AtomicRenameEntryRequest) ProtoMessage() {}
-func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*FuseAttributes) ProtoMessage() {}
-func (m *AtomicRenameEntryRequest) GetOldDirectory() string {
- if m != nil {
- return m.OldDirectory
+func (x *FuseAttributes) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *AtomicRenameEntryRequest) GetOldName() string {
- if m != nil {
- return m.OldName
- }
- return ""
+// Deprecated: Use FuseAttributes.ProtoReflect.Descriptor instead.
+func (*FuseAttributes) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{10}
}
-func (m *AtomicRenameEntryRequest) GetNewDirectory() string {
- if m != nil {
- return m.NewDirectory
+func (x *FuseAttributes) GetFileSize() uint64 {
+ if x != nil {
+ return x.FileSize
}
- return ""
+ return 0
}
-func (m *AtomicRenameEntryRequest) GetNewName() string {
- if m != nil {
- return m.NewName
+func (x *FuseAttributes) GetMtime() int64 {
+ if x != nil {
+ return x.Mtime
}
- return ""
+ return 0
}
-type AtomicRenameEntryResponse struct {
+func (x *FuseAttributes) GetFileMode() uint32 {
+ if x != nil {
+ return x.FileMode
+ }
+ return 0
}
-func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} }
-func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) }
-func (*AtomicRenameEntryResponse) ProtoMessage() {}
-func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
-
-type AssignVolumeRequest struct {
- Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
- TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"`
- DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
+func (x *FuseAttributes) GetUid() uint32 {
+ if x != nil {
+ return x.Uid
+ }
+ return 0
}
-func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
-func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*AssignVolumeRequest) ProtoMessage() {}
-func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
-
-func (m *AssignVolumeRequest) GetCount() int32 {
- if m != nil {
- return m.Count
+func (x *FuseAttributes) GetGid() uint32 {
+ if x != nil {
+ return x.Gid
}
return 0
}
-func (m *AssignVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *FuseAttributes) GetCrtime() int64 {
+ if x != nil {
+ return x.Crtime
}
- return ""
+ return 0
}
-func (m *AssignVolumeRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *FuseAttributes) GetMime() string {
+ if x != nil {
+ return x.Mime
}
return ""
}
-func (m *AssignVolumeRequest) GetTtlSec() int32 {
- if m != nil {
- return m.TtlSec
+func (x *FuseAttributes) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
- return 0
+ return ""
}
-func (m *AssignVolumeRequest) GetDataCenter() string {
- if m != nil {
- return m.DataCenter
+func (x *FuseAttributes) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-type AssignVolumeResponse struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"`
+func (x *FuseAttributes) GetTtlSec() int32 {
+ if x != nil {
+ return x.TtlSec
+ }
+ return 0
}
-func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
-func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*AssignVolumeResponse) ProtoMessage() {}
-func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *AssignVolumeResponse) GetFileId() string {
- if m != nil {
- return m.FileId
+func (x *FuseAttributes) GetUserName() string {
+ if x != nil {
+ return x.UserName
}
return ""
}
-func (m *AssignVolumeResponse) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *FuseAttributes) GetGroupName() []string {
+ if x != nil {
+ return x.GroupName
}
- return ""
+ return nil
}
-func (m *AssignVolumeResponse) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *FuseAttributes) GetSymlinkTarget() string {
+ if x != nil {
+ return x.SymlinkTarget
}
return ""
}
-func (m *AssignVolumeResponse) GetCount() int32 {
- if m != nil {
- return m.Count
+func (x *FuseAttributes) GetMd5() []byte {
+ if x != nil {
+ return x.Md5
}
- return 0
+ return nil
}
-func (m *AssignVolumeResponse) GetAuth() string {
- if m != nil {
- return m.Auth
+func (x *FuseAttributes) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
}
return ""
}
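
// Aside, not part of the generated file: Mtime and Crtime are unix time in
// seconds (per the field comments above), not nanoseconds. A hedged sketch of
// populating the new struct; the concrete values are illustrative:
package main

import (
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	now := time.Now().Unix() // seconds, matching the proto comments
	attr := &filer_pb.FuseAttributes{
		FileSize: 4096,
		Mtime:    now,
		Crtime:   now,
		FileMode: 0644, // plain unix mode bits
		Uid:      1000,
		Gid:      1000,
		Mime:     "application/octet-stream",
		DiskType: "hdd", // new field in this revision; the value is an assumption
	}
	fmt.Println(attr.GetFileSize(), attr.GetMtime(), attr.GetDiskType())
}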
-type LookupVolumeRequest struct {
- VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
-}
+type CreateEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
-func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeRequest) ProtoMessage() {}
-func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
+ OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
+ Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"`
+}
-func (m *LookupVolumeRequest) GetVolumeIds() []string {
- if m != nil {
- return m.VolumeIds
+func (x *CreateEntryRequest) Reset() {
+ *x = CreateEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type Locations struct {
- Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"`
+func (x *CreateEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Locations) Reset() { *m = Locations{} }
-func (m *Locations) String() string { return proto.CompactTextString(m) }
-func (*Locations) ProtoMessage() {}
-func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (*CreateEntryRequest) ProtoMessage() {}
-func (m *Locations) GetLocations() []*Location {
- if m != nil {
- return m.Locations
+func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type Location struct {
- Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead.
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{11}
}
-func (m *Location) Reset() { *m = Location{} }
-func (m *Location) String() string { return proto.CompactTextString(m) }
-func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
-
-func (m *Location) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *CreateEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *Location) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *CreateEntryRequest) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
- return ""
+ return nil
}
-type LookupVolumeResponse struct {
- LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+func (x *CreateEntryRequest) GetOExcl() bool {
+ if x != nil {
+ return x.OExcl
+ }
+ return false
}
-func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
-func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeResponse) ProtoMessage() {}
-func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (x *CreateEntryRequest) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
+ }
+ return false
+}
-func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
- if m != nil {
- return m.LocationsMap
+func (x *CreateEntryRequest) GetSignatures() []int32 {
+ if x != nil {
+ return x.Signatures
}
return nil
}
-type DeleteCollectionRequest struct {
- Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
-}
+type CreateEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
-func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
-func (m *DeleteCollectionRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *CreateEntryResponse) Reset() {
+ *x = CreateEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type DeleteCollectionResponse struct {
+func (x *CreateEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
-func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (*CreateEntryResponse) ProtoMessage() {}
-type StatisticsRequest struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
+func (x *CreateEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
-func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
-func (*StatisticsRequest) ProtoMessage() {}
-func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+// Deprecated: Use CreateEntryResponse.ProtoReflect.Descriptor instead.
+func (*CreateEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{12}
+}
-func (m *StatisticsRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *CreateEntryResponse) GetError() string {
+ if x != nil {
+ return x.Error
}
return ""
}
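
// Aside, not part of the generated file: CreateEntryRequest gains OExcl,
// IsFromOtherCluster and Signatures relative to the old code, and failures
// are reported through CreateEntryResponse.Error. The O_EXCL-style reading
// of OExcl is an assumption from the name; Entry is defined earlier in this
// file. A sketch:
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	req := &filer_pb.CreateEntryRequest{
		Directory: "/buckets/demo",
		Entry:     &filer_pb.Entry{Name: "hello.txt"},
		OExcl:     true, // presumably: fail if the entry already exists
	}
	// Nil-safe getters chain without intermediate checks:
	fmt.Println(req.GetDirectory(), req.GetEntry().GetName(), req.GetOExcl())

	resp := &filer_pb.CreateEntryResponse{}
	if resp.GetError() != "" {
		fmt.Println("create failed:", resp.GetError())
	}
}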
-func (m *StatisticsRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+type UpdateEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
+ Signatures []int32 `protobuf:"varint,4,rep,packed,name=signatures,proto3" json:"signatures,omitempty"`
}
-func (m *StatisticsRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *UpdateEntryRequest) Reset() {
+ *x = UpdateEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type StatisticsResponse struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
- TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"`
- FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
+func (x *UpdateEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
-func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
-func (*StatisticsResponse) ProtoMessage() {}
-func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (*UpdateEntryRequest) ProtoMessage() {}
-func (m *StatisticsResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *UpdateEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateEntryRequest.ProtoReflect.Descriptor instead.
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{13}
}
-func (m *StatisticsResponse) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *UpdateEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
}
return ""
}
-func (m *StatisticsResponse) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *UpdateEntryRequest) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
}
- return ""
+ return nil
}
-func (m *StatisticsResponse) GetTotalSize() uint64 {
- if m != nil {
- return m.TotalSize
+func (x *UpdateEntryRequest) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
}
- return 0
+ return false
}
-func (m *StatisticsResponse) GetUsedSize() uint64 {
- if m != nil {
- return m.UsedSize
+func (x *UpdateEntryRequest) GetSignatures() []int32 {
+ if x != nil {
+ return x.Signatures
}
- return 0
+ return nil
+}
+
+type UpdateEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *StatisticsResponse) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
+func (x *UpdateEntryResponse) Reset() {
+ *x = UpdateEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type GetFilerConfigurationRequest struct {
+func (x *UpdateEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} }
-func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) }
-func (*GetFilerConfigurationRequest) ProtoMessage() {}
-func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (*UpdateEntryResponse) ProtoMessage() {}
-type GetFilerConfigurationResponse struct {
- Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"`
- Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
- MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"`
+func (x *UpdateEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateEntryResponse.ProtoReflect.Descriptor instead.
+func (*UpdateEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{14}
+}
+
+type AppendToEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"`
+ Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"`
+}
+
+func (x *AppendToEntryRequest) Reset() {
+ *x = AppendToEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AppendToEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AppendToEntryRequest) ProtoMessage() {}
+
+func (x *AppendToEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AppendToEntryRequest.ProtoReflect.Descriptor instead.
+func (*AppendToEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *AppendToEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
+ }
+ return ""
}
-func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} }
-func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) }
-func (*GetFilerConfigurationResponse) ProtoMessage() {}
-func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (x *AppendToEntryRequest) GetEntryName() string {
+ if x != nil {
+ return x.EntryName
+ }
+ return ""
+}
-func (m *GetFilerConfigurationResponse) GetMasters() []string {
- if m != nil {
- return m.Masters
+func (x *AppendToEntryRequest) GetChunks() []*FileChunk {
+ if x != nil {
+ return x.Chunks
}
return nil
}
-func (m *GetFilerConfigurationResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+type AppendToEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AppendToEntryResponse) Reset() {
+ *x = AppendToEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AppendToEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AppendToEntryResponse) ProtoMessage() {}
+
+func (x *AppendToEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AppendToEntryResponse.ProtoReflect.Descriptor instead.
+func (*AppendToEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{16}
+}
+
+type DeleteEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // bool is_directory = 3;
+ IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData,proto3" json:"is_delete_data,omitempty"`
+ IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive,proto3" json:"is_recursive,omitempty"`
+ IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"`
+ IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
+ Signatures []int32 `protobuf:"varint,8,rep,packed,name=signatures,proto3" json:"signatures,omitempty"`
+}
+
+func (x *DeleteEntryRequest) Reset() {
+ *x = DeleteEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteEntryRequest) ProtoMessage() {}
+
+func (x *DeleteEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteEntryRequest.ProtoReflect.Descriptor instead.
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *DeleteEntryRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
+ }
+ return ""
+}
+
+func (x *DeleteEntryRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteEntryRequest) GetIsDeleteData() bool {
+ if x != nil {
+ return x.IsDeleteData
+ }
+ return false
+}
+
+func (x *DeleteEntryRequest) GetIsRecursive() bool {
+ if x != nil {
+ return x.IsRecursive
+ }
+ return false
+}
+
+func (x *DeleteEntryRequest) GetIgnoreRecursiveError() bool {
+ if x != nil {
+ return x.IgnoreRecursiveError
+ }
+ return false
+}
+
+func (x *DeleteEntryRequest) GetIsFromOtherCluster() bool {
+ if x != nil {
+ return x.IsFromOtherCluster
+ }
+ return false
+}
+
+func (x *DeleteEntryRequest) GetSignatures() []int32 {
+ if x != nil {
+ return x.Signatures
+ }
+ return nil
+}
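+
// Aside, not part of the generated file: the delete request carries three
// orthogonal flags plus the new cross-cluster fields; the semantics noted
// below are inferred from the field names, so treat them as assumptions.
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	req := &filer_pb.DeleteEntryRequest{
		Directory:            "/buckets/demo",
		Name:                 "old-data",
		IsDeleteData:         true, // reclaim the chunks, not just the entry
		IsRecursive:          true, // descend into sub-directories
		IgnoreRecursiveError: true, // keep going past failures on children
	}
	fmt.Println(req.GetIsDeleteData(), req.GetIsRecursive(), req.GetIgnoreRecursiveError())
}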
+
+type DeleteEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *DeleteEntryResponse) Reset() {
+ *x = DeleteEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteEntryResponse) ProtoMessage() {}
+
+func (x *DeleteEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteEntryResponse.ProtoReflect.Descriptor instead.
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *DeleteEntryResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+type AtomicRenameEntryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"`
+ OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"`
+ NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"`
+ NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
+}
+
+func (x *AtomicRenameEntryRequest) Reset() {
+ *x = AtomicRenameEntryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AtomicRenameEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AtomicRenameEntryRequest) ProtoMessage() {}
+
+func (x *AtomicRenameEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AtomicRenameEntryRequest.ProtoReflect.Descriptor instead.
+func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *AtomicRenameEntryRequest) GetOldDirectory() string {
+ if x != nil {
+ return x.OldDirectory
+ }
+ return ""
+}
+
+func (x *AtomicRenameEntryRequest) GetOldName() string {
+ if x != nil {
+ return x.OldName
+ }
+ return ""
+}
+
+func (x *AtomicRenameEntryRequest) GetNewDirectory() string {
+ if x != nil {
+ return x.NewDirectory
+ }
+ return ""
+}
+
+func (x *AtomicRenameEntryRequest) GetNewName() string {
+ if x != nil {
+ return x.NewName
+ }
+ return ""
+}
+
+type AtomicRenameEntryResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AtomicRenameEntryResponse) Reset() {
+ *x = AtomicRenameEntryResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AtomicRenameEntryResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AtomicRenameEntryResponse) ProtoMessage() {}
+
+func (x *AtomicRenameEntryResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AtomicRenameEntryResponse.ProtoReflect.Descriptor instead.
+func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{20}
+}
+
+type AssignVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"`
+ DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ Path string `protobuf:"bytes,6,opt,name=path,proto3" json:"path,omitempty"`
+ Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"`
+ DiskType string `protobuf:"bytes,8,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *AssignVolumeRequest) Reset() {
+ *x = AssignVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignVolumeRequest) ProtoMessage() {}
+
+func (x *AssignVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignVolumeRequest.ProtoReflect.Descriptor instead.
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *AssignVolumeRequest) GetCount() int32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetTtlSec() int32 {
+ if x != nil {
+ return x.TtlSec
+ }
+ return 0
+}
+
+func (x *AssignVolumeRequest) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetRack() string {
+ if x != nil {
+ return x.Rack
+ }
+ return ""
+}
+
+func (x *AssignVolumeRequest) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type AssignVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+ Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"`
+ Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"`
+ Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *AssignVolumeResponse) Reset() {
+ *x = AssignVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignVolumeResponse) ProtoMessage() {}
+
+func (x *AssignVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignVolumeResponse.ProtoReflect.Descriptor instead.
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *AssignVolumeResponse) GetFileId() string {
+ if x != nil {
+ return x.FileId
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetCount() int32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignVolumeResponse) GetAuth() string {
+ if x != nil {
+ return x.Auth
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *AssignVolumeResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
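+
// Aside, not part of the generated file: the response now surfaces
// Collection, Replication and an Error string next to the assigned FileId.
// The upload-URL shape below is an assumption based on SeaweedFS's usual
// http://<volume server>/<file id> convention, and the file id is made up.
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	req := &filer_pb.AssignVolumeRequest{
		Count:      1,
		Collection: "demo",
		TtlSec:     3600,
		DiskType:   "hdd", // new in this revision, alongside Path and Rack
	}
	_ = req // would be sent over the AssignVolume RPC

	// Suppose this came back from the filer:
	resp := &filer_pb.AssignVolumeResponse{FileId: "3,01637037d6", Url: "127.0.0.1:8080"}
	if resp.GetError() != "" {
		fmt.Println("assign failed:", resp.GetError())
		return
	}
	fmt.Printf("http://%s/%s\n", resp.GetUrl(), resp.GetFileId())
}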
+
+type LookupVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"`
+}
+
+func (x *LookupVolumeRequest) Reset() {
+ *x = LookupVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeRequest) ProtoMessage() {}
+
+func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead.
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *LookupVolumeRequest) GetVolumeIds() []string {
+ if x != nil {
+ return x.VolumeIds
+ }
+ return nil
+}
+
+type Locations struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"`
+}
+
+func (x *Locations) Reset() {
+ *x = Locations{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Locations) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Locations) ProtoMessage() {}
+
+func (x *Locations) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Locations.ProtoReflect.Descriptor instead.
+func (*Locations) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *Locations) GetLocations() []*Location {
+ if x != nil {
+ return x.Locations
+ }
+ return nil
+}
+
+type Location struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+}
+
+func (x *Location) Reset() {
+ *x = Location{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Location) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Location) ProtoMessage() {}
+
+func (x *Location) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Location.ProtoReflect.Descriptor instead.
+func (*Location) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *Location) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Location) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+type LookupVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *LookupVolumeResponse) Reset() {
+ *x = LookupVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeResponse) ProtoMessage() {}
+
+func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead.
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
+ if x != nil {
+ return x.LocationsMap
+ }
+ return nil
+}
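+
// Aside, not part of the generated file: LocationsMap keys a volume id
// string to its replica Locations. A minimal resolution sketch with
// illustrative addresses:
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	resp := &filer_pb.LookupVolumeResponse{
		LocationsMap: map[string]*filer_pb.Locations{
			"3": {Locations: []*filer_pb.Location{
				{Url: "127.0.0.1:8080", PublicUrl: "volume1.example.com"},
			}},
		},
	}
	if locs, ok := resp.GetLocationsMap()["3"]; ok {
		for _, loc := range locs.GetLocations() {
			fmt.Println(loc.GetUrl(), loc.GetPublicUrl())
		}
	}
}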
+
+type Collection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Collection) Reset() {
+ *x = Collection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Collection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Collection) ProtoMessage() {}
+
+func (x *Collection) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Collection.ProtoReflect.Descriptor instead.
+func (*Collection) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *Collection) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type CollectionListRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"`
+ IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"`
+}
+
+func (x *CollectionListRequest) Reset() {
+ *x = CollectionListRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionListRequest) ProtoMessage() {}
+
+func (x *CollectionListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead.
+func (*CollectionListRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *CollectionListRequest) GetIncludeNormalVolumes() bool {
+ if x != nil {
+ return x.IncludeNormalVolumes
+ }
+ return false
+}
+
+func (x *CollectionListRequest) GetIncludeEcVolumes() bool {
+ if x != nil {
+ return x.IncludeEcVolumes
+ }
+ return false
+}
+
+type CollectionListResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
+}
+
+func (x *CollectionListResponse) Reset() {
+ *x = CollectionListResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionListResponse) ProtoMessage() {}
+
+func (x *CollectionListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead.
+func (*CollectionListResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *CollectionListResponse) GetCollections() []*Collection {
+ if x != nil {
+ return x.Collections
+ }
+ return nil
+}
+
+type DeleteCollectionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *DeleteCollectionRequest) Reset() {
+ *x = DeleteCollectionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteCollectionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionRequest) ProtoMessage() {}
+
+func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *DeleteCollectionRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type DeleteCollectionResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteCollectionResponse) Reset() {
+ *x = DeleteCollectionResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteCollectionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionResponse) ProtoMessage() {}
+
+func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{31}
+}
+
+type StatisticsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *StatisticsRequest) Reset() {
+ *x = StatisticsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsRequest) ProtoMessage() {}
+
+func (x *StatisticsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead.
+func (*StatisticsRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *StatisticsRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type StatisticsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+ UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"`
+ FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+}
+
+func (x *StatisticsResponse) Reset() {
+ *x = StatisticsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsResponse) ProtoMessage() {}
+
+func (x *StatisticsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead.
+func (*StatisticsResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *StatisticsResponse) GetTotalSize() uint64 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetUsedSize() uint64 {
+ if x != nil {
+ return x.UsedSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
+ }
+ return 0
+}
+
+type GetFilerConfigurationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetFilerConfigurationRequest) Reset() {
+ *x = GetFilerConfigurationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFilerConfigurationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFilerConfigurationRequest) ProtoMessage() {}
+
+func (x *GetFilerConfigurationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFilerConfigurationRequest.ProtoReflect.Descriptor instead.
+func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{34}
+}
+
+type GetFilerConfigurationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"`
+ Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"`
+ DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"`
+ Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"`
+ Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"`
+ MetricsAddress string `protobuf:"bytes,9,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
+ MetricsIntervalSec int32 `protobuf:"varint,10,opt,name=metrics_interval_sec,json=metricsIntervalSec,proto3" json:"metrics_interval_sec,omitempty"`
+}
+
+func (x *GetFilerConfigurationResponse) Reset() {
+ *x = GetFilerConfigurationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFilerConfigurationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFilerConfigurationResponse) ProtoMessage() {}
+
+func (x *GetFilerConfigurationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFilerConfigurationResponse.ProtoReflect.Descriptor instead.
+func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *GetFilerConfigurationResponse) GetMasters() []string {
+ if x != nil {
+ return x.Masters
+ }
+ return nil
+}
+
+func (x *GetFilerConfigurationResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetMaxMb() uint32 {
+ if x != nil {
+ return x.MaxMb
+ }
+ return 0
+}
+
+func (x *GetFilerConfigurationResponse) GetDirBuckets() string {
+ if x != nil {
+ return x.DirBuckets
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetCipher() bool {
+ if x != nil {
+ return x.Cipher
+ }
+ return false
+}
+
+func (x *GetFilerConfigurationResponse) GetSignature() int32 {
+ if x != nil {
+ return x.Signature
+ }
+ return 0
+}
+
+func (x *GetFilerConfigurationResponse) GetMetricsAddress() string {
+ if x != nil {
+ return x.MetricsAddress
+ }
+ return ""
+}
+
+func (x *GetFilerConfigurationResponse) GetMetricsIntervalSec() int32 {
+ if x != nil {
+ return x.MetricsIntervalSec
+ }
+ return 0
+}
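+
// Aside, not part of the generated file: the configuration response grows
// DirBuckets, Cipher, Signature and the metrics pair relative to the old
// code; the tag numbers skip 6, mirroring the .proto. A read-side sketch
// with illustrative values (the MaxMb reading is an assumption from the name):
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	conf := &filer_pb.GetFilerConfigurationResponse{
		Masters:            []string{"localhost:9333"},
		MaxMb:              4, // presumably the upload chunk cut-off in MB
		DirBuckets:         "/buckets",
		MetricsIntervalSec: 15,
	}
	fmt.Println(conf.GetMasters(), conf.GetMaxMb(), conf.GetDirBuckets())
}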
+
+type SubscribeMetadataRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"`
+ PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"`
+ SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+ Signature int32 `protobuf:"varint,4,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SubscribeMetadataRequest) Reset() {
+ *x = SubscribeMetadataRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeMetadataRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeMetadataRequest) ProtoMessage() {}
+
+func (x *SubscribeMetadataRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeMetadataRequest.ProtoReflect.Descriptor instead.
+func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *SubscribeMetadataRequest) GetClientName() string {
+ if x != nil {
+ return x.ClientName
+ }
+ return ""
+}
+
+func (x *SubscribeMetadataRequest) GetPathPrefix() string {
+ if x != nil {
+ return x.PathPrefix
+ }
+ return ""
+}
+
+func (x *SubscribeMetadataRequest) GetSinceNs() int64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+func (x *SubscribeMetadataRequest) GetSignature() int32 {
+ if x != nil {
+ return x.Signature
+ }
+ return 0
+}
+
+type SubscribeMetadataResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"`
+ TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"`
+}
+
+func (x *SubscribeMetadataResponse) Reset() {
+ *x = SubscribeMetadataResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeMetadataResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeMetadataResponse) ProtoMessage() {}
+
+func (x *SubscribeMetadataResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeMetadataResponse.ProtoReflect.Descriptor instead.
+func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *SubscribeMetadataResponse) GetDirectory() string {
+ if x != nil {
+ return x.Directory
+ }
+ return ""
+}
+
+func (x *SubscribeMetadataResponse) GetEventNotification() *EventNotification {
+ if x != nil {
+ return x.EventNotification
+ }
+ return nil
+}
+
+func (x *SubscribeMetadataResponse) GetTsNs() int64 {
+ if x != nil {
+ return x.TsNs
+ }
+ return 0
+}
+
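For orientation, here is a minimal client-side sketch of how this request/response pair is consumed. It is not part of the generated file; it assumes filer.proto declares SubscribeMetadata as a server-streaming RPC on the SeaweedFiler service (so the generated code exposes NewSeaweedFilerClient and a streaming method), and it assumes the import path github.com/chrislusf/seaweedfs/weed/pb/filer_pb.

package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" // assumed import path
)

func main() {
	// Dial the filer's gRPC port (the HTTP port + 10000 by default).
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)

	// Ask for every metadata event under /buckets, from the beginning of time.
	stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
		ClientName: "example-subscriber",
		PathPrefix: "/buckets",
		SinceNs:    0,
	})
	if err != nil {
		log.Fatal(err)
	}

	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Fatal(err)
		}
		// Each response carries the directory, the event payload, and a timestamp.
		log.Printf("%s changed at %d ns", resp.GetDirectory(), resp.GetTsNs())
	}
}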
+type LogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"`
+ PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *LogEntry) Reset() {
+ *x = LogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogEntry) ProtoMessage() {}
+
+func (x *LogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead.
+func (*LogEntry) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *LogEntry) GetTsNs() int64 {
+ if x != nil {
+ return x.TsNs
+ }
+ return 0
+}
+
+func (x *LogEntry) GetPartitionKeyHash() int32 {
+ if x != nil {
+ return x.PartitionKeyHash
+ }
+ return 0
+}
+
+func (x *LogEntry) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type KeepConnectedRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"`
+ Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"`
+}
+
+func (x *KeepConnectedRequest) Reset() {
+ *x = KeepConnectedRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepConnectedRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepConnectedRequest) ProtoMessage() {}
+
+func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead.
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{39}
+}
+
+func (x *KeepConnectedRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *KeepConnectedRequest) GetGrpcPort() uint32 {
+ if x != nil {
+ return x.GrpcPort
+ }
+ return 0
+}
+
+func (x *KeepConnectedRequest) GetResources() []string {
+ if x != nil {
+ return x.Resources
+ }
+ return nil
+}
+
+type KeepConnectedResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *KeepConnectedResponse) Reset() {
+ *x = KeepConnectedResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepConnectedResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepConnectedResponse) ProtoMessage() {}
+
+func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead.
+func (*KeepConnectedResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{40}
+}
+
+type LocateBrokerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *LocateBrokerRequest) Reset() {
+ *x = LocateBrokerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LocateBrokerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocateBrokerRequest) ProtoMessage() {}
+
+func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead.
+func (*LocateBrokerRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{41}
+}
+
+func (x *LocateBrokerRequest) GetResource() string {
+ if x != nil {
+ return x.Resource
+ }
+ return ""
+}
+
+type LocateBrokerResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"`
+ Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"`
+}
+
+func (x *LocateBrokerResponse) Reset() {
+ *x = LocateBrokerResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LocateBrokerResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocateBrokerResponse) ProtoMessage() {}
+
+func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocateBrokerResponse.ProtoReflect.Descriptor instead.
+func (*LocateBrokerResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{42}
+}
+
+func (x *LocateBrokerResponse) GetFound() bool {
+ if x != nil {
+ return x.Found
+ }
+ return false
+}
+
+func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource {
+ if x != nil {
+ return x.Resources
+ }
+ return nil
+}
+
+// Key-Value operations
+type KvGetRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (x *KvGetRequest) Reset() {
+ *x = KvGetRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KvGetRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KvGetRequest) ProtoMessage() {}
+
+func (x *KvGetRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KvGetRequest.ProtoReflect.Descriptor instead.
+func (*KvGetRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{43}
+}
+
+func (x *KvGetRequest) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+type KvGetResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *KvGetResponse) Reset() {
+ *x = KvGetResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KvGetResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KvGetResponse) ProtoMessage() {}
+
+func (x *KvGetResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KvGetResponse.ProtoReflect.Descriptor instead.
+func (*KvGetResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{44}
+}
+
+func (x *KvGetResponse) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *KvGetResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+type KvPutRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KvPutRequest) Reset() {
+ *x = KvPutRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KvPutRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KvPutRequest) ProtoMessage() {}
+
+func (x *KvPutRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[45]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KvPutRequest.ProtoReflect.Descriptor instead.
+func (*KvPutRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{45}
+}
+
+func (x *KvPutRequest) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *KvPutRequest) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type KvPutResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *KvPutResponse) Reset() {
+ *x = KvPutResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KvPutResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KvPutResponse) ProtoMessage() {}
+
+func (x *KvPutResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[46]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KvPutResponse.ProtoReflect.Descriptor instead.
+func (*KvPutResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{46}
+}
+
+func (x *KvPutResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
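A sketch of the round trip these four key-value messages support, assuming filer.proto declares unary KvPut and KvGet RPCs on SeaweedFiler and the same client construction as in the earlier SubscribeMetadata sketch. Note that both responses report failures in-band through their error fields, in addition to the usual transport-level gRPC error.

package kvexample

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" // assumed import path
)

// roundTrip stores a value under a key and reads it back.
func roundTrip(ctx context.Context, client filer_pb.SeaweedFilerClient) ([]byte, error) {
	put, err := client.KvPut(ctx, &filer_pb.KvPutRequest{
		Key:   []byte("example-key"),
		Value: []byte("example-value"),
	})
	if err != nil {
		return nil, err // transport-level failure
	}
	if put.GetError() != "" {
		return nil, fmt.Errorf("KvPut: %s", put.GetError()) // in-band failure
	}

	get, err := client.KvGet(ctx, &filer_pb.KvGetRequest{Key: []byte("example-key")})
	if err != nil {
		return nil, err
	}
	if get.GetError() != "" {
		return nil, fmt.Errorf("KvGet: %s", get.GetError())
	}
	return get.GetValue(), nil
}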
+// path-based configurations
+type FilerConf struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
+ Locations []*FilerConf_PathConf `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"`
+}
+
+func (x *FilerConf) Reset() {
+ *x = FilerConf{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FilerConf) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FilerConf) ProtoMessage() {}
+
+func (x *FilerConf) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[47]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FilerConf.ProtoReflect.Descriptor instead.
+func (*FilerConf) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{47}
+}
+
+func (x *FilerConf) GetVersion() int32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+func (x *FilerConf) GetLocations() []*FilerConf_PathConf {
+ if x != nil {
+ return x.Locations
+ }
+ return nil
+}
+
+// if found, send the exact address
+// if not found, send the full list of existing brokers
+type LocateBrokerResponse_Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"`
+ ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" json:"resource_count,omitempty"`
+}
+
+func (x *LocateBrokerResponse_Resource) Reset() {
+ *x = LocateBrokerResponse_Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LocateBrokerResponse_Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocateBrokerResponse_Resource) ProtoMessage() {}
+
+func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[50]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead.
+func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{42, 0}
+}
+
+func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string {
+ if x != nil {
+ return x.GrpcAddresses
+ }
+ return ""
+}
+
+func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 {
+ if x != nil {
+ return x.ResourceCount
+ }
+ return 0
+}
+
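Given the found/not-found contract described in the comment above LocateBrokerResponse_Resource, callers have to branch on Found. A hedged sketch follows: LocateBroker is assumed to be a unary RPC on SeaweedFiler, and the least-loaded fallback is one plausible policy rather than anything the proto prescribes.

package brokerexample

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" // assumed import path
)

// pickBroker resolves a resource name to a broker gRPC address.
func pickBroker(ctx context.Context, client filer_pb.SeaweedFilerClient, resource string) (string, error) {
	resp, err := client.LocateBroker(ctx, &filer_pb.LocateBrokerRequest{Resource: resource})
	if err != nil {
		return "", err
	}
	if len(resp.GetResources()) == 0 {
		return "", fmt.Errorf("no brokers known for %q", resource)
	}
	if resp.GetFound() {
		// Found: the entry is the exact broker already serving this resource.
		return resp.GetResources()[0].GetGrpcAddresses(), nil
	}
	// Not found: the response lists all existing brokers; pick the one carrying
	// the fewest resources (an illustrative load-balancing choice).
	best := resp.GetResources()[0]
	for _, r := range resp.GetResources()[1:] {
		if r.GetResourceCount() < best.GetResourceCount() {
			best = r
		}
	}
	return best.GetGrpcAddresses(), nil
}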
+type FilerConf_PathConf struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DiskType string `protobuf:"bytes,5,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+ Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"`
+ VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"`
+}
+
+func (x *FilerConf_PathConf) Reset() {
+ *x = FilerConf_PathConf{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FilerConf_PathConf) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FilerConf_PathConf) ProtoMessage() {}
+
+func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FilerConf_PathConf.ProtoReflect.Descriptor instead.
+func (*FilerConf_PathConf) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{47, 0}
+}
+
+func (x *FilerConf_PathConf) GetLocationPrefix() string {
+ if x != nil {
+ return x.LocationPrefix
+ }
+ return ""
+}
+
+func (x *FilerConf_PathConf) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *FilerConf_PathConf) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *FilerConf_PathConf) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *FilerConf_PathConf) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
}
return ""
}
-func (m *GetFilerConfigurationResponse) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *FilerConf_PathConf) GetFsync() bool {
+ if x != nil {
+ return x.Fsync
}
- return ""
+ return false
}
-func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 {
- if m != nil {
- return m.MaxMb
+func (x *FilerConf_PathConf) GetVolumeGrowthCount() uint32 {
+ if x != nil {
+ return x.VolumeGrowthCount
}
return 0
}
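FilerConf and its nested FilerConf_PathConf are added in this diff; here is a brief sketch of building one and encoding it with the APIv2 runtime this regenerated file targets. The prefix and values below are made up for illustration.

package confexample

import (
	"log"

	"google.golang.org/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" // assumed import path
)

func buildConf() []byte {
	// One path rule: everything under this prefix gets its own collection,
	// lands on SSD, and is fsync'ed on every write.
	conf := &filer_pb.FilerConf{
		Version: 1,
		Locations: []*filer_pb.FilerConf_PathConf{{
			LocationPrefix: "/buckets/important/",
			Collection:     "important",
			Replication:    "001",
			Ttl:            "7d",
			DiskType:       "ssd",
			Fsync:          true,
		}},
	}

	// APIv2 generated messages marshal through google.golang.org/protobuf/proto.
	data, err := proto.Marshal(conf)
	if err != nil {
		log.Fatal(err)
	}
	return data
}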
-func init() {
- proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
- proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse")
- proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest")
- proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse")
- proto.RegisterType((*Entry)(nil), "filer_pb.Entry")
- proto.RegisterType((*FullEntry)(nil), "filer_pb.FullEntry")
- proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification")
- proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk")
- proto.RegisterType((*FileId)(nil), "filer_pb.FileId")
- proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes")
- proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
- proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
- proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
- proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
- proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
- proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
- proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest")
- proto.RegisterType((*AtomicRenameEntryResponse)(nil), "filer_pb.AtomicRenameEntryResponse")
- proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
- proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
- proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest")
- proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
- proto.RegisterType((*Location)(nil), "filer_pb.Location")
- proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
- proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest")
- proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse")
- proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest")
- proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse")
- proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest")
- proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse")
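The init()-time proto.RegisterType calls removed above belong to the legacy github.com/golang/protobuf (APIv1) runtime. The regenerated file replaces them with the raw descriptor below: file_filer_proto_rawDesc is the wire-format FileDescriptorProto for filer.proto, from which the google.golang.org/protobuf (APIv2) runtime builds descriptors on first use, and which file_filer_proto_rawDescGZIP compresses once to back the deprecated Descriptor() methods seen earlier.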
+var File_filer_proto protoreflect.FileDescriptor
+
+var file_filer_proto_rawDesc = []byte{
+ 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x1b, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x45, 0x0a, 0x1c, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22,
+ 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x11,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72,
+ 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76,
+ 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x22, 0x3c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x85,
+ 0x03, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12,
+ 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43,
+ 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x75, 0x73, 0x65,
+ 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64,
+ 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64,
+ 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65,
+ 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x69,
+ 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f,
+ 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12,
+ 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x09, 0x46, 0x75, 0x6c, 0x6c, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x8f, 0x02, 0x0a,
+ 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x6f, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23,
+ 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x68, 0x75,
+ 0x6e, 0x6b, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
+ 0x77, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, 0x15, 0x69,
+ 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72,
+ 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e,
+ 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xe6,
+ 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07,
+ 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a,
+ 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65,
+ 0x49, 0x64, 0x12, 0x22, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49,
+ 0x64, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x66, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x09, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65,
+ 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x69, 0x70,
+ 0x68, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6d,
+ 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69,
+ 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x69,
+ 0x73, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74,
+ 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d,
+ 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x43,
+ 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x06,
+ 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e,
+ 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x58, 0x0a, 0x06, 0x46, 0x69, 0x6c,
+ 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63,
+ 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6f,
+ 0x6b, 0x69, 0x65, 0x22, 0x9d, 0x03, 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c,
+ 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x69,
+ 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x63, 0x72, 0x74, 0x69,
+ 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f,
+ 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65,
+ 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a,
+ 0x0e, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
+ 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x03, 0x6d, 0x64, 0x35, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54,
+ 0x79, 0x70, 0x65, 0x22, 0xc3, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72,
+ 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x15, 0x0a, 0x06, 0x6f, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x05, 0x6f, 0x45, 0x78, 0x63, 0x6c, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f,
+ 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68,
+ 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xac, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a,
+ 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65,
+ 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74,
+ 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80, 0x01, 0x0a,
+ 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69,
+ 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22,
+ 0x17, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x02, 0x0a, 0x12, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65,
+ 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69,
+ 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a,
+ 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a,
+ 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a,
+ 0x19, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x13, 0x41,
+ 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74,
+ 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c,
+ 0x53, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65,
+ 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09,
+ 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x41, 0x73,
+ 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75,
+ 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x34,
+ 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x64, 0x73, 0x22, 0x3d, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x30, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x22, 0x3b, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72,
+ 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c,
+ 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70,
+ 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x20, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72,
+ 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75,
+ 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x50, 0x0a, 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x36, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84,
+ 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73,
+ 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
+ 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73,
+ 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75,
+ 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c,
+ 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x02, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69,
+ 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64,
+ 0x69, 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x64, 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06,
+ 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69,
+ 0x70, 0x68, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f,
+ 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x95, 0x01,
+ 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70,
+ 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08,
+ 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
+ 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a,
+ 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73,
+ 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13,
+ 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74,
+ 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73,
+ 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c,
+ 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15,
+ 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42,
+ 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63,
+ 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58,
+ 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72,
+ 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65,
+ 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x4b, 0x76, 0x47, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x4b, 0x76,
+ 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, 0x50, 0x75, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+ 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xce, 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x72,
+ 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a,
+ 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c,
+ 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x52,
+ 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x08, 0x50,
+ 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77,
+ 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x32, 0xdc, 0x0c, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77,
+ 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
+ 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e,
+ 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30,
+ 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a,
+ 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64,
+ 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64,
+ 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12,
+ 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67,
+ 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x69, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
+ 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53,
+ 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74,
+ 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65,
+ 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, 0x0a,
+ 0x0c, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42,
+ 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72,
+ 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a,
+ 0x0a, 0x05, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76,
+ 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b,
+ 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69,
+ 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
+ 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
+ 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
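+
+// The byte array above is the serialized FileDescriptorProto for
+// filer.proto. The trailing bytes encode the file options readable in the
+// hex (java package "seaweedfs.client", outer classname "FilerProto", Go
+// import path "github.com/chrislusf/seaweedfs/weed/pb/filer_pb") followed
+// by the closing "proto3" syntax marker.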
+
+var (
+ file_filer_proto_rawDescOnce sync.Once
+ file_filer_proto_rawDescData = file_filer_proto_rawDesc
+)
+
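+// file_filer_proto_rawDescGZIP lazily gzip-compresses the raw descriptor:
+// the sync.Once guard means the compression runs at most once, and only
+// when something (typically a legacy Descriptor() accessor) actually asks
+// for the gzipped form.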
+func file_filer_proto_rawDescGZIP() []byte {
+ file_filer_proto_rawDescOnce.Do(func() {
+ file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(file_filer_proto_rawDescData)
+ })
+ return file_filer_proto_rawDescData
+}
+
+var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 52)
+var file_filer_proto_goTypes = []interface{}{
+ (*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
+ (*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
+ (*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest
+ (*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse
+ (*Entry)(nil), // 4: filer_pb.Entry
+ (*FullEntry)(nil), // 5: filer_pb.FullEntry
+ (*EventNotification)(nil), // 6: filer_pb.EventNotification
+ (*FileChunk)(nil), // 7: filer_pb.FileChunk
+ (*FileChunkManifest)(nil), // 8: filer_pb.FileChunkManifest
+ (*FileId)(nil), // 9: filer_pb.FileId
+ (*FuseAttributes)(nil), // 10: filer_pb.FuseAttributes
+ (*CreateEntryRequest)(nil), // 11: filer_pb.CreateEntryRequest
+ (*CreateEntryResponse)(nil), // 12: filer_pb.CreateEntryResponse
+ (*UpdateEntryRequest)(nil), // 13: filer_pb.UpdateEntryRequest
+ (*UpdateEntryResponse)(nil), // 14: filer_pb.UpdateEntryResponse
+ (*AppendToEntryRequest)(nil), // 15: filer_pb.AppendToEntryRequest
+ (*AppendToEntryResponse)(nil), // 16: filer_pb.AppendToEntryResponse
+ (*DeleteEntryRequest)(nil), // 17: filer_pb.DeleteEntryRequest
+ (*DeleteEntryResponse)(nil), // 18: filer_pb.DeleteEntryResponse
+ (*AtomicRenameEntryRequest)(nil), // 19: filer_pb.AtomicRenameEntryRequest
+ (*AtomicRenameEntryResponse)(nil), // 20: filer_pb.AtomicRenameEntryResponse
+ (*AssignVolumeRequest)(nil), // 21: filer_pb.AssignVolumeRequest
+ (*AssignVolumeResponse)(nil), // 22: filer_pb.AssignVolumeResponse
+ (*LookupVolumeRequest)(nil), // 23: filer_pb.LookupVolumeRequest
+ (*Locations)(nil), // 24: filer_pb.Locations
+ (*Location)(nil), // 25: filer_pb.Location
+ (*LookupVolumeResponse)(nil), // 26: filer_pb.LookupVolumeResponse
+ (*Collection)(nil), // 27: filer_pb.Collection
+ (*CollectionListRequest)(nil), // 28: filer_pb.CollectionListRequest
+ (*CollectionListResponse)(nil), // 29: filer_pb.CollectionListResponse
+ (*DeleteCollectionRequest)(nil), // 30: filer_pb.DeleteCollectionRequest
+ (*DeleteCollectionResponse)(nil), // 31: filer_pb.DeleteCollectionResponse
+ (*StatisticsRequest)(nil), // 32: filer_pb.StatisticsRequest
+ (*StatisticsResponse)(nil), // 33: filer_pb.StatisticsResponse
+ (*GetFilerConfigurationRequest)(nil), // 34: filer_pb.GetFilerConfigurationRequest
+ (*GetFilerConfigurationResponse)(nil), // 35: filer_pb.GetFilerConfigurationResponse
+ (*SubscribeMetadataRequest)(nil), // 36: filer_pb.SubscribeMetadataRequest
+ (*SubscribeMetadataResponse)(nil), // 37: filer_pb.SubscribeMetadataResponse
+ (*LogEntry)(nil), // 38: filer_pb.LogEntry
+ (*KeepConnectedRequest)(nil), // 39: filer_pb.KeepConnectedRequest
+ (*KeepConnectedResponse)(nil), // 40: filer_pb.KeepConnectedResponse
+ (*LocateBrokerRequest)(nil), // 41: filer_pb.LocateBrokerRequest
+ (*LocateBrokerResponse)(nil), // 42: filer_pb.LocateBrokerResponse
+ (*KvGetRequest)(nil), // 43: filer_pb.KvGetRequest
+ (*KvGetResponse)(nil), // 44: filer_pb.KvGetResponse
+ (*KvPutRequest)(nil), // 45: filer_pb.KvPutRequest
+ (*KvPutResponse)(nil), // 46: filer_pb.KvPutResponse
+ (*FilerConf)(nil), // 47: filer_pb.FilerConf
+ nil, // 48: filer_pb.Entry.ExtendedEntry
+ nil, // 49: filer_pb.LookupVolumeResponse.LocationsMapEntry
+ (*LocateBrokerResponse_Resource)(nil), // 50: filer_pb.LocateBrokerResponse.Resource
+ (*FilerConf_PathConf)(nil), // 51: filer_pb.FilerConf.PathConf
+}
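+// file_filer_proto_depIdxs expresses the dependency graph as indexes into
+// file_filer_proto_goTypes above: first each message field's referenced
+// type, then every RPC's input and output type, with the trailing
+// [start:end) markers delimiting those sub-lists.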
+var file_filer_proto_depIdxs = []int32{
+ 4, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
+ 4, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
+ 7, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
+ 10, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
+ 48, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
+ 4, // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry
+ 4, // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry
+ 4, // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry
+ 9, // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId
+ 9, // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId
+ 7, // 10: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk
+ 4, // 11: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry
+ 4, // 12: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry
+ 7, // 13: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk
+ 25, // 14: filer_pb.Locations.locations:type_name -> filer_pb.Location
+ 49, // 15: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
+ 27, // 16: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection
+ 6, // 17: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
+ 50, // 18: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
+ 51, // 19: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
+ 24, // 20: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
+ 0, // 21: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
+ 2, // 22: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
+ 11, // 23: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
+ 13, // 24: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
+ 15, // 25: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
+ 17, // 26: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
+ 19, // 27: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
+ 21, // 28: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
+ 23, // 29: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
+ 28, // 30: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest
+ 30, // 31: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
+ 32, // 32: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
+ 34, // 33: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
+ 36, // 34: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+ 36, // 35: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+ 39, // 36: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest
+ 41, // 37: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
+ 43, // 38: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
+ 45, // 39: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
+ 1, // 40: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
+ 3, // 41: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
+ 12, // 42: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
+ 14, // 43: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse
+ 16, // 44: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse
+ 18, // 45: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse
+ 20, // 46: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse
+ 22, // 47: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse
+ 26, // 48: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse
+ 29, // 49: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse
+ 31, // 50: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse
+ 33, // 51: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse
+ 35, // 52: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse
+ 37, // 53: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse
+ 37, // 54: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse
+ 40, // 55: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse
+ 42, // 56: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse
+ 44, // 57: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
+ 46, // 58: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
+ 40, // [40:59] is the sub-list for method output_type
+ 21, // [21:40] is the sub-list for method input_type
+ 21, // [21:21] is the sub-list for extension type_name
+ 21, // [21:21] is the sub-list for extension extendee
+ 0, // [0:21] is the sub-list for field type_name
+}
+
+func init() { file_filer_proto_init() }
+func file_filer_proto_init() {
+ if File_filer_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
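+ // Without the unsafe fast path, the runtime relies on these exporter
+ // closures for reflective access to each message's unexported state,
+ // sizeCache, and unknownFields fields.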
+ file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupDirectoryEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupDirectoryEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListEntriesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListEntriesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FullEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EventNotification); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileChunk); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileChunkManifest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileId); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FuseAttributes); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AppendToEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AppendToEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AtomicRenameEntryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AtomicRenameEntryResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Locations); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Collection); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionListRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionListResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetFilerConfigurationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetFilerConfigurationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeMetadataRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeMetadataResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepConnectedRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepConnectedResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocateBrokerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocateBrokerResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KvGetRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KvGetResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KvPutRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KvPutResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FilerConf); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocateBrokerResponse_Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FilerConf_PathConf); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_filer_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 52,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_filer_proto_goTypes,
+ DependencyIndexes: file_filer_proto_depIdxs,
+ MessageInfos: file_filer_proto_msgTypes,
+ }.Build()
+ File_filer_proto = out.File
+ file_filer_proto_rawDesc = nil
+ file_filer_proto_goTypes = nil
+ file_filer_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for SeaweedFiler service
+const _ = grpc.SupportPackageIsVersion6
+// SeaweedFilerClient is the client API for SeaweedFiler service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SeaweedFilerClient interface {
LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error)
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error)
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
+ AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
+ CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error)
DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
+ SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
+ SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error)
+ KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error)
+ LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error)
+ KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error)
+ KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error)
}
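
// Illustrative usage only (not part of the generated code; the address,
// dial options, and request field values below are assumptions for the
// sketch):
//
//	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//	client := NewSeaweedFilerClient(conn)
//	resp, err := client.LookupDirectoryEntry(context.Background(),
//		&LookupDirectoryEntryRequest{Directory: "/", Name: "file.txt"})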
type seaweedFilerClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewSeaweedFilerClient(cc *grpc.ClientConn) SeaweedFilerClient {
+func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient {
return &seaweedFilerClient{cc}
}
func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) {
out := new(LookupDirectoryEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1066,7 +4537,7 @@ func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *Looku
}
func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], c.cc, "/filer_pb.SeaweedFiler/ListEntries", opts...)
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], "/filer_pb.SeaweedFiler/ListEntries", opts...)
if err != nil {
return nil, err
}
@@ -1099,7 +4570,7 @@ func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) {
func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
out := new(CreateEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1108,7 +4579,16 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq
func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) {
out := new(UpdateEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) {
+ out := new(AppendToEntryResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1117,7 +4597,7 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq
func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) {
out := new(DeleteEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1126,7 +4606,7 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq
func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) {
out := new(AtomicRenameEntryResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1135,7 +4615,7 @@ func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRe
func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
out := new(AssignVolumeResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1144,7 +4624,16 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR
func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
out := new(LookupVolumeResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) {
+ out := new(CollectionListResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CollectionList", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1153,7 +4642,7 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR
func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
out := new(DeleteCollectionResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1162,7 +4651,7 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol
func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) {
out := new(StatisticsResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1171,27 +4660,218 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque
func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) {
out := new(GetFilerConfigurationResponse)
- err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerSubscribeMetadataClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_SubscribeMetadataClient interface {
+ Recv() (*SubscribeMetadataResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerSubscribeMetadataClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
+ m := new(SubscribeMetadataResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[2], "/filer_pb.SeaweedFiler/SubscribeLocalMetadata", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerSubscribeLocalMetadataClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_SubscribeLocalMetadataClient interface {
+ Recv() (*SubscribeMetadataResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerSubscribeLocalMetadataClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
+ m := new(SubscribeMetadataResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
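+// KeepConnected is a bidirectional stream: unlike the server-streaming
+// subscriptions above, no initial message is sent when the stream opens,
+// and the caller drives the exchange through Send and Recv on the
+// returned client.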
+func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[3], "/filer_pb.SeaweedFiler/KeepConnected", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerKeepConnectedClient{stream}
+ return x, nil
+}
+
+type SeaweedFiler_KeepConnectedClient interface {
+ Send(*KeepConnectedRequest) error
+ Recv() (*KeepConnectedResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerKeepConnectedClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) {
+ m := new(KeepConnectedResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) {
+ out := new(LocateBrokerResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) {
+ out := new(KvGetResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvGet", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-// Server API for SeaweedFiler service
+func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) {
+ out := new(KvPutResponse)
+ err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvPut", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+// SeaweedFilerServer is the server API for SeaweedFiler service.
type SeaweedFilerServer interface {
LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error)
ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
+ AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
+ CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error)
DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
+ SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
+ SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error
+ KeepConnected(SeaweedFiler_KeepConnectedServer) error
+ LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error)
+ KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error)
+ KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error)
+}
+
+// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedFilerServer struct {
+}
+
+func (*UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error {
+ return status.Errorf(codes.Unimplemented, "method ListEntries not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AppendToEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AssignVolume not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error {
+ return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error {
+ return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) KeepConnected(SeaweedFiler_KeepConnectedServer) error {
+ return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LocateBroker not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method KvGet not implemented")
+}
+func (*UnimplementedSeaweedFilerServer) KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method KvPut not implemented")
}
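+
+// Illustrative sketch (not generated code): embedding the stub keeps a
+// partial implementation compiling as new RPCs are added to the service:
+//
+//	type filerStub struct {
+//		UnimplementedSeaweedFilerServer
+//	}
+//
+// Only the methods filerStub overrides take effect; every other RPC
+// returns codes.Unimplemented.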
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -1273,6 +4953,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AppendToEntryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).AppendToEntry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteEntryRequest)
if err := dec(in); err != nil {
@@ -1345,6 +5043,24 @@ func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CollectionListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).CollectionList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/CollectionList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).CollectionList(ctx, req.(*CollectionListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _SeaweedFiler_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteCollectionRequest)
if err := dec(in); err != nil {
@@ -1399,6 +5115,128 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SubscribeMetadataRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream})
+}
+
+type SeaweedFiler_SubscribeMetadataServer interface {
+ Send(*SubscribeMetadataResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerSubscribeMetadataServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SubscribeMetadataRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream})
+}
+
+type SeaweedFiler_SubscribeLocalMetadataServer interface {
+ Send(*SubscribeMetadataResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerSubscribeLocalMetadataServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream})
+}
+
+type SeaweedFiler_KeepConnectedServer interface {
+ Send(*KeepConnectedResponse) error
+ Recv() (*KeepConnectedRequest, error)
+ grpc.ServerStream
+}
+
+type seaweedFilerKeepConnectedServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) {
+ m := new(KeepConnectedRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LocateBrokerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).LocateBroker(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/LocateBroker",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedFiler_KvGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(KvGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).KvGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/KvGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).KvGet(ctx, req.(*KvGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedFiler_KvPut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(KvPutRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).KvPut(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/KvPut",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).KvPut(ctx, req.(*KvPutRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -1415,6 +5253,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateEntry",
Handler: _SeaweedFiler_UpdateEntry_Handler,
},
+ {
+ MethodName: "AppendToEntry",
+ Handler: _SeaweedFiler_AppendToEntry_Handler,
+ },
{
MethodName: "DeleteEntry",
Handler: _SeaweedFiler_DeleteEntry_Handler,
@@ -1431,6 +5273,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "LookupVolume",
Handler: _SeaweedFiler_LookupVolume_Handler,
},
+ {
+ MethodName: "CollectionList",
+ Handler: _SeaweedFiler_CollectionList_Handler,
+ },
{
MethodName: "DeleteCollection",
Handler: _SeaweedFiler_DeleteCollection_Handler,
@@ -1443,6 +5289,18 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "GetFilerConfiguration",
Handler: _SeaweedFiler_GetFilerConfiguration_Handler,
},
+ {
+ MethodName: "LocateBroker",
+ Handler: _SeaweedFiler_LocateBroker_Handler,
+ },
+ {
+ MethodName: "KvGet",
+ Handler: _SeaweedFiler_KvGet_Handler,
+ },
+ {
+ MethodName: "KvPut",
+ Handler: _SeaweedFiler_KvPut_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -1450,113 +5308,22 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
Handler: _SeaweedFiler_ListEntries_Handler,
ServerStreams: true,
},
+ {
+ StreamName: "SubscribeMetadata",
+ Handler: _SeaweedFiler_SubscribeMetadata_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "SubscribeLocalMetadata",
+ Handler: _SeaweedFiler_SubscribeLocalMetadata_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "KeepConnected",
+ Handler: _SeaweedFiler_KeepConnected_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
},
Metadata: "filer.proto",
}
-
-func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 1603 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x6f, 0xdc, 0x44,
- 0x14, 0x8f, 0xf7, 0xdb, 0x6f, 0x77, 0xdb, 0x64, 0x92, 0xb6, 0xdb, 0xcd, 0x07, 0xa9, 0x43, 0x4b,
- 0x10, 0x55, 0xa8, 0x42, 0x0f, 0x2d, 0x85, 0x43, 0x9b, 0x0f, 0x14, 0x91, 0x7e, 0xc8, 0x69, 0x11,
- 0x08, 0x09, 0xcb, 0xb1, 0x67, 0x37, 0x43, 0x6c, 0xcf, 0x32, 0x1e, 0x27, 0x29, 0x7f, 0x02, 0x47,
- 0x8e, 0x48, 0x9c, 0xf9, 0x27, 0x10, 0x17, 0x84, 0xf8, 0x6f, 0x38, 0x72, 0x46, 0x33, 0x63, 0x7b,
- 0xc7, 0xeb, 0x4d, 0xd2, 0x0a, 0xf5, 0xe6, 0x79, 0xdf, 0xef, 0xcd, 0x7b, 0xbf, 0x37, 0xbb, 0xd0,
- 0x1e, 0x90, 0x00, 0xb3, 0x8d, 0x11, 0xa3, 0x9c, 0xa2, 0x96, 0x3c, 0x38, 0xa3, 0x43, 0xeb, 0x39,
- 0x2c, 0xee, 0x53, 0x7a, 0x9c, 0x8c, 0xb6, 0x09, 0xc3, 0x1e, 0xa7, 0xec, 0xf5, 0x4e, 0xc4, 0xd9,
- 0x6b, 0x1b, 0xff, 0x90, 0xe0, 0x98, 0xa3, 0x25, 0x30, 0xfd, 0x8c, 0xd1, 0x33, 0x56, 0x8d, 0x75,
- 0xd3, 0x1e, 0x13, 0x10, 0x82, 0x5a, 0xe4, 0x86, 0xb8, 0x57, 0x91, 0x0c, 0xf9, 0x6d, 0xed, 0xc0,
- 0xd2, 0x74, 0x83, 0xf1, 0x88, 0x46, 0x31, 0x46, 0xb7, 0xa1, 0x8e, 0x05, 0x41, 0x5a, 0x6b, 0x6f,
- 0x5e, 0xdd, 0xc8, 0x42, 0xd9, 0x50, 0x72, 0x8a, 0x6b, 0xfd, 0x61, 0x00, 0xda, 0x27, 0x31, 0x17,
- 0x44, 0x82, 0xe3, 0x37, 0x8b, 0xe7, 0x3a, 0x34, 0x46, 0x0c, 0x0f, 0xc8, 0x59, 0x1a, 0x51, 0x7a,
- 0x42, 0x77, 0x61, 0x2e, 0xe6, 0x2e, 0xe3, 0xbb, 0x8c, 0x86, 0xbb, 0x24, 0xc0, 0xcf, 0x44, 0xd0,
- 0x55, 0x29, 0x52, 0x66, 0xa0, 0x0d, 0x40, 0x24, 0xf2, 0x82, 0x24, 0x26, 0x27, 0xf8, 0x20, 0xe3,
- 0xf6, 0x6a, 0xab, 0xc6, 0x7a, 0xcb, 0x9e, 0xc2, 0x41, 0x0b, 0x50, 0x0f, 0x48, 0x48, 0x78, 0xaf,
- 0xbe, 0x6a, 0xac, 0x77, 0x6d, 0x75, 0xb0, 0x3e, 0x83, 0xf9, 0x42, 0xfc, 0x6f, 0x97, 0xfe, 0xaf,
- 0x15, 0xa8, 0x4b, 0x42, 0x5e, 0x63, 0x63, 0x5c, 0x63, 0x74, 0x0b, 0x3a, 0x24, 0x76, 0xc6, 0x85,
- 0xa8, 0xc8, 0xd8, 0xda, 0x24, 0xce, 0x6b, 0x8e, 0x3e, 0x82, 0x86, 0x77, 0x94, 0x44, 0xc7, 0x71,
- 0xaf, 0xba, 0x5a, 0x5d, 0x6f, 0x6f, 0xce, 0x8f, 0x1d, 0x89, 0x44, 0xb7, 0x04, 0xcf, 0x4e, 0x45,
- 0xd0, 0x03, 0x00, 0x97, 0x73, 0x46, 0x0e, 0x13, 0x8e, 0x63, 0x99, 0x69, 0x7b, 0xb3, 0xa7, 0x29,
- 0x24, 0x31, 0x7e, 0x9c, 0xf3, 0x6d, 0x4d, 0x16, 0x3d, 0x84, 0x16, 0x3e, 0xe3, 0x38, 0xf2, 0xb1,
- 0xdf, 0xab, 0x4b, 0x47, 0xcb, 0x13, 0x19, 0x6d, 0xec, 0xa4, 0x7c, 0x95, 0x5f, 0x2e, 0xde, 0x7f,
- 0x04, 0xdd, 0x02, 0x0b, 0xcd, 0x42, 0xf5, 0x18, 0x67, 0xb7, 0x2a, 0x3e, 0x45, 0x65, 0x4f, 0xdc,
- 0x20, 0x51, 0x0d, 0xd6, 0xb1, 0xd5, 0xe1, 0xd3, 0xca, 0x03, 0xc3, 0xda, 0x06, 0x73, 0x37, 0x09,
- 0x82, 0x5c, 0xd1, 0x27, 0x2c, 0x53, 0xf4, 0x09, 0x1b, 0x57, 0xb9, 0x72, 0x61, 0x95, 0x7f, 0x37,
- 0x60, 0x6e, 0xe7, 0x04, 0x47, 0xfc, 0x19, 0xe5, 0x64, 0x40, 0x3c, 0x97, 0x13, 0x1a, 0xa1, 0xbb,
- 0x60, 0xd2, 0xc0, 0x77, 0x2e, 0xbc, 0xa6, 0x16, 0x0d, 0xd2, 0xa8, 0xef, 0x82, 0x19, 0xe1, 0x53,
- 0xe7, 0x42, 0x77, 0xad, 0x08, 0x9f, 0x2a, 0xe9, 0x35, 0xe8, 0xfa, 0x38, 0xc0, 0x1c, 0x3b, 0xf9,
- 0xed, 0x88, 0xab, 0xeb, 0x28, 0xe2, 0x96, 0xba, 0x8e, 0x3b, 0x70, 0x55, 0x98, 0x1c, 0xb9, 0x0c,
- 0x47, 0xdc, 0x19, 0xb9, 0xfc, 0x48, 0xde, 0x89, 0x69, 0x77, 0x23, 0x7c, 0xfa, 0x42, 0x52, 0x5f,
- 0xb8, 0xfc, 0xc8, 0xfa, 0xd7, 0x00, 0x33, 0xbf, 0x4c, 0x74, 0x03, 0x9a, 0xc2, 0xad, 0x43, 0xfc,
- 0xb4, 0x12, 0x0d, 0x71, 0xdc, 0xf3, 0xc5, 0x54, 0xd0, 0xc1, 0x20, 0xc6, 0x5c, 0x86, 0x57, 0xb5,
- 0xd3, 0x93, 0xe8, 0xac, 0x98, 0xfc, 0xa8, 0x06, 0xa1, 0x66, 0xcb, 0x6f, 0x51, 0xf1, 0x90, 0x93,
- 0x10, 0x4b, 0x87, 0x55, 0x5b, 0x1d, 0xd0, 0x3c, 0xd4, 0xb1, 0xc3, 0xdd, 0xa1, 0xec, 0x70, 0xd3,
- 0xae, 0xe1, 0x97, 0xee, 0x10, 0xbd, 0x0f, 0x57, 0x62, 0x9a, 0x30, 0x0f, 0x3b, 0x99, 0xdb, 0x86,
- 0xe4, 0x76, 0x14, 0x75, 0x57, 0x39, 0xb7, 0xa0, 0x3a, 0x20, 0x7e, 0xaf, 0x29, 0x0b, 0x33, 0x5b,
- 0x6c, 0xc2, 0x3d, 0xdf, 0x16, 0x4c, 0xf4, 0x31, 0x40, 0x6e, 0xc9, 0xef, 0xb5, 0xce, 0x11, 0x35,
- 0x33, 0xbb, 0xbe, 0xf5, 0x35, 0x34, 0x52, 0xf3, 0x8b, 0x60, 0x9e, 0xd0, 0x20, 0x09, 0xf3, 0xb4,
- 0xbb, 0x76, 0x4b, 0x11, 0xf6, 0x7c, 0x74, 0x13, 0x24, 0xce, 0x39, 0xa2, 0xab, 0x2a, 0x32, 0x49,
- 0x59, 0xa1, 0x2f, 0xb1, 0x44, 0x0a, 0x8f, 0xd2, 0x63, 0xa2, 0xb2, 0x6f, 0xda, 0xe9, 0xc9, 0xfa,
- 0xa7, 0x02, 0x57, 0x8a, 0xed, 0x2e, 0x5c, 0x48, 0x2b, 0xb2, 0x56, 0x86, 0x34, 0x23, 0xcd, 0x1e,
- 0x14, 0xea, 0x55, 0xd1, 0xeb, 0x95, 0xa9, 0x84, 0xd4, 0x57, 0x0e, 0xba, 0x4a, 0xe5, 0x29, 0xf5,
- 0xb1, 0xe8, 0xd6, 0x84, 0xf8, 0xb2, 0xc0, 0x5d, 0x5b, 0x7c, 0x0a, 0xca, 0x90, 0xf8, 0x29, 0x7c,
- 0x88, 0x4f, 0x19, 0x1e, 0x93, 0x76, 0x1b, 0xea, 0xca, 0xd4, 0x49, 0x5c, 0x59, 0x28, 0xa8, 0x4d,
- 0x75, 0x0f, 0xe2, 0x1b, 0xad, 0x42, 0x9b, 0xe1, 0x51, 0x90, 0x76, 0xaf, 0x2c, 0x9f, 0x69, 0xeb,
- 0x24, 0xb4, 0x02, 0xe0, 0xd1, 0x20, 0xc0, 0x9e, 0x14, 0x30, 0xa5, 0x80, 0x46, 0x11, 0x9d, 0xc3,
- 0x79, 0xe0, 0xc4, 0xd8, 0xeb, 0xc1, 0xaa, 0xb1, 0x5e, 0xb7, 0x1b, 0x9c, 0x07, 0x07, 0xd8, 0x13,
- 0x79, 0x24, 0x31, 0x66, 0x8e, 0x04, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x84, 0xc9, 0x65, 0x80,
- 0x21, 0xa3, 0xc9, 0x48, 0x71, 0x3b, 0xab, 0x55, 0x81, 0xc5, 0x92, 0x22, 0xd9, 0xb7, 0xe1, 0x4a,
- 0xfc, 0x3a, 0x0c, 0x48, 0x74, 0xec, 0x70, 0x97, 0x0d, 0x31, 0xef, 0x75, 0x55, 0x0f, 0xa7, 0xd4,
- 0x97, 0x92, 0x68, 0x7d, 0x03, 0x68, 0x8b, 0x61, 0x97, 0xe3, 0xb7, 0x58, 0x3b, 0x6f, 0x38, 0xdd,
- 0xd7, 0x60, 0xbe, 0x60, 0x5a, 0x21, 0xb0, 0xf0, 0xf8, 0x6a, 0xe4, 0xbf, 0x2b, 0x8f, 0x05, 0xd3,
- 0xa9, 0xc7, 0xbf, 0x0c, 0x40, 0xdb, 0x72, 0xc0, 0xff, 0xdf, 0x6e, 0x15, 0x23, 0x27, 0x70, 0x5f,
- 0x01, 0x88, 0xef, 0x72, 0x37, 0xdd, 0x4a, 0x1d, 0x12, 0x2b, 0xfb, 0xdb, 0x2e, 0x77, 0xd3, 0xed,
- 0xc0, 0xb0, 0x97, 0x30, 0xb1, 0xa8, 0x64, 0x5f, 0xc9, 0xed, 0x60, 0x67, 0x24, 0x74, 0x1f, 0xae,
- 0x93, 0x61, 0x44, 0x19, 0x1e, 0x8b, 0x39, 0x98, 0x31, 0xca, 0x64, 0xbf, 0xb5, 0xec, 0x05, 0xc5,
- 0xcd, 0x15, 0x76, 0x04, 0x4f, 0xa4, 0x57, 0x48, 0x23, 0x4d, 0xef, 0x17, 0x03, 0x7a, 0x8f, 0x39,
- 0x0d, 0x89, 0x67, 0x63, 0x11, 0x66, 0x21, 0xc9, 0x35, 0xe8, 0x0a, 0x30, 0x9d, 0x4c, 0xb4, 0x43,
- 0x03, 0x7f, 0xbc, 0xac, 0x6e, 0x82, 0xc0, 0x53, 0x47, 0xcb, 0xb7, 0x49, 0x03, 0x5f, 0xb6, 0xd1,
- 0x1a, 0x08, 0xd0, 0xd3, 0xf4, 0xd5, 0xda, 0xee, 0x44, 0xf8, 0xb4, 0xa0, 0x2f, 0x84, 0xa4, 0xbe,
- 0x42, 0xca, 0x66, 0x84, 0x4f, 0x85, 0xbe, 0xb5, 0x08, 0x37, 0xa7, 0xc4, 0x96, 0x46, 0xfe, 0x9b,
- 0x01, 0xf3, 0x8f, 0xe3, 0x98, 0x0c, 0xa3, 0xaf, 0x24, 0x66, 0x64, 0x41, 0x2f, 0x40, 0xdd, 0xa3,
- 0x49, 0xc4, 0x65, 0xb0, 0x75, 0x5b, 0x1d, 0x26, 0xc6, 0xa8, 0x52, 0x1a, 0xa3, 0x89, 0x41, 0xac,
- 0x96, 0x07, 0x51, 0x1b, 0xb4, 0x5a, 0x61, 0xd0, 0xde, 0x83, 0xb6, 0xb8, 0x4e, 0xc7, 0xc3, 0x11,
- 0xc7, 0x2c, 0x85, 0x59, 0x10, 0xa4, 0x2d, 0x49, 0xb1, 0x7e, 0x32, 0x60, 0xa1, 0x18, 0x69, 0xfa,
- 0x9e, 0x38, 0x17, 0xf5, 0x05, 0xcc, 0xb0, 0x20, 0x0d, 0x53, 0x7c, 0x8a, 0x81, 0x1d, 0x25, 0x87,
- 0x01, 0xf1, 0x1c, 0xc1, 0x50, 0xe1, 0x99, 0x8a, 0xf2, 0x8a, 0x05, 0xe3, 0xa4, 0x6b, 0x7a, 0xd2,
- 0x08, 0x6a, 0x6e, 0xc2, 0x8f, 0x32, 0xe4, 0x17, 0xdf, 0xd6, 0x7d, 0x98, 0x57, 0x4f, 0xbc, 0x62,
- 0xd5, 0x96, 0x01, 0x72, 0x2c, 0x8e, 0x7b, 0x86, 0x02, 0x84, 0x0c, 0x8c, 0x63, 0xeb, 0x73, 0x30,
- 0xf7, 0xa9, 0x2a, 0x44, 0x8c, 0xee, 0x81, 0x19, 0x64, 0x07, 0x29, 0xda, 0xde, 0x44, 0xe3, 0xa1,
- 0xca, 0xe4, 0xec, 0xb1, 0x90, 0xf5, 0x08, 0x5a, 0x19, 0x39, 0xcb, 0xcd, 0x38, 0x2f, 0xb7, 0xca,
- 0x44, 0x6e, 0xd6, 0x9f, 0x06, 0x2c, 0x14, 0x43, 0x4e, 0xcb, 0xf7, 0x0a, 0xba, 0xb9, 0x0b, 0x27,
- 0x74, 0x47, 0x69, 0x2c, 0xf7, 0xf4, 0x58, 0xca, 0x6a, 0x79, 0x80, 0xf1, 0x53, 0x77, 0xa4, 0x5a,
- 0xaa, 0x13, 0x68, 0xa4, 0xfe, 0x4b, 0x98, 0x2b, 0x89, 0x4c, 0x79, 0xdf, 0x7c, 0xa8, 0xbf, 0x6f,
- 0x0a, 0x6f, 0xb4, 0x5c, 0x5b, 0x7f, 0xf4, 0x3c, 0x84, 0x1b, 0x6a, 0xfe, 0xb6, 0xf2, 0xa6, 0xcb,
- 0x6a, 0x5f, 0xec, 0x4d, 0x63, 0xb2, 0x37, 0xad, 0x3e, 0xf4, 0xca, 0xaa, 0xe9, 0x14, 0x0c, 0x61,
- 0xee, 0x80, 0xbb, 0x9c, 0xc4, 0x9c, 0x78, 0xf9, 0x43, 0x7b, 0xa2, 0x99, 0x8d, 0xcb, 0xb6, 0x4a,
- 0x79, 0x1c, 0x66, 0xa1, 0xca, 0x79, 0xd6, 0x67, 0xe2, 0x53, 0xdc, 0x02, 0xd2, 0x3d, 0xa5, 0x77,
- 0xf0, 0x0e, 0x5c, 0x89, 0x7e, 0xe0, 0x94, 0xbb, 0x81, 0xda, 0xda, 0x35, 0xb9, 0xb5, 0x4d, 0x49,
- 0x91, 0x6b, 0x5b, 0x2d, 0x36, 0x5f, 0x71, 0xeb, 0x6a, 0xa7, 0x0b, 0x82, 0x64, 0x2e, 0x03, 0xc8,
- 0x91, 0x52, 0xd3, 0xd0, 0x50, 0xba, 0x82, 0xb2, 0x25, 0x08, 0xd6, 0x0a, 0x2c, 0x7d, 0x81, 0xb9,
- 0x78, 0x7f, 0xb0, 0x2d, 0x1a, 0x0d, 0xc8, 0x30, 0x61, 0xae, 0x76, 0x15, 0xd6, 0xcf, 0x06, 0x2c,
- 0x9f, 0x23, 0x90, 0x26, 0xdc, 0x83, 0x66, 0xe8, 0xc6, 0x1c, 0xb3, 0x6c, 0x4a, 0xb2, 0xe3, 0x64,
- 0x29, 0x2a, 0x97, 0x95, 0xa2, 0x5a, 0x2a, 0xc5, 0x35, 0x68, 0x84, 0xee, 0x99, 0x13, 0x1e, 0xa6,
- 0x0f, 0x8c, 0x7a, 0xe8, 0x9e, 0x3d, 0x3d, 0xdc, 0xfc, 0xbb, 0x09, 0x9d, 0x03, 0xec, 0x9e, 0x62,
- 0xec, 0xcb, 0xc0, 0xd0, 0x30, 0x1b, 0x88, 0xe2, 0xcf, 0x34, 0x74, 0x7b, 0xb2, 0xf3, 0xa7, 0xfe,
- 0x2e, 0xec, 0xdf, 0xb9, 0x4c, 0x2c, 0xed, 0xad, 0x19, 0xf4, 0x0c, 0xda, 0xda, 0xef, 0x20, 0xb4,
- 0xa4, 0x29, 0x96, 0x7e, 0xde, 0xf5, 0x97, 0xcf, 0xe1, 0x66, 0xd6, 0xee, 0x19, 0x68, 0x1f, 0xda,
- 0xda, 0x56, 0xd7, 0xed, 0x95, 0xdf, 0x11, 0xba, 0xbd, 0x69, 0x4f, 0x81, 0x19, 0x61, 0x4d, 0xdb,
- 0xd8, 0xba, 0xb5, 0xf2, 0x1b, 0x41, 0xb7, 0x36, 0x6d, 0xcd, 0x4b, 0x6b, 0xda, 0x82, 0xd4, 0xad,
- 0x95, 0xd7, 0xbf, 0x6e, 0x6d, 0xda, 0x56, 0x9d, 0x41, 0xdf, 0xc1, 0x5c, 0x69, 0x75, 0x21, 0x6b,
- 0xac, 0x75, 0xde, 0xce, 0xed, 0xaf, 0x5d, 0x28, 0x93, 0xdb, 0x7f, 0x0e, 0x1d, 0x7d, 0xa5, 0x20,
- 0x2d, 0xa0, 0x29, 0x4b, 0xb1, 0xbf, 0x72, 0x1e, 0x5b, 0x37, 0xa8, 0xa3, 0xa5, 0x6e, 0x70, 0xca,
- 0xbe, 0xd0, 0x0d, 0x4e, 0x03, 0x59, 0x6b, 0x06, 0x7d, 0x0b, 0xb3, 0x93, 0xa8, 0x85, 0x6e, 0x4d,
- 0x96, 0xad, 0x04, 0x86, 0x7d, 0xeb, 0x22, 0x91, 0xdc, 0xf8, 0x1e, 0xc0, 0x18, 0x8c, 0xd0, 0xe2,
- 0x58, 0xa7, 0x04, 0x86, 0xfd, 0xa5, 0xe9, 0xcc, 0xdc, 0xd4, 0xf7, 0x70, 0x6d, 0xea, 0xc4, 0x23,
- 0x6d, 0x4c, 0x2e, 0xc2, 0x8c, 0xfe, 0x07, 0x97, 0xca, 0x65, 0xbe, 0x9e, 0xac, 0xc0, 0x6c, 0xac,
- 0x06, 0x79, 0x10, 0x6f, 0x78, 0x01, 0xc1, 0x11, 0x7f, 0x02, 0x52, 0xe3, 0x05, 0xa3, 0x9c, 0x1e,
- 0x36, 0xe4, 0x3f, 0x3c, 0x9f, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0x14, 0x43, 0x9d, 0xb9, 0xf0,
- 0x11, 0x00, 0x00,
-}
diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go
new file mode 100644
index 000000000..65bd85c84
--- /dev/null
+++ b/weed/pb/filer_pb/filer_client.go
@@ -0,0 +1,299 @@
+package filer_pb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
+)
+
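+// FilerClient is implemented by anything that can run a function against a filer's gRPC
+// client; AdjustedUrl lets implementations rewrite a volume location's URL (for example,
+// when the cluster is reached through a different address than the one it advertises).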
+type FilerClient interface {
+ WithFilerClient(fn func(SeaweedFilerClient) error) error
+ AdjustedUrl(location *Location) string
+}
+
+func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) {
+
+ dir, name := fullFilePath.DirAndName()
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &LookupDirectoryEntryRequest{
+ Directory: dir,
+ Name: name,
+ }
+
+ // glog.V(3).Infof("read %s request: %v", fullFilePath, request)
+ resp, err := LookupEntry(client, request)
+ if err != nil {
+ if err == ErrNotFound {
+ return nil
+ }
+ glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
+ return err
+ }
+
+ if resp.Entry == nil {
+ // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
+ return nil
+ }
+
+ entry = resp.Entry
+ return nil
+ })
+
+ return
+}
+
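+// EachEntryFunciton is invoked once per listed entry; isLast reports whether it is the final entry of the listing.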
+type EachEntryFunciton func(entry *Entry, isLast bool) error
+
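+// ReadDirAllEntries streams every entry under fullDirPath, fetching pages of up to 10000
+// names and resuming from the last name seen until a short page signals the end.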
+func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton) (err error) {
+
+ var counter uint32
+ var startFrom string
+ var counterFunc = func(entry *Entry, isLast bool) error {
+ counter++
+ startFrom = entry.Name
+ return fn(entry, isLast)
+ }
+
+ var paginationLimit uint32 = 10000
+
+ if err = doList(filerClient, fullDirPath, prefix, counterFunc, "", false, paginationLimit); err != nil {
+ return err
+ }
+
+ for counter == paginationLimit {
+ counter = 0
+ if err = doList(filerClient, fullDirPath, prefix, counterFunc, startFrom, false, paginationLimit); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+ return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit)
+ })
+}
+
+func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+ return doSeaweedList(client, fullDirPath, prefix, fn, startFrom, inclusive, limit)
+ })
+}
+
+func SeaweedList(client SeaweedFilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) {
+ return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit)
+}
+
+func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) {
+ // Request one more entry than the caller's limit so we can tell whether each delivered entry is the last one.
+ redLimit := limit
+ if limit != math.MaxInt32 && limit != 0 {
+ redLimit = limit + 1
+ }
+ request := &ListEntriesRequest{
+ Directory: string(fullDirPath),
+ Prefix: prefix,
+ StartFromFileName: startFrom,
+ Limit: redLimit,
+ InclusiveStartFrom: inclusive,
+ }
+
+ glog.V(4).Infof("read directory: %v", request)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.ListEntries(ctx, request)
+ if err != nil {
+ return fmt.Errorf("list %s: %v", fullDirPath, err)
+ }
+
+ var prevEntry *Entry
+ count := 0
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ if prevEntry != nil {
+ if err := fn(prevEntry, true); err != nil {
+ return err
+ }
+ }
+ break
+ } else {
+ return recvErr
+ }
+ }
+ if prevEntry != nil {
+ if err := fn(prevEntry, false); err != nil {
+ return err
+ }
+ }
+ prevEntry = resp.Entry
+ count++
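+ // Entries beyond the caller's limit were only fetched (via redLimit) to detect whether more exist; do not deliver them to fn.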
+ if count > int(limit) && limit != 0 {
+ prevEntry = nil
+ }
+ }
+
+ return nil
+}
+
+func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &LookupDirectoryEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ }
+
+ glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ resp, err := LookupEntry(client, request)
+ if err != nil {
+ if err == ErrNotFound {
+ exists = false
+ return nil
+ }
+ glog.V(0).Infof("exists entry %v: %v", request, err)
+ return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ }
+
+ exists = resp.Entry.IsDirectory == isDirectory
+
+ return nil
+ })
+
+ return
+}
+
+func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string, entry *Entry) (err error) {
+
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &UpdateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ if err := UpdateEntry(client, request); err != nil {
+ glog.V(0).Infof("touch exists entry %v: %v", request, err)
+ return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ }
+
+ return nil
+ })
+
+}
+
+func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ entry := &Entry{
+ Name: dirName,
+ IsDirectory: true,
+ Attributes: &FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+
+ if fn != nil {
+ fn(entry)
+ }
+
+ request := &CreateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(1).Infof("mkdir: %v", request)
+ if err := CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("mkdir %v: %v", request, err)
+ return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
+ }
+
+ return nil
+ })
+}
+
+func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ entry := &Entry{
+ Name: fileName,
+ IsDirectory: false,
+ Attributes: &FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0770),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ Chunks: chunks,
+ }
+
+ request := &CreateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+ if err := CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("create file %v:%v", request, err)
+ return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
+ }
+
+ return nil
+ })
+}
+
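+// Remove deletes an entry; a not-found error, whether surfaced as a transport error or in
+// the response body, is treated as success so deletion is idempotent.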
+func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool, signatures []int32) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ deleteEntryRequest := &DeleteEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: name,
+ IsDeleteData: isDeleteData,
+ IsRecursive: isRecursive,
+ IgnoreRecursiveError: ignoreRecursiveErr,
+ IsFromOtherCluster: isFromOtherCluster,
+ Signatures: signatures,
+ }
+ if resp, err := client.DeleteEntry(context.Background(), deleteEntryRequest); err != nil {
+ if strings.Contains(err.Error(), ErrNotFound.Error()) {
+ return nil
+ }
+ return err
+ } else {
+ if resp.Error != "" {
+ if strings.Contains(resp.Error, ErrNotFound.Error()) {
+ return nil
+ }
+ return errors.New(resp.Error)
+ }
+ }
+
+ return nil
+
+ })
+}
diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go
new file mode 100644
index 000000000..4e5b65f12
--- /dev/null
+++ b/weed/pb/filer_pb/filer_client_bfs.go
@@ -0,0 +1,63 @@
+package filer_pb
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
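+// TraverseBfs walks the tree under parentPath breadth-first, using K worker goroutines
+// that pull directories off a shared queue. Note: err and isTerminating are shared across
+// goroutines without synchronization, so the last error observed wins and workers may
+// linger briefly after Wait returns.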
+func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+
+ K := 5
+
+ var jobQueueWg sync.WaitGroup
+ queue := util.NewQueue()
+ jobQueueWg.Add(1)
+ queue.Enqueue(parentPath)
+ var isTerminating bool
+
+ for i := 0; i < K; i++ {
+ go func() {
+ for {
+ if isTerminating {
+ break
+ }
+ t := queue.Dequeue()
+ if t == nil {
+ time.Sleep(329 * time.Millisecond)
+ continue
+ }
+ dir := t.(util.FullPath)
+ processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn)
+ if processErr != nil {
+ err = processErr
+ }
+ jobQueueWg.Done()
+ }
+ }()
+ }
+ jobQueueWg.Wait()
+ isTerminating = true
+ return
+}
+
+func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+
+ return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error {
+
+ fn(parentPath, entry)
+
+ if entry.IsDirectory {
+ subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
+ if parentPath == "/" {
+ subDir = "/" + entry.Name
+ }
+ jobQueueWg.Add(1)
+ queue.Enqueue(util.FullPath(subDir))
+ }
+ return nil
+ })
+
+}
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index 5c40332e6..b46385c8f 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -1,10 +1,18 @@
package filer_pb
import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/golang/protobuf/proto"
+ "github.com/viant/ptrie"
)
-func toFileIdObject(fileIdStr string) (*FileId, error) {
+func ToFileIdObject(fileIdStr string) (*FileId, error) {
t, err := needle.ParseFileIdFromString(fileIdStr)
if err != nil {
return nil, err
@@ -37,14 +45,14 @@ func BeforeEntrySerialization(chunks []*FileChunk) {
for _, chunk := range chunks {
if chunk.FileId != "" {
- if fid, err := toFileIdObject(chunk.FileId); err == nil {
+ if fid, err := ToFileIdObject(chunk.FileId); err == nil {
chunk.Fid = fid
chunk.FileId = ""
}
}
if chunk.SourceFileId != "" {
- if fid, err := toFileIdObject(chunk.SourceFileId); err == nil {
+ if fid, err := ToFileIdObject(chunk.SourceFileId); err == nil {
chunk.SourceFid = fid
chunk.SourceFileId = ""
}
@@ -53,6 +61,15 @@ func BeforeEntrySerialization(chunks []*FileChunk) {
}
}
+func EnsureFid(chunk *FileChunk) {
+ if chunk.Fid != nil {
+ return
+ }
+ if fid, err := ToFileIdObject(chunk.FileId); err == nil {
+ chunk.Fid = fid
+ }
+}
+
func AfterEntryDeserialization(chunks []*FileChunk) {
for _, chunk := range chunks {
@@ -67,3 +84,66 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
}
}
+
+func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
+ resp, err := client.CreateEntry(context.Background(), request)
+ if err != nil {
+ glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+ return fmt.Errorf("CreateEntry: %v", err)
+ }
+ if resp.Error != "" {
+ glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
+ return fmt.Errorf("CreateEntry : %v", resp.Error)
+ }
+ return nil
+}
+
+func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error {
+ _, err := client.UpdateEntry(context.Background(), request)
+ if err != nil {
+ glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
+ return fmt.Errorf("UpdateEntry: %v", err)
+ }
+ return nil
+}
+
+func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) {
+ resp, err := client.LookupDirectoryEntry(context.Background(), request)
+ if err != nil {
+ if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
+ return nil, ErrNotFound
+ }
+ glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
+ return nil, fmt.Errorf("LookupEntry1: %v", err)
+ }
+ if resp.Entry == nil {
+ return nil, ErrNotFound
+ }
+ return resp, nil
+}
+
+var ErrNotFound = errors.New("filer: no entry is found in filer store")
+
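+// The predicates below classify a metadata subscription event by which entries it carries:
+// a create has only a new entry, a delete only an old one, and an update is distinguished
+// from a rename by whether the entry's parent directory changed.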
+func IsCreate(event *SubscribeMetadataResponse) bool {
+ return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry == nil
+}
+func IsUpdate(event *SubscribeMetadataResponse) bool {
+ return event.EventNotification.NewEntry != nil &&
+ event.EventNotification.OldEntry != nil &&
+ event.Directory == event.EventNotification.NewParentPath
+}
+func IsDelete(event *SubscribeMetadataResponse) bool {
+ return event.EventNotification.NewEntry == nil && event.EventNotification.OldEntry != nil
+}
+func IsRename(event *SubscribeMetadataResponse) bool {
+ return event.EventNotification.NewEntry != nil &&
+ event.EventNotification.OldEntry != nil &&
+ event.Directory != event.EventNotification.NewParentPath
+}
+
+var _ = ptrie.KeyProvider(&FilerConf_PathConf{})
+
+func (fp *FilerConf_PathConf) Key() interface{} {
+ key, _ := proto.Marshal(fp)
+ return string(key)
+}
diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go
index d4468c011..0009afdbe 100644
--- a/weed/pb/filer_pb/filer_pb_helper_test.go
+++ b/weed/pb/filer_pb/filer_pb_helper_test.go
@@ -9,7 +9,7 @@ import (
func TestFileIdSize(t *testing.T) {
fileIdStr := "11745,0293434534cbb9892b"
- fid, _ := toFileIdObject(fileIdStr)
+ fid, _ := ToFileIdObject(fileIdStr)
bytes, _ := proto.Marshal(fid)
println(len(fileIdStr))
diff --git a/weed/pb/filer_pb/signature.go b/weed/pb/filer_pb/signature.go
new file mode 100644
index 000000000..e13afc656
--- /dev/null
+++ b/weed/pb/filer_pb/signature.go
@@ -0,0 +1,13 @@
+package filer_pb
+
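+// Signatures record which clients or clusters have already handled a request; HasSigned
+// lets a receiver skip a request it has signed itself (presumably to avoid re-applying
+// replicated operations).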
+func (r *CreateEntryRequest) AddSignature(sig int32) {
+ r.Signatures = append(r.Signatures, sig)
+}
+func (r *CreateEntryRequest) HasSigned(sig int32) bool {
+ for _, s := range r.Signatures {
+ if s == sig {
+ return true
+ }
+ }
+ return false
+}
diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go
new file mode 100644
index 000000000..9efcd9bdc
--- /dev/null
+++ b/weed/pb/grpc_client_server.go
@@ -0,0 +1,204 @@
+package pb
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+const (
+ Max_Message_Size = 1 << 30 // 1 GB
+)
+
+var (
+ // cache grpc connections
+ grpcClients = make(map[string]*grpc.ClientConn)
+ grpcClientsLock sync.Mutex
+)
+
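+// Raise the default HTTP transport's idle-connection limits, since SeaweedFS processes
+// issue many concurrent requests to the same hosts.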
+func init() {
+ http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024
+ http.DefaultTransport.(*http.Transport).MaxIdleConns = 1024
+}
+
+func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
+ var options []grpc.ServerOption
+ options = append(options,
+ grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: 10 * time.Second, // wait time before ping if no activity
+ Timeout: 20 * time.Second, // ping timeout
+ MaxConnectionAge: 10 * time.Hour,
+ }),
+ grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: 60 * time.Second, // min time a client should wait before sending a ping
+ PermitWithoutStream: false,
+ }),
+ grpc.MaxRecvMsgSize(Max_Message_Size),
+ grpc.MaxSendMsgSize(Max_Message_Size),
+ )
+ for _, opt := range opts {
+ if opt != nil {
+ options = append(options, opt)
+ }
+ }
+ return grpc.NewServer(options...)
+}
+
+func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ // opts = append(opts, grpc.WithBlock())
+ // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
+ var options []grpc.DialOption
+ options = append(options,
+ // grpc.WithInsecure(),
+ grpc.WithDefaultCallOptions(
+ grpc.MaxCallSendMsgSize(Max_Message_Size),
+ grpc.MaxCallRecvMsgSize(Max_Message_Size),
+ ),
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{
+ Time: 30 * time.Second, // client ping server if no activity for this long
+ Timeout: 20 * time.Second,
+ PermitWithoutStream: false,
+ }))
+ for _, opt := range opts {
+ if opt != nil {
+ options = append(options, opt)
+ }
+ }
+ return grpc.DialContext(ctx, address, options...)
+}
+
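+// getOrCreateConnection dials each address at most once and caches the connection for the
+// life of the process; cached connections are never closed.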
+func getOrCreateConnection(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+
+ grpcClientsLock.Lock()
+ defer grpcClientsLock.Unlock()
+
+ existingConnection, found := grpcClients[address]
+ if found {
+ return existingConnection, nil
+ }
+
+ grpcConnection, err := GrpcDial(context.Background(), address, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("fail to dial %s: %v", address, err)
+ }
+
+ grpcClients[address] = grpcConnection
+
+ return grpcConnection, nil
+}
+
+func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
+
+ grpcConnection, err := getOrCreateConnection(address, opts...)
+ if err != nil {
+ return fmt.Errorf("getOrCreateConnection %s: %v", address, err)
+ }
+ return fn(grpcConnection)
+}
+
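+// By SeaweedFS convention, a server's gRPC port is its HTTP port plus 10000.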
+func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) {
+ return ParseServerAddress(server, 10000)
+}
+
+func ParseServerAddress(server string, deltaPort int) (newServerAddress string, err error) {
+
+ host, port, parseErr := hostAndPort(server)
+ if parseErr != nil {
+ return "", fmt.Errorf("server port parse error: %v", parseErr)
+ }
+
+ newPort := int(port) + deltaPort
+
+ return fmt.Sprintf("%s:%d", host, newPort), nil
+}
+
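+// hostAndPort splits "host:port" at the last colon and parses the port as an unsigned integer.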
+func hostAndPort(address string) (host string, port uint64, err error) {
+ colonIndex := strings.LastIndex(address, ":")
+ if colonIndex < 0 {
+ return "", 0, fmt.Errorf("server should have hostname:port format: %v", address)
+ }
+ port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64)
+ if err != nil {
+ return "", 0, fmt.Errorf("server port parse error: %v", err)
+ }
+
+ return address[:colonIndex], port, err
+}
+
+func ServerToGrpcAddress(server string) (serverGrpcAddress string) {
+
+ host, port, parseErr := hostAndPort(server)
+ if parseErr != nil {
+ glog.Fatalf("server address %s parse error: %v", server, parseErr)
+ }
+
+ grpcPort := int(port) + 10000
+
+ return fmt.Sprintf("%s:%d", host, grpcPort)
+}
+
+func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) {
+ host, grpcPort, parseErr := hostAndPort(grpcAddress)
+ if parseErr != nil {
+ glog.Fatalf("server grpc address %s parse error: %v", grpcAddress, parseErr)
+ }
+
+ port := int(grpcPort) - 10000
+
+ return fmt.Sprintf("%s:%d", host, port)
+}
+
+func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {
+
+ masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr)
+ }
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := master_pb.NewSeaweedClient(grpcConnection)
+ return fn(client)
+ }, masterGrpcAddress, grpcDialOption)
+
+}
+
+func WithBrokerGrpcClient(brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error {
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := messaging_pb.NewSeaweedMessagingClient(grpcConnection)
+ return fn(client)
+ }, brokerGrpcAddress, grpcDialOption)
+
+}
+
+func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr)
+ }
+
+ return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn)
+
+}
+
+func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error {
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, filerGrpcAddress, grpcDialOption)
+
+}
diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto
new file mode 100644
index 000000000..558bd2b70
--- /dev/null
+++ b/weed/pb/iam.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package iam_pb;
+
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/iam_pb";
+option java_package = "seaweedfs.client";
+option java_outer_classname = "IamProto";
+
+//////////////////////////////////////////////////
+
+service SeaweedIdentityAccessManagement {
+
+}
+
+//////////////////////////////////////////////////
+
+message S3ApiConfiguration {
+ repeated Identity identities = 1;
+}
+
+message Identity {
+ string name = 1;
+ repeated Credential credentials = 2;
+ repeated string actions = 3;
+}
+
+message Credential {
+ string access_key = 1;
+ string secret_key = 2;
+ // uint64 expiration = 3;
+ // bool is_disabled = 4;
+}
+
+/*
+message Policy {
+ repeated Statement statements = 1;
+}
+
+message Statement {
+ repeated Action action = 1;
+ repeated Resource resource = 2;
+}
+
+message Action {
+ string action = 1;
+}
+message Resource {
+ string bucket = 1;
+ // string path = 2;
+}
+*/
\ No newline at end of file
diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go
new file mode 100644
index 000000000..7d0b6281b
--- /dev/null
+++ b/weed/pb/iam_pb/iam.pb.go
@@ -0,0 +1,356 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.12.3
+// source: iam.proto
+
+package iam_pb
+
+import (
+ context "context"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type S3ApiConfiguration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"`
+}
+
+func (x *S3ApiConfiguration) Reset() {
+ *x = S3ApiConfiguration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_iam_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *S3ApiConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*S3ApiConfiguration) ProtoMessage() {}
+
+func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_iam_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use S3ApiConfiguration.ProtoReflect.Descriptor instead.
+func (*S3ApiConfiguration) Descriptor() ([]byte, []int) {
+ return file_iam_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *S3ApiConfiguration) GetIdentities() []*Identity {
+ if x != nil {
+ return x.Identities
+ }
+ return nil
+}
+
+type Identity struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"`
+ Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
+}
+
+func (x *Identity) Reset() {
+ *x = Identity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_iam_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Identity) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Identity) ProtoMessage() {}
+
+func (x *Identity) ProtoReflect() protoreflect.Message {
+ mi := &file_iam_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Identity.ProtoReflect.Descriptor instead.
+func (*Identity) Descriptor() ([]byte, []int) {
+ return file_iam_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Identity) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Identity) GetCredentials() []*Credential {
+ if x != nil {
+ return x.Credentials
+ }
+ return nil
+}
+
+func (x *Identity) GetActions() []string {
+ if x != nil {
+ return x.Actions
+ }
+ return nil
+}
+
+type Credential struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
+ SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
+}
+
+func (x *Credential) Reset() {
+ *x = Credential{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_iam_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Credential) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Credential) ProtoMessage() {}
+
+func (x *Credential) ProtoReflect() protoreflect.Message {
+ mi := &file_iam_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Credential.ProtoReflect.Descriptor instead.
+func (*Credential) Descriptor() ([]byte, []int) {
+ return file_iam_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Credential) GetAccessKey() string {
+ if x != nil {
+ return x.AccessKey
+ }
+ return ""
+}
+
+func (x *Credential) GetSecretKey() string {
+ if x != nil {
+ return x.SecretKey
+ }
+ return ""
+}
+
+var File_iam_proto protoreflect.FileDescriptor
+
+var file_iam_proto_rawDesc = []byte{
+ 0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d,
+ 0x5f, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
+ 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
+ 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x08, 0x49,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x43,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65,
+ 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65,
+ 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65,
+ 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08,
+ 0x49, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73,
+ 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62,
+ 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_iam_proto_rawDescOnce sync.Once
+ file_iam_proto_rawDescData = file_iam_proto_rawDesc
+)
+
+func file_iam_proto_rawDescGZIP() []byte {
+ file_iam_proto_rawDescOnce.Do(func() {
+ file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData)
+ })
+ return file_iam_proto_rawDescData
+}
+
+var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_iam_proto_goTypes = []interface{}{
+ (*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration
+ (*Identity)(nil), // 1: iam_pb.Identity
+ (*Credential)(nil), // 2: iam_pb.Credential
+}
+var file_iam_proto_depIdxs = []int32{
+ 1, // 0: iam_pb.S3ApiConfiguration.identities:type_name -> iam_pb.Identity
+ 2, // 1: iam_pb.Identity.credentials:type_name -> iam_pb.Credential
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_iam_proto_init() }
+func file_iam_proto_init() {
+ if File_iam_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*S3ApiConfiguration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Identity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Credential); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_iam_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_iam_proto_goTypes,
+ DependencyIndexes: file_iam_proto_depIdxs,
+ MessageInfos: file_iam_proto_msgTypes,
+ }.Build()
+ File_iam_proto = out.File
+ file_iam_proto_rawDesc = nil
+ file_iam_proto_goTypes = nil
+ file_iam_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type SeaweedIdentityAccessManagementClient interface {
+}
+
+type seaweedIdentityAccessManagementClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) SeaweedIdentityAccessManagementClient {
+ return &seaweedIdentityAccessManagementClient{cc}
+}
+
+// SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service.
+type SeaweedIdentityAccessManagementServer interface {
+}
+
+// UnimplementedSeaweedIdentityAccessManagementServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedIdentityAccessManagementServer struct {
+}
+
+func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) {
+ s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv)
+}
+
+var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "iam_pb.SeaweedIdentityAccessManagement",
+ HandlerType: (*SeaweedIdentityAccessManagementServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{},
+ Metadata: "iam.proto",
+}
diff --git a/weed/pb/master.proto b/weed/pb/master.proto
index 9b1e884c7..cdb49d1e3 100644
--- a/weed/pb/master.proto
+++ b/weed/pb/master.proto
@@ -2,6 +2,8 @@ syntax = "proto3";
package master_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb";
+
//////////////////////////////////////////////////
service Seaweed {
@@ -23,8 +25,17 @@ service Seaweed {
}
rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) {
}
+ rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) {
+ }
rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) {
}
+ rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) {
+ }
+ rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) {
+ }
+ rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -33,7 +44,6 @@ message Heartbeat {
string ip = 1;
uint32 port = 2;
string public_url = 3;
- uint32 max_volume_count = 4;
uint64 max_file_key = 5;
string data_center = 6;
string rack = 7;
@@ -51,6 +61,8 @@ message Heartbeat {
repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
bool has_no_ec_shards = 19;
+ map<string, uint32> max_volume_counts = 4;
+
}
message HeartbeatResponse {
@@ -76,6 +88,7 @@ message VolumeInformationMessage {
int64 modified_at_second = 12;
string remote_storage_name = 13;
string remote_storage_key = 14;
+ string disk_type = 15;
}
message VolumeShortInformationMessage {
@@ -84,12 +97,14 @@ message VolumeShortInformationMessage {
uint32 replica_placement = 8;
uint32 version = 9;
uint32 ttl = 10;
+ string disk_type = 15;
}
message VolumeEcShardInformationMessage {
uint32 id = 1;
string collection = 2;
uint32 ec_index_bits = 3;
+ string disk_type = 4;
}
message StorageBackend {
@@ -112,6 +127,7 @@ message SuperBlockExtra {
message KeepConnectedRequest {
string name = 1;
+ uint32 grpc_port = 2;
}
message VolumeLocation {
@@ -120,6 +136,7 @@ message VolumeLocation {
repeated uint32 new_vids = 3;
repeated uint32 deleted_vids = 4;
string leader = 5; // optional when leader is not itself
+ string data_center = 6; // optional when DataCenter is in use
}
message LookupVolumeRequest {
@@ -150,6 +167,7 @@ message AssignRequest {
string data_node = 7;
uint32 memory_map_max_size_mb = 8;
uint32 Writable_volume_count = 9;
+ string disk_type = 10;
}
message AssignResponse {
string fid = 1;
@@ -164,11 +182,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
+ string disk_type = 4;
}
message StatisticsResponse {
- string replication = 1;
- string collection = 2;
- string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -177,11 +193,6 @@ message StatisticsResponse {
//
// collection related
//
-
-message StorageType {
- string replication = 1;
- string ttl = 2;
-}
message Collection {
string name = 1;
}
@@ -202,8 +213,8 @@ message CollectionDeleteResponse {
//
// volume related
//
-message DataNodeInfo {
- string id = 1;
+message DiskInfo {
+ string type = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
@@ -212,32 +223,24 @@ message DataNodeInfo {
repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
uint64 remote_volume_count = 8;
}
+message DataNodeInfo {
+ string id = 1;
+ map<string, DiskInfo> diskInfos = 2;
+}
message RackInfo {
string id = 1;
- uint64 volume_count = 2;
- uint64 max_volume_count = 3;
- uint64 free_volume_count = 4;
- uint64 active_volume_count = 5;
- repeated DataNodeInfo data_node_infos = 6;
- uint64 remote_volume_count = 7;
+ repeated DataNodeInfo data_node_infos = 2;
+ map<string, DiskInfo> diskInfos = 3;
}
message DataCenterInfo {
string id = 1;
- uint64 volume_count = 2;
- uint64 max_volume_count = 3;
- uint64 free_volume_count = 4;
- uint64 active_volume_count = 5;
- repeated RackInfo rack_infos = 6;
- uint64 remote_volume_count = 7;
+ repeated RackInfo rack_infos = 2;
+ map<string, DiskInfo> diskInfos = 3;
}
message TopologyInfo {
string id = 1;
- uint64 volume_count = 2;
- uint64 max_volume_count = 3;
- uint64 free_volume_count = 4;
- uint64 active_volume_count = 5;
- repeated DataCenterInfo data_center_infos = 6;
- uint64 remote_volume_count = 7;
+ repeated DataCenterInfo data_center_infos = 2;
+ map<string, DiskInfo> diskInfos = 3;
}
message VolumeListRequest {
}
@@ -258,9 +261,44 @@ message LookupEcVolumeResponse {
repeated EcShardIdLocation shard_id_locations = 2;
}
+message VacuumVolumeRequest {
+ float garbage_threshold = 1;
+}
+message VacuumVolumeResponse {
+}
+
message GetMasterConfigurationRequest {
}
message GetMasterConfigurationResponse {
string metrics_address = 1;
uint32 metrics_interval_seconds = 2;
+ repeated StorageBackend storage_backends = 3;
+ string default_replication = 4;
+ string leader = 5;
+}
+
+message ListMasterClientsRequest {
+ string client_type = 1;
+}
+message ListMasterClientsResponse {
+ repeated string grpc_addresses = 1;
+}
+
+message LeaseAdminTokenRequest {
+ int64 previous_token = 1;
+ int64 previous_lock_time = 2;
+ string lock_name = 3;
+ string client_name = 4;
+}
+message LeaseAdminTokenResponse {
+ int64 token = 1;
+ int64 lock_ts_ns = 2;
+}
+
+message ReleaseAdminTokenRequest {
+ int64 previous_token = 1;
+ int64 previous_lock_time = 2;
+ string lock_name = 3;
+}
+message ReleaseAdminTokenResponse {
}
diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go
index ea4362c92..29d8499f8 100644
--- a/weed/pb/master_pb/master.pb.go
+++ b/weed/pb/master_pb/master.pb.go
@@ -1,1485 +1,4088 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.12.3
// source: master.proto
-// DO NOT EDIT!
-
-/*
-Package master_pb is a generated protocol buffer package.
-
-It is generated from these files:
- master.proto
-
-It has these top-level messages:
- Heartbeat
- HeartbeatResponse
- VolumeInformationMessage
- VolumeShortInformationMessage
- VolumeEcShardInformationMessage
- StorageBackend
- Empty
- SuperBlockExtra
- KeepConnectedRequest
- VolumeLocation
- LookupVolumeRequest
- LookupVolumeResponse
- Location
- AssignRequest
- AssignResponse
- StatisticsRequest
- StatisticsResponse
- StorageType
- Collection
- CollectionListRequest
- CollectionListResponse
- CollectionDeleteRequest
- CollectionDeleteResponse
- DataNodeInfo
- RackInfo
- DataCenterInfo
- TopologyInfo
- VolumeListRequest
- VolumeListResponse
- LookupEcVolumeRequest
- LookupEcVolumeResponse
- GetMasterConfigurationRequest
- GetMasterConfigurationResponse
-*/
-package master_pb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package master_pb
import (
- context "golang.org/x/net/context"
+ context "context"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
type Heartbeat struct {
- Ip string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"`
- Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"`
- MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"`
- DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
- Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"`
- AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"`
- Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey,proto3" json:"max_file_key,omitempty"`
+ DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"`
+ AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort,proto3" json:"admin_port,omitempty"`
+ Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes,proto3" json:"volumes,omitempty"`
// delta volumes
- NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes" json:"new_volumes,omitempty"`
- DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes" json:"deleted_volumes,omitempty"`
- HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes" json:"has_no_volumes,omitempty"`
+ NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes,proto3" json:"new_volumes,omitempty"`
+ DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes,proto3" json:"deleted_volumes,omitempty"`
+ HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes,proto3" json:"has_no_volumes,omitempty"`
// erasure coding
- EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards" json:"ec_shards,omitempty"`
+ EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards,proto3" json:"ec_shards,omitempty"`
// delta erasure coding shards
- NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards" json:"new_ec_shards,omitempty"`
- DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards" json:"deleted_ec_shards,omitempty"`
- HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards" json:"has_no_ec_shards,omitempty"`
+ NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards,proto3" json:"new_ec_shards,omitempty"`
+ DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards,proto3" json:"deleted_ec_shards,omitempty"`
+ HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards,proto3" json:"has_no_ec_shards,omitempty"`
+ MaxVolumeCounts map[string]uint32 `protobuf:"bytes,4,rep,name=max_volume_counts,json=maxVolumeCounts,proto3" json:"max_volume_counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
}
-func (m *Heartbeat) Reset() { *m = Heartbeat{} }
-func (m *Heartbeat) String() string { return proto.CompactTextString(m) }
-func (*Heartbeat) ProtoMessage() {}
-func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *Heartbeat) GetIp() string {
- if m != nil {
- return m.Ip
+func (x *Heartbeat) Reset() {
+ *x = Heartbeat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *Heartbeat) GetPort() uint32 {
- if m != nil {
- return m.Port
+func (x *Heartbeat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Heartbeat) ProtoMessage() {}
+
+func (x *Heartbeat) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead.
+func (*Heartbeat) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{0}
}
-func (m *Heartbeat) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *Heartbeat) GetIp() string {
+ if x != nil {
+ return x.Ip
}
return ""
}
-func (m *Heartbeat) GetMaxVolumeCount() uint32 {
- if m != nil {
- return m.MaxVolumeCount
+func (x *Heartbeat) GetPort() uint32 {
+ if x != nil {
+ return x.Port
}
return 0
}
-func (m *Heartbeat) GetMaxFileKey() uint64 {
- if m != nil {
- return m.MaxFileKey
+func (x *Heartbeat) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
+ }
+ return ""
+}
+
+func (x *Heartbeat) GetMaxFileKey() uint64 {
+ if x != nil {
+ return x.MaxFileKey
}
return 0
}
-func (m *Heartbeat) GetDataCenter() string {
- if m != nil {
- return m.DataCenter
+func (x *Heartbeat) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
}
return ""
}
-func (m *Heartbeat) GetRack() string {
- if m != nil {
- return m.Rack
+func (x *Heartbeat) GetRack() string {
+ if x != nil {
+ return x.Rack
}
return ""
}
-func (m *Heartbeat) GetAdminPort() uint32 {
- if m != nil {
- return m.AdminPort
+func (x *Heartbeat) GetAdminPort() uint32 {
+ if x != nil {
+ return x.AdminPort
}
return 0
}
-func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage {
- if m != nil {
- return m.Volumes
+func (x *Heartbeat) GetVolumes() []*VolumeInformationMessage {
+ if x != nil {
+ return x.Volumes
}
return nil
}
-func (m *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage {
- if m != nil {
- return m.NewVolumes
+func (x *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage {
+ if x != nil {
+ return x.NewVolumes
}
return nil
}
-func (m *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage {
- if m != nil {
- return m.DeletedVolumes
+func (x *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage {
+ if x != nil {
+ return x.DeletedVolumes
}
return nil
}
-func (m *Heartbeat) GetHasNoVolumes() bool {
- if m != nil {
- return m.HasNoVolumes
+func (x *Heartbeat) GetHasNoVolumes() bool {
+ if x != nil {
+ return x.HasNoVolumes
}
return false
}
-func (m *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage {
- if m != nil {
- return m.EcShards
+func (x *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.EcShards
}
return nil
}
-func (m *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage {
- if m != nil {
- return m.NewEcShards
+func (x *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.NewEcShards
}
return nil
}
-func (m *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage {
- if m != nil {
- return m.DeletedEcShards
+func (x *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.DeletedEcShards
}
return nil
}
-func (m *Heartbeat) GetHasNoEcShards() bool {
- if m != nil {
- return m.HasNoEcShards
+func (x *Heartbeat) GetHasNoEcShards() bool {
+ if x != nil {
+ return x.HasNoEcShards
}
return false
}
+func (x *Heartbeat) GetMaxVolumeCounts() map[string]uint32 {
+ if x != nil {
+ return x.MaxVolumeCounts
+ }
+ return nil
+}
+
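
Worth flagging amid the generated accessors: Heartbeat field 4 is repurposed from the old scalar max_volume_count (uint32) into the MaxVolumeCounts map keyed by disk type (note the `bytes,4,rep` tag above). A minimal sketch of building the new message follows; the "hdd"/"ssd" keys and all values are illustrative assumptions, as the diff does not fix a key vocabulary.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// buildHeartbeat sketches the per-disk-type capacity report that replaces
// the old scalar max_volume_count on field 4 of Heartbeat.
func buildHeartbeat() *master_pb.Heartbeat {
	return &master_pb.Heartbeat{
		Ip:   "10.0.0.5", // illustrative values
		Port: 8080,
		MaxVolumeCounts: map[string]uint32{
			"hdd": 100,
			"ssd": 30,
		},
	}
}

func main() {
	fmt.Println(buildHeartbeat())
}
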
type HeartbeatResponse struct {
- VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"`
- Leader string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"`
- MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"`
- MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"`
- StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends" json:"storage_backends,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"`
+ Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"`
+ MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
+ MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
+ StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"`
+}
+
+func (x *HeartbeatResponse) Reset() {
+ *x = HeartbeatResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeartbeatResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeartbeatResponse) ProtoMessage() {}
+
+func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} }
-func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) }
-func (*HeartbeatResponse) ProtoMessage() {}
-func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead.
+func (*HeartbeatResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{1}
+}
-func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 {
- if m != nil {
- return m.VolumeSizeLimit
+func (x *HeartbeatResponse) GetVolumeSizeLimit() uint64 {
+ if x != nil {
+ return x.VolumeSizeLimit
}
return 0
}
-func (m *HeartbeatResponse) GetLeader() string {
- if m != nil {
- return m.Leader
+func (x *HeartbeatResponse) GetLeader() string {
+ if x != nil {
+ return x.Leader
}
return ""
}
-func (m *HeartbeatResponse) GetMetricsAddress() string {
- if m != nil {
- return m.MetricsAddress
+func (x *HeartbeatResponse) GetMetricsAddress() string {
+ if x != nil {
+ return x.MetricsAddress
}
return ""
}
-func (m *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 {
- if m != nil {
- return m.MetricsIntervalSeconds
+func (x *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 {
+ if x != nil {
+ return x.MetricsIntervalSeconds
}
return 0
}
-func (m *HeartbeatResponse) GetStorageBackends() []*StorageBackend {
- if m != nil {
- return m.StorageBackends
+func (x *HeartbeatResponse) GetStorageBackends() []*StorageBackend {
+ if x != nil {
+ return x.StorageBackends
}
return nil
}
type VolumeInformationMessage struct {
- Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
- Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
- FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
- DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"`
- DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"`
- ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
- ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"`
- Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"`
- Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"`
- CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"`
- ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"`
- RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName" json:"remote_storage_name,omitempty"`
- RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey" json:"remote_storage_key,omitempty"`
-}
-
-func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} }
-func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) }
-func (*VolumeInformationMessage) ProtoMessage() {}
-func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *VolumeInformationMessage) GetId() uint32 {
- if m != nil {
- return m.Id
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Size uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+ DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"`
+ DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"`
+ ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"`
+ Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"`
+ Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"`
+ RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"`
+ RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"`
+ DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *VolumeInformationMessage) Reset() {
+ *x = VolumeInformationMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeInformationMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeInformationMessage) ProtoMessage() {}
+
+func (x *VolumeInformationMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeInformationMessage.ProtoReflect.Descriptor instead.
+func (*VolumeInformationMessage) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *VolumeInformationMessage) GetId() uint32 {
+ if x != nil {
+ return x.Id
}
return 0
}
-func (m *VolumeInformationMessage) GetSize() uint64 {
- if m != nil {
- return m.Size
+func (x *VolumeInformationMessage) GetSize() uint64 {
+ if x != nil {
+ return x.Size
}
return 0
}
-func (m *VolumeInformationMessage) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeInformationMessage) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeInformationMessage) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
+func (x *VolumeInformationMessage) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
}
return 0
}
-func (m *VolumeInformationMessage) GetDeleteCount() uint64 {
- if m != nil {
- return m.DeleteCount
+func (x *VolumeInformationMessage) GetDeleteCount() uint64 {
+ if x != nil {
+ return x.DeleteCount
}
return 0
}
-func (m *VolumeInformationMessage) GetDeletedByteCount() uint64 {
- if m != nil {
- return m.DeletedByteCount
+func (x *VolumeInformationMessage) GetDeletedByteCount() uint64 {
+ if x != nil {
+ return x.DeletedByteCount
}
return 0
}
-func (m *VolumeInformationMessage) GetReadOnly() bool {
- if m != nil {
- return m.ReadOnly
+func (x *VolumeInformationMessage) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
}
return false
}
-func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 {
- if m != nil {
- return m.ReplicaPlacement
+func (x *VolumeInformationMessage) GetReplicaPlacement() uint32 {
+ if x != nil {
+ return x.ReplicaPlacement
}
return 0
}
-func (m *VolumeInformationMessage) GetVersion() uint32 {
- if m != nil {
- return m.Version
+func (x *VolumeInformationMessage) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
}
return 0
}
-func (m *VolumeInformationMessage) GetTtl() uint32 {
- if m != nil {
- return m.Ttl
+func (x *VolumeInformationMessage) GetTtl() uint32 {
+ if x != nil {
+ return x.Ttl
}
return 0
}
-func (m *VolumeInformationMessage) GetCompactRevision() uint32 {
- if m != nil {
- return m.CompactRevision
+func (x *VolumeInformationMessage) GetCompactRevision() uint32 {
+ if x != nil {
+ return x.CompactRevision
}
return 0
}
-func (m *VolumeInformationMessage) GetModifiedAtSecond() int64 {
- if m != nil {
- return m.ModifiedAtSecond
+func (x *VolumeInformationMessage) GetModifiedAtSecond() int64 {
+ if x != nil {
+ return x.ModifiedAtSecond
}
return 0
}
-func (m *VolumeInformationMessage) GetRemoteStorageName() string {
- if m != nil {
- return m.RemoteStorageName
+func (x *VolumeInformationMessage) GetRemoteStorageName() string {
+ if x != nil {
+ return x.RemoteStorageName
+ }
+ return ""
+}
+
+func (x *VolumeInformationMessage) GetRemoteStorageKey() string {
+ if x != nil {
+ return x.RemoteStorageKey
}
return ""
}
-func (m *VolumeInformationMessage) GetRemoteStorageKey() string {
- if m != nil {
- return m.RemoteStorageKey
+func (x *VolumeInformationMessage) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
}
return ""
}
type VolumeShortInformationMessage struct {
- Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
- ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"`
- Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"`
- Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"`
+ Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"`
+ Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *VolumeShortInformationMessage) Reset() {
+ *x = VolumeShortInformationMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeShortInformationMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeShortInformationMessage) Reset() { *m = VolumeShortInformationMessage{} }
-func (m *VolumeShortInformationMessage) String() string { return proto.CompactTextString(m) }
-func (*VolumeShortInformationMessage) ProtoMessage() {}
-func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*VolumeShortInformationMessage) ProtoMessage() {}
-func (m *VolumeShortInformationMessage) GetId() uint32 {
- if m != nil {
- return m.Id
+func (x *VolumeShortInformationMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeShortInformationMessage.ProtoReflect.Descriptor instead.
+func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *VolumeShortInformationMessage) GetId() uint32 {
+ if x != nil {
+ return x.Id
}
return 0
}
-func (m *VolumeShortInformationMessage) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeShortInformationMessage) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeShortInformationMessage) GetReplicaPlacement() uint32 {
- if m != nil {
- return m.ReplicaPlacement
+func (x *VolumeShortInformationMessage) GetReplicaPlacement() uint32 {
+ if x != nil {
+ return x.ReplicaPlacement
}
return 0
}
-func (m *VolumeShortInformationMessage) GetVersion() uint32 {
- if m != nil {
- return m.Version
+func (x *VolumeShortInformationMessage) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
}
return 0
}
-func (m *VolumeShortInformationMessage) GetTtl() uint32 {
- if m != nil {
- return m.Ttl
+func (x *VolumeShortInformationMessage) GetTtl() uint32 {
+ if x != nil {
+ return x.Ttl
}
return 0
}
+func (x *VolumeShortInformationMessage) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
type VolumeEcShardInformationMessage struct {
- Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"`
+ DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *VolumeEcShardInformationMessage) Reset() {
+ *x = VolumeEcShardInformationMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardInformationMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardInformationMessage) ProtoMessage() {}
+
+func (x *VolumeEcShardInformationMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} }
-func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardInformationMessage) ProtoMessage() {}
-func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+// Deprecated: Use VolumeEcShardInformationMessage.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{4}
+}
-func (m *VolumeEcShardInformationMessage) GetId() uint32 {
- if m != nil {
- return m.Id
+func (x *VolumeEcShardInformationMessage) GetId() uint32 {
+ if x != nil {
+ return x.Id
}
return 0
}
-func (m *VolumeEcShardInformationMessage) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeEcShardInformationMessage) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 {
- if m != nil {
- return m.EcIndexBits
+func (x *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 {
+ if x != nil {
+ return x.EcIndexBits
}
return 0
}
+func (x *VolumeEcShardInformationMessage) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
type StorageBackend struct {
- Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
- Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
- Properties map[string]string `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *StorageBackend) Reset() {
+ *x = StorageBackend{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StorageBackend) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StorageBackend) ProtoMessage() {}
+
+func (x *StorageBackend) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *StorageBackend) Reset() { *m = StorageBackend{} }
-func (m *StorageBackend) String() string { return proto.CompactTextString(m) }
-func (*StorageBackend) ProtoMessage() {}
-func (*StorageBackend) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+// Deprecated: Use StorageBackend.ProtoReflect.Descriptor instead.
+func (*StorageBackend) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{5}
+}
-func (m *StorageBackend) GetType() string {
- if m != nil {
- return m.Type
+func (x *StorageBackend) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
-func (m *StorageBackend) GetId() string {
- if m != nil {
- return m.Id
+func (x *StorageBackend) GetId() string {
+ if x != nil {
+ return x.Id
}
return ""
}
-func (m *StorageBackend) GetProperties() map[string]string {
- if m != nil {
- return m.Properties
+func (x *StorageBackend) GetProperties() map[string]string {
+ if x != nil {
+ return x.Properties
}
return nil
}
type Empty struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (x *Empty) Reset() {
+ *x = Empty{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type SuperBlockExtra struct {
- ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"`
+func (x *Empty) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *SuperBlockExtra) Reset() { *m = SuperBlockExtra{} }
-func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) }
-func (*SuperBlockExtra) ProtoMessage() {}
-func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*Empty) ProtoMessage() {}
-func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding {
- if m != nil {
- return m.ErasureCoding
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type SuperBlockExtra_ErasureCoding struct {
- Data uint32 `protobuf:"varint,1,opt,name=data" json:"data,omitempty"`
- Parity uint32 `protobuf:"varint,2,opt,name=parity" json:"parity,omitempty"`
- VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{6}
}
-func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_ErasureCoding{} }
-func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) }
-func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {}
-func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{7, 0}
+type SuperBlockExtra struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"`
}
-func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 {
- if m != nil {
- return m.Data
+func (x *SuperBlockExtra) Reset() {
+ *x = SuperBlockExtra{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *SuperBlockExtra_ErasureCoding) GetParity() uint32 {
- if m != nil {
- return m.Parity
+func (x *SuperBlockExtra) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SuperBlockExtra) ProtoMessage() {}
+
+func (x *SuperBlockExtra) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 {
- if m != nil {
- return m.VolumeIds
+// Deprecated: Use SuperBlockExtra.ProtoReflect.Descriptor instead.
+func (*SuperBlockExtra) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding {
+ if x != nil {
+ return x.ErasureCoding
}
return nil
}
type KeepConnectedRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"`
+}
+
+func (x *KeepConnectedRequest) Reset() {
+ *x = KeepConnectedRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepConnectedRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} }
-func (m *KeepConnectedRequest) String() string { return proto.CompactTextString(m) }
-func (*KeepConnectedRequest) ProtoMessage() {}
-func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*KeepConnectedRequest) ProtoMessage() {}
-func (m *KeepConnectedRequest) GetName() string {
- if m != nil {
- return m.Name
+func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead.
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *KeepConnectedRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
+func (x *KeepConnectedRequest) GetGrpcPort() uint32 {
+ if x != nil {
+ return x.GrpcPort
+ }
+ return 0
+}
+
type VolumeLocation struct {
- Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"`
- DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"`
- Leader string `protobuf:"bytes,5,opt,name=leader" json:"leader,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"`
+ DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"`
+ Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional when leader is not itself
+ DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // optional when DataCenter is in use
+}
+
+func (x *VolumeLocation) Reset() {
+ *x = VolumeLocation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeLocation) ProtoMessage() {}
+
+func (x *VolumeLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeLocation) Reset() { *m = VolumeLocation{} }
-func (m *VolumeLocation) String() string { return proto.CompactTextString(m) }
-func (*VolumeLocation) ProtoMessage() {}
-func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+// Deprecated: Use VolumeLocation.ProtoReflect.Descriptor instead.
+func (*VolumeLocation) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{9}
+}
-func (m *VolumeLocation) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *VolumeLocation) GetUrl() string {
+ if x != nil {
+ return x.Url
}
return ""
}
-func (m *VolumeLocation) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *VolumeLocation) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
}
return ""
}
-func (m *VolumeLocation) GetNewVids() []uint32 {
- if m != nil {
- return m.NewVids
+func (x *VolumeLocation) GetNewVids() []uint32 {
+ if x != nil {
+ return x.NewVids
}
return nil
}
-func (m *VolumeLocation) GetDeletedVids() []uint32 {
- if m != nil {
- return m.DeletedVids
+func (x *VolumeLocation) GetDeletedVids() []uint32 {
+ if x != nil {
+ return x.DeletedVids
}
return nil
}
-func (m *VolumeLocation) GetLeader() string {
- if m != nil {
- return m.Leader
+func (x *VolumeLocation) GetLeader() string {
+ if x != nil {
+ return x.Leader
+ }
+ return ""
+}
+
+func (x *VolumeLocation) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
}
return ""
}
type LookupVolumeRequest struct {
- VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, a bit faster if provided.
+}
+
+func (x *LookupVolumeRequest) Reset() {
+ *x = LookupVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeRequest) ProtoMessage() {}
+
+func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
-func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeRequest) ProtoMessage() {}
-func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead.
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{10}
+}
-func (m *LookupVolumeRequest) GetVolumeIds() []string {
- if m != nil {
- return m.VolumeIds
+func (x *LookupVolumeRequest) GetVolumeIds() []string {
+ if x != nil {
+ return x.VolumeIds
}
return nil
}
-func (m *LookupVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *LookupVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
type LookupVolumeResponse struct {
- VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations" json:"volume_id_locations,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations,proto3" json:"volume_id_locations,omitempty"`
+}
+
+func (x *LookupVolumeResponse) Reset() {
+ *x = LookupVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupVolumeResponse) ProtoMessage() {}
+
+func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
-func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeResponse) ProtoMessage() {}
-func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead.
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{11}
+}
-func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation {
- if m != nil {
- return m.VolumeIdLocations
+func (x *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation {
+ if x != nil {
+ return x.VolumeIdLocations
}
return nil
}
-type LookupVolumeResponse_VolumeIdLocation struct {
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"`
+type Location struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
}
-func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVolumeResponse_VolumeIdLocation{} }
-func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) }
-func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {}
-func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{11, 0}
+func (x *Location) Reset() {
+ *x = Location{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Location) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Location) ProtoMessage() {}
+
+func (x *Location) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Location.ProtoReflect.Descriptor instead.
+func (*Location) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{12}
}
-func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string {
- if m != nil {
- return m.VolumeId
+func (x *Location) GetUrl() string {
+ if x != nil {
+ return x.Url
}
return ""
}
-func (m *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location {
- if m != nil {
- return m.Locations
+func (x *Location) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
}
- return nil
+ return ""
+}
+
+type AssignRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
+ Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"`
+ DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"`
+ MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"`
+ WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount,proto3" json:"Writable_volume_count,omitempty"`
+ DiskType string `protobuf:"bytes,10,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *AssignRequest) Reset() {
+ *x = AssignRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignRequest) ProtoMessage() {}
+
+func (x *AssignRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignRequest.ProtoReflect.Descriptor instead.
+func (*AssignRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *AssignRequest) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
}
-func (m *LookupVolumeResponse_VolumeIdLocation) GetError() string {
- if m != nil {
- return m.Error
+func (x *AssignRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
return ""
}
-type Location struct {
- Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+func (x *AssignRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
}
-func (m *Location) Reset() { *m = Location{} }
-func (m *Location) String() string { return proto.CompactTextString(m) }
-func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (x *AssignRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
-func (m *Location) GetUrl() string {
- if m != nil {
- return m.Url
+func (x *AssignRequest) GetDataCenter() string {
+ if x != nil {
+ return x.DataCenter
}
return ""
}
-func (m *Location) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (x *AssignRequest) GetRack() string {
+ if x != nil {
+ return x.Rack
}
return ""
}
-type AssignRequest struct {
- Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
- Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"`
- DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
- Rack string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"`
- DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"`
- MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"`
- WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount" json:"Writable_volume_count,omitempty"`
-}
-
-func (m *AssignRequest) Reset() { *m = AssignRequest{} }
-func (m *AssignRequest) String() string { return proto.CompactTextString(m) }
-func (*AssignRequest) ProtoMessage() {}
-func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-func (m *AssignRequest) GetCount() uint64 {
- if m != nil {
- return m.Count
+func (x *AssignRequest) GetDataNode() string {
+ if x != nil {
+ return x.DataNode
+ }
+ return ""
+}
+
+func (x *AssignRequest) GetMemoryMapMaxSizeMb() uint32 {
+ if x != nil {
+ return x.MemoryMapMaxSizeMb
+ }
+ return 0
+}
+
+func (x *AssignRequest) GetWritableVolumeCount() uint32 {
+ if x != nil {
+ return x.WritableVolumeCount
}
return 0
}
-func (m *AssignRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *AssignRequest) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
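
AssignRequest picks up disk_type (field 10), letting an assign call target a disk class. A hedged usage sketch, assuming the Seaweed service exposes an Assign RPC over this request/response pair, that "ssd" is a recognized disk type, and the same illustrative master address as above:

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure()) // assumed address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	resp, err := master_pb.NewSeaweedClient(conn).Assign(context.Background(), &master_pb.AssignRequest{
		Count:    1,
		DiskType: "ssd", // new field; an empty string keeps the default disk class
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != "" {
		log.Fatal(resp.Error)
	}
	log.Printf("assigned fid=%s at %s", resp.Fid, resp.Url)
}
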
+type AssignResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"`
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"`
+ Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+ Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"`
+}
+
+func (x *AssignResponse) Reset() {
+ *x = AssignResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssignResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssignResponse) ProtoMessage() {}
+
+func (x *AssignResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssignResponse.ProtoReflect.Descriptor instead.
+func (*AssignResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *AssignResponse) GetFid() string {
+ if x != nil {
+ return x.Fid
+ }
+ return ""
+}
+
+func (x *AssignResponse) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *AssignResponse) GetPublicUrl() string {
+ if x != nil {
+ return x.PublicUrl
}
return ""
}
-func (m *AssignRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *AssignResponse) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AssignResponse) GetError() string {
+ if x != nil {
+ return x.Error
}
return ""
}
-func (m *AssignRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *AssignResponse) GetAuth() string {
+ if x != nil {
+ return x.Auth
}
return ""
}
-func (m *AssignRequest) GetDataCenter() string {
- if m != nil {
- return m.DataCenter
+type StatisticsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *StatisticsRequest) Reset() {
+ *x = StatisticsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsRequest) ProtoMessage() {}
+
+func (x *StatisticsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead.
+func (*StatisticsRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *StatisticsRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *StatisticsRequest) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type StatisticsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+ UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"`
+ FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+}
+
+func (x *StatisticsResponse) Reset() {
+ *x = StatisticsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatisticsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatisticsResponse) ProtoMessage() {}
+
+func (x *StatisticsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead.
+func (*StatisticsResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *StatisticsResponse) GetTotalSize() uint64 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetUsedSize() uint64 {
+ if x != nil {
+ return x.UsedSize
+ }
+ return 0
+}
+
+func (x *StatisticsResponse) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
+ }
+ return 0
+}
+
+//
+// collection related
+//
+type Collection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Collection) Reset() {
+ *x = Collection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Collection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Collection) ProtoMessage() {}
+
+func (x *Collection) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Collection.ProtoReflect.Descriptor instead.
+func (*Collection) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *Collection) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type CollectionListRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"`
+ IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"`
+}
+
+func (x *CollectionListRequest) Reset() {
+ *x = CollectionListRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionListRequest) ProtoMessage() {}
+
+func (x *CollectionListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead.
+func (*CollectionListRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *CollectionListRequest) GetIncludeNormalVolumes() bool {
+ if x != nil {
+ return x.IncludeNormalVolumes
+ }
+ return false
+}
+
+func (x *CollectionListRequest) GetIncludeEcVolumes() bool {
+ if x != nil {
+ return x.IncludeEcVolumes
+ }
+ return false
+}
+
+type CollectionListResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
+}
+
+func (x *CollectionListResponse) Reset() {
+ *x = CollectionListResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionListResponse) ProtoMessage() {}
+
+func (x *CollectionListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead.
+func (*CollectionListResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *CollectionListResponse) GetCollections() []*Collection {
+ if x != nil {
+ return x.Collections
+ }
+ return nil
+}
+
+type CollectionDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *CollectionDeleteRequest) Reset() {
+ *x = CollectionDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionDeleteRequest) ProtoMessage() {}
+
+func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionDeleteRequest.ProtoReflect.Descriptor instead.
+func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *CollectionDeleteRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type CollectionDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CollectionDeleteResponse) Reset() {
+ *x = CollectionDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionDeleteResponse) ProtoMessage() {}
+
+func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionDeleteResponse.ProtoReflect.Descriptor instead.
+func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{21}
+}
+
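The Collection messages pair with the CollectionList/CollectionDelete RPCs on the same service. A hedged sketch of listing collections and deleting one by name (illustrative only; client construction as in the earlier sketch, and the SeaweedClient interface name is assumed from the generated gRPC stubs):

	func dropCollection(ctx context.Context, client master_pb.SeaweedClient, name string) error {
		list, err := client.CollectionList(ctx, &master_pb.CollectionListRequest{
			IncludeNormalVolumes: true,
			IncludeEcVolumes:     true,
		})
		if err != nil {
			return err
		}
		for _, c := range list.GetCollections() {
			if c.GetName() != name {
				continue
			}
			// CollectionDeleteResponse carries no fields; only the error matters.
			_, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{Name: name})
			return err
		}
		return nil // collection not found; nothing to delete
	}
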
+//
+// volume related
+//
+type DiskInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
+ MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
+ FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
+ ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
+ VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"`
+ EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"`
+ RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
+}
+
+func (x *DiskInfo) Reset() {
+ *x = DiskInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DiskInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DiskInfo) ProtoMessage() {}
+
+func (x *DiskInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DiskInfo.ProtoReflect.Descriptor instead.
+func (*DiskInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *DiskInfo) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *DiskInfo) GetVolumeCount() uint64 {
+ if x != nil {
+ return x.VolumeCount
+ }
+ return 0
+}
+
+func (x *DiskInfo) GetMaxVolumeCount() uint64 {
+ if x != nil {
+ return x.MaxVolumeCount
+ }
+ return 0
+}
+
+func (x *DiskInfo) GetFreeVolumeCount() uint64 {
+ if x != nil {
+ return x.FreeVolumeCount
+ }
+ return 0
+}
+
+func (x *DiskInfo) GetActiveVolumeCount() uint64 {
+ if x != nil {
+ return x.ActiveVolumeCount
+ }
+ return 0
+}
+
+func (x *DiskInfo) GetVolumeInfos() []*VolumeInformationMessage {
+ if x != nil {
+ return x.VolumeInfos
+ }
+ return nil
+}
+
+func (x *DiskInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage {
+ if x != nil {
+ return x.EcShardInfos
+ }
+ return nil
+}
+
+func (x *DiskInfo) GetRemoteVolumeCount() uint64 {
+ if x != nil {
+ return x.RemoteVolumeCount
+ }
+ return 0
+}
+
+type DataNodeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ DiskInfos map[string]*DiskInfo `protobuf:"bytes,2,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *DataNodeInfo) Reset() {
+ *x = DataNodeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DataNodeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataNodeInfo) ProtoMessage() {}
+
+func (x *DataNodeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataNodeInfo.ProtoReflect.Descriptor instead.
+func (*DataNodeInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *DataNodeInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *DataNodeInfo) GetDiskInfos() map[string]*DiskInfo {
+ if x != nil {
+ return x.DiskInfos
+ }
+ return nil
+}
+
+type RackInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ DataNodeInfos []*DataNodeInfo `protobuf:"bytes,2,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"`
+ DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *RackInfo) Reset() {
+ *x = RackInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RackInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RackInfo) ProtoMessage() {}
+
+func (x *RackInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RackInfo.ProtoReflect.Descriptor instead.
+func (*RackInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *RackInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *RackInfo) GetDataNodeInfos() []*DataNodeInfo {
+ if x != nil {
+ return x.DataNodeInfos
+ }
+ return nil
+}
+
+func (x *RackInfo) GetDiskInfos() map[string]*DiskInfo {
+ if x != nil {
+ return x.DiskInfos
+ }
+ return nil
+}
+
+type DataCenterInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ RackInfos []*RackInfo `protobuf:"bytes,2,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"`
+ DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *DataCenterInfo) Reset() {
+ *x = DataCenterInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DataCenterInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataCenterInfo) ProtoMessage() {}
+
+func (x *DataCenterInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataCenterInfo.ProtoReflect.Descriptor instead.
+func (*DataCenterInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *DataCenterInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *DataCenterInfo) GetRackInfos() []*RackInfo {
+ if x != nil {
+ return x.RackInfos
+ }
+ return nil
+}
+
+func (x *DataCenterInfo) GetDiskInfos() map[string]*DiskInfo {
+ if x != nil {
+ return x.DiskInfos
+ }
+ return nil
+}
+
+type TopologyInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ DataCenterInfos []*DataCenterInfo `protobuf:"bytes,2,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"`
+ DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *TopologyInfo) Reset() {
+ *x = TopologyInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TopologyInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TopologyInfo) ProtoMessage() {}
+
+func (x *TopologyInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TopologyInfo.ProtoReflect.Descriptor instead.
+func (*TopologyInfo) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *TopologyInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo {
+ if x != nil {
+ return x.DataCenterInfos
+ }
+ return nil
+}
+
+func (x *TopologyInfo) GetDiskInfos() map[string]*DiskInfo {
+ if x != nil {
+ return x.DiskInfos
+ }
+ return nil
+}
+
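The substantive change in this block is structural: DataNodeInfo, RackInfo, DataCenterInfo, and TopologyInfo drop their flat volume counters in favor of a per-disk-type DiskInfos map (keyed by disk type, where the empty string presumably denotes the default hdd type). A small sketch of aggregating the new shape, e.g. over the TopologyInfo returned by the VolumeList RPC below (illustrative, not part of the patch):

	// freeVolumesByDiskType sums DiskInfo.free_volume_count per disk type
	// across every data center, rack, and data node in the topology.
	func freeVolumesByDiskType(t *master_pb.TopologyInfo) map[string]uint64 {
		free := make(map[string]uint64)
		for _, dc := range t.GetDataCenterInfos() {
			for _, rack := range dc.GetRackInfos() {
				for _, dn := range rack.GetDataNodeInfos() {
					for diskType, disk := range dn.GetDiskInfos() {
						free[diskType] += disk.GetFreeVolumeCount()
					}
				}
			}
		}
		return free
	}
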
+type VolumeListRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeListRequest) Reset() {
+ *x = VolumeListRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeListRequest) ProtoMessage() {}
+
+func (x *VolumeListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeListRequest.ProtoReflect.Descriptor instead.
+func (*VolumeListRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{27}
+}
+
+type VolumeListResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"`
+ VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"`
+}
+
+func (x *VolumeListResponse) Reset() {
+ *x = VolumeListResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeListResponse) ProtoMessage() {}
+
+func (x *VolumeListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeListResponse.ProtoReflect.Descriptor instead.
+func (*VolumeListResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *VolumeListResponse) GetTopologyInfo() *TopologyInfo {
+ if x != nil {
+ return x.TopologyInfo
+ }
+ return nil
+}
+
+func (x *VolumeListResponse) GetVolumeSizeLimitMb() uint64 {
+ if x != nil {
+ return x.VolumeSizeLimitMb
+ }
+ return 0
+}
+
+type LookupEcVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *LookupEcVolumeRequest) Reset() {
+ *x = LookupEcVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupEcVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupEcVolumeRequest) ProtoMessage() {}
+
+func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupEcVolumeRequest.ProtoReflect.Descriptor instead.
+func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *LookupEcVolumeRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type LookupEcVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations,proto3" json:"shard_id_locations,omitempty"`
+}
+
+func (x *LookupEcVolumeResponse) Reset() {
+ *x = LookupEcVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LookupEcVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LookupEcVolumeResponse) ProtoMessage() {}
+
+func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LookupEcVolumeResponse.ProtoReflect.Descriptor instead.
+func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *LookupEcVolumeResponse) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation {
+ if x != nil {
+ return x.ShardIdLocations
+ }
+ return nil
+}
+
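A LookupEcVolumeResponse maps one erasure-coded volume to the servers holding each of its shards. A short sketch that flattens the response into a shard-id index (illustrative only; Location and its GetUrl getter come from the Location message defined elsewhere in this file):

	// ecShardServers returns, for each shard id, the URLs of the volume
	// servers that hold a copy of that shard.
	func ecShardServers(resp *master_pb.LookupEcVolumeResponse) map[uint32][]string {
		servers := make(map[uint32][]string)
		for _, loc := range resp.GetShardIdLocations() {
			for _, l := range loc.GetLocations() {
				servers[loc.GetShardId()] = append(servers[loc.GetShardId()], l.GetUrl())
			}
		}
		return servers
	}
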
+type VacuumVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GarbageThreshold float32 `protobuf:"fixed32,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"`
+}
+
+func (x *VacuumVolumeRequest) Reset() {
+ *x = VacuumVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *AssignRequest) GetRack() string {
- if m != nil {
- return m.Rack
- }
- return ""
+func (x *VacuumVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignRequest) GetDataNode() string {
- if m != nil {
- return m.DataNode
+func (*VacuumVolumeRequest) ProtoMessage() {}
+
+func (x *VacuumVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *AssignRequest) GetMemoryMapMaxSizeMb() uint32 {
- if m != nil {
- return m.MemoryMapMaxSizeMb
- }
- return 0
+// Deprecated: Use VacuumVolumeRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{31}
}
-func (m *AssignRequest) GetWritableVolumeCount() uint32 {
- if m != nil {
- return m.WritableVolumeCount
+func (x *VacuumVolumeRequest) GetGarbageThreshold() float32 {
+ if x != nil {
+ return x.GarbageThreshold
}
return 0
}
-type AssignResponse struct {
- Fid string `protobuf:"bytes,1,opt,name=fid" json:"fid,omitempty"`
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"`
- Auth string `protobuf:"bytes,6,opt,name=auth" json:"auth,omitempty"`
+type VacuumVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *AssignResponse) Reset() { *m = AssignResponse{} }
-func (m *AssignResponse) String() string { return proto.CompactTextString(m) }
-func (*AssignResponse) ProtoMessage() {}
-func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
-
-func (m *AssignResponse) GetFid() string {
- if m != nil {
- return m.Fid
+func (x *VacuumVolumeResponse) Reset() {
+ *x = VacuumVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *AssignResponse) GetUrl() string {
- if m != nil {
- return m.Url
- }
- return ""
+func (x *VacuumVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AssignResponse) GetPublicUrl() string {
- if m != nil {
- return m.PublicUrl
+func (*VacuumVolumeResponse) ProtoMessage() {}
+
+func (x *VacuumVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *AssignResponse) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
+// Deprecated: Use VacuumVolumeResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{32}
}
-func (m *AssignResponse) GetError() string {
- if m != nil {
- return m.Error
- }
- return ""
+type GetMasterConfigurationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *AssignResponse) GetAuth() string {
- if m != nil {
- return m.Auth
+func (x *GetMasterConfigurationRequest) Reset() {
+ *x = GetMasterConfigurationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type StatisticsRequest struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
+func (x *GetMasterConfigurationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
-func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
-func (*StatisticsRequest) ProtoMessage() {}
-func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*GetMasterConfigurationRequest) ProtoMessage() {}

-func (m *StatisticsRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *StatisticsRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead.
+func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{33}
}
-func (m *StatisticsRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+type GetMasterConfigurationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
+ MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
+ StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"`
+ DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"`
+ Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"`
+}
+
+func (x *GetMasterConfigurationResponse) Reset() {
+ *x = GetMasterConfigurationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type StatisticsResponse struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"`
- TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"`
- FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
+func (x *GetMasterConfigurationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
-func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
-func (*StatisticsResponse) ProtoMessage() {}
-func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*GetMasterConfigurationResponse) ProtoMessage() {}

-func (m *StatisticsResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *StatisticsResponse) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead.
+func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{34}
}
-func (m *StatisticsResponse) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *GetMasterConfigurationResponse) GetMetricsAddress() string {
+ if x != nil {
+ return x.MetricsAddress
}
return ""
}
-func (m *StatisticsResponse) GetTotalSize() uint64 {
- if m != nil {
- return m.TotalSize
+func (x *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 {
+ if x != nil {
+ return x.MetricsIntervalSeconds
}
return 0
}
-func (m *StatisticsResponse) GetUsedSize() uint64 {
- if m != nil {
- return m.UsedSize
+func (x *GetMasterConfigurationResponse) GetStorageBackends() []*StorageBackend {
+ if x != nil {
+ return x.StorageBackends
}
- return 0
+ return nil
}
-func (m *StatisticsResponse) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
+func (x *GetMasterConfigurationResponse) GetDefaultReplication() string {
+ if x != nil {
+ return x.DefaultReplication
}
- return 0
+ return ""
}
-type StorageType struct {
- Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"`
+func (x *GetMasterConfigurationResponse) GetLeader() string {
+ if x != nil {
+ return x.Leader
+ }
+ return ""
}
-func (m *StorageType) Reset() { *m = StorageType{} }
-func (m *StorageType) String() string { return proto.CompactTextString(m) }
-func (*StorageType) ProtoMessage() {}
-func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+type ListMasterClientsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields

-func (m *StorageType) GetReplication() string {
- if m != nil {
- return m.Replication
- }
- return ""
+ ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"`
}
-func (m *StorageType) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *ListMasterClientsRequest) Reset() {
+ *x = ListMasterClientsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type Collection struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+func (x *ListMasterClientsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Collection) Reset() { *m = Collection{} }
-func (m *Collection) String() string { return proto.CompactTextString(m) }
-func (*Collection) ProtoMessage() {}
-func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*ListMasterClientsRequest) ProtoMessage() {}

-func (m *Collection) GetName() string {
- if m != nil {
- return m.Name
+func (x *ListMasterClientsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-type CollectionListRequest struct {
- IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes" json:"include_normal_volumes,omitempty"`
- IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes" json:"include_ec_volumes,omitempty"`
+// Deprecated: Use ListMasterClientsRequest.ProtoReflect.Descriptor instead.
+func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{35}
}
-func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} }
-func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) }
-func (*CollectionListRequest) ProtoMessage() {}
-func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *CollectionListRequest) GetIncludeNormalVolumes() bool {
- if m != nil {
- return m.IncludeNormalVolumes
+func (x *ListMasterClientsRequest) GetClientType() string {
+ if x != nil {
+ return x.ClientType
}
- return false
+ return ""
}
-func (m *CollectionListRequest) GetIncludeEcVolumes() bool {
- if m != nil {
- return m.IncludeEcVolumes
+type ListMasterClientsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"`
+}
+
+func (x *ListMasterClientsResponse) Reset() {
+ *x = ListMasterClientsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return false
}
-type CollectionListResponse struct {
- Collections []*Collection `protobuf:"bytes,1,rep,name=collections" json:"collections,omitempty"`
+func (x *ListMasterClientsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *CollectionListResponse) Reset() { *m = CollectionListResponse{} }
-func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) }
-func (*CollectionListResponse) ProtoMessage() {}
-func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*ListMasterClientsResponse) ProtoMessage() {}

-func (m *CollectionListResponse) GetCollections() []*Collection {
- if m != nil {
- return m.Collections
+func (x *ListMasterClientsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type CollectionDeleteRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+// Deprecated: Use ListMasterClientsResponse.ProtoReflect.Descriptor instead.
+func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{36}
}
-func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} }
-func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*CollectionDeleteRequest) ProtoMessage() {}
-func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
-
-func (m *CollectionDeleteRequest) GetName() string {
- if m != nil {
- return m.Name
+func (x *ListMasterClientsResponse) GetGrpcAddresses() []string {
+ if x != nil {
+ return x.GrpcAddresses
}
- return ""
+ return nil
}
-type CollectionDeleteResponse struct {
+type LeaseAdminTokenRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"`
+ PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"`
+ LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"`
+ ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"`
}
-func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} }
-func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*CollectionDeleteResponse) ProtoMessage() {}
-func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (x *LeaseAdminTokenRequest) Reset() {
+ *x = LeaseAdminTokenRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}

-//
-// volume related
-//
-type DataNodeInfo struct {
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
- VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
- MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"`
- FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
- ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
- VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"`
- EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos" json:"ec_shard_infos,omitempty"`
- RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
+func (x *LeaseAdminTokenRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *DataNodeInfo) Reset() { *m = DataNodeInfo{} }
-func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) }
-func (*DataNodeInfo) ProtoMessage() {}
-func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*LeaseAdminTokenRequest) ProtoMessage() {}

-func (m *DataNodeInfo) GetId() string {
- if m != nil {
- return m.Id
+func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *DataNodeInfo) GetVolumeCount() uint64 {
- if m != nil {
- return m.VolumeCount
- }
- return 0
+// Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead.
+func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{37}
}
-func (m *DataNodeInfo) GetMaxVolumeCount() uint64 {
- if m != nil {
- return m.MaxVolumeCount
+func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 {
+ if x != nil {
+ return x.PreviousToken
}
return 0
}
-func (m *DataNodeInfo) GetFreeVolumeCount() uint64 {
- if m != nil {
- return m.FreeVolumeCount
+func (x *LeaseAdminTokenRequest) GetPreviousLockTime() int64 {
+ if x != nil {
+ return x.PreviousLockTime
}
return 0
}
-func (m *DataNodeInfo) GetActiveVolumeCount() uint64 {
- if m != nil {
- return m.ActiveVolumeCount
+func (x *LeaseAdminTokenRequest) GetLockName() string {
+ if x != nil {
+ return x.LockName
}
- return 0
+ return ""
}
-func (m *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage {
- if m != nil {
- return m.VolumeInfos
+func (x *LeaseAdminTokenRequest) GetClientName() string {
+ if x != nil {
+ return x.ClientName
}
- return nil
+ return ""
}
-func (m *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage {
- if m != nil {
- return m.EcShardInfos
- }
- return nil
+type LeaseAdminTokenResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"`
+ LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"`
}
-func (m *DataNodeInfo) GetRemoteVolumeCount() uint64 {
- if m != nil {
- return m.RemoteVolumeCount
+func (x *LeaseAdminTokenResponse) Reset() {
+ *x = LeaseAdminTokenResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type RackInfo struct {
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
- VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
- MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"`
- FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
- ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
- DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"`
- RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
+func (x *LeaseAdminTokenResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *RackInfo) Reset() { *m = RackInfo{} }
-func (m *RackInfo) String() string { return proto.CompactTextString(m) }
-func (*RackInfo) ProtoMessage() {}
-func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (*LeaseAdminTokenResponse) ProtoMessage() {}

-func (m *RackInfo) GetId() string {
- if m != nil {
- return m.Id
+func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *RackInfo) GetVolumeCount() uint64 {
- if m != nil {
- return m.VolumeCount
- }
- return 0
+// Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead.
+func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{38}
}
-func (m *RackInfo) GetMaxVolumeCount() uint64 {
- if m != nil {
- return m.MaxVolumeCount
+func (x *LeaseAdminTokenResponse) GetToken() int64 {
+ if x != nil {
+ return x.Token
}
return 0
}
-func (m *RackInfo) GetFreeVolumeCount() uint64 {
- if m != nil {
- return m.FreeVolumeCount
+func (x *LeaseAdminTokenResponse) GetLockTsNs() int64 {
+ if x != nil {
+ return x.LockTsNs
}
return 0
}
-func (m *RackInfo) GetActiveVolumeCount() uint64 {
- if m != nil {
- return m.ActiveVolumeCount
- }
- return 0
-}
+type ReleaseAdminTokenRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields

-func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo {
- if m != nil {
- return m.DataNodeInfos
- }
- return nil
+ PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"`
+ PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"`
+ LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"`
}
-func (m *RackInfo) GetRemoteVolumeCount() uint64 {
- if m != nil {
- return m.RemoteVolumeCount
+func (x *ReleaseAdminTokenRequest) Reset() {
+ *x = ReleaseAdminTokenRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type DataCenterInfo struct {
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
- VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
- MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"`
- FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
- ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
- RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"`
- RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
+func (x *ReleaseAdminTokenRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *DataCenterInfo) Reset() { *m = DataCenterInfo{} }
-func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) }
-func (*DataCenterInfo) ProtoMessage() {}
-func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (*ReleaseAdminTokenRequest) ProtoMessage() {}

-func (m *DataCenterInfo) GetId() string {
- if m != nil {
- return m.Id
+func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *DataCenterInfo) GetVolumeCount() uint64 {
- if m != nil {
- return m.VolumeCount
- }
- return 0
+// Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead.
+func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{39}
}
-func (m *DataCenterInfo) GetMaxVolumeCount() uint64 {
- if m != nil {
- return m.MaxVolumeCount
+func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 {
+ if x != nil {
+ return x.PreviousToken
}
return 0
}
-func (m *DataCenterInfo) GetFreeVolumeCount() uint64 {
- if m != nil {
- return m.FreeVolumeCount
+func (x *ReleaseAdminTokenRequest) GetPreviousLockTime() int64 {
+ if x != nil {
+ return x.PreviousLockTime
}
return 0
}
-func (m *DataCenterInfo) GetActiveVolumeCount() uint64 {
- if m != nil {
- return m.ActiveVolumeCount
+func (x *ReleaseAdminTokenRequest) GetLockName() string {
+ if x != nil {
+ return x.LockName
}
- return 0
+ return ""
}
-func (m *DataCenterInfo) GetRackInfos() []*RackInfo {
- if m != nil {
- return m.RackInfos
- }
- return nil
+type ReleaseAdminTokenResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *DataCenterInfo) GetRemoteVolumeCount() uint64 {
- if m != nil {
- return m.RemoteVolumeCount
+func (x *ReleaseAdminTokenResponse) Reset() {
+ *x = ReleaseAdminTokenResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type TopologyInfo struct {
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
- VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
- MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"`
- FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
- ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
- DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"`
- RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
+func (x *ReleaseAdminTokenResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *TopologyInfo) Reset() { *m = TopologyInfo{} }
-func (m *TopologyInfo) String() string { return proto.CompactTextString(m) }
-func (*TopologyInfo) ProtoMessage() {}
-func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (*ReleaseAdminTokenResponse) ProtoMessage() {}

-func (m *TopologyInfo) GetId() string {
- if m != nil {
- return m.Id
+func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *TopologyInfo) GetVolumeCount() uint64 {
- if m != nil {
- return m.VolumeCount
- }
- return 0
+// Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead.
+func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{40}
}
-func (m *TopologyInfo) GetMaxVolumeCount() uint64 {
- if m != nil {
- return m.MaxVolumeCount
- }
- return 0
+type SuperBlockExtra_ErasureCoding struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"`
+ Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"`
+ VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"`
}
-func (m *TopologyInfo) GetFreeVolumeCount() uint64 {
- if m != nil {
- return m.FreeVolumeCount
+func (x *SuperBlockExtra_ErasureCoding) Reset() {
+ *x = SuperBlockExtra_ErasureCoding{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *TopologyInfo) GetActiveVolumeCount() uint64 {
- if m != nil {
- return m.ActiveVolumeCount
- }
- return 0
+func (x *SuperBlockExtra_ErasureCoding) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo {
- if m != nil {
- return m.DataCenterInfos
+func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {}
+
+func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SuperBlockExtra_ErasureCoding.ProtoReflect.Descriptor instead.
+func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{7, 0}
}
-func (m *TopologyInfo) GetRemoteVolumeCount() uint64 {
- if m != nil {
- return m.RemoteVolumeCount
+func (x *SuperBlockExtra_ErasureCoding) GetData() uint32 {
+ if x != nil {
+ return x.Data
}
return 0
}
-type VolumeListRequest struct {
+func (x *SuperBlockExtra_ErasureCoding) GetParity() uint32 {
+ if x != nil {
+ return x.Parity
+ }
+ return 0
}
-func (m *VolumeListRequest) Reset() { *m = VolumeListRequest{} }
-func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeListRequest) ProtoMessage() {}
-func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
-
-type VolumeListResponse struct {
- TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"`
- VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb" json:"volume_size_limit_mb,omitempty"`
+func (x *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 {
+ if x != nil {
+ return x.VolumeIds
+ }
+ return nil
}
-func (m *VolumeListResponse) Reset() { *m = VolumeListResponse{} }
-func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeListResponse) ProtoMessage() {}
-func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+type LookupVolumeResponse_VolumeIdLocation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields

-func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo {
- if m != nil {
- return m.TopologyInfo
- }
- return nil
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"`
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
}
-func (m *VolumeListResponse) GetVolumeSizeLimitMb() uint64 {
- if m != nil {
- return m.VolumeSizeLimitMb
+func (x *LookupVolumeResponse_VolumeIdLocation) Reset() {
+ *x = LookupVolumeResponse_VolumeIdLocation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type LookupEcVolumeRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+func (x *LookupVolumeResponse_VolumeIdLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LookupEcVolumeRequest) Reset() { *m = LookupEcVolumeRequest{} }
-func (m *LookupEcVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*LookupEcVolumeRequest) ProtoMessage() {}
-func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {}

-func (m *LookupEcVolumeRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type LookupEcVolumeResponse struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations" json:"shard_id_locations,omitempty"`
+// Deprecated: Use LookupVolumeResponse_VolumeIdLocation.ProtoReflect.Descriptor instead.
+func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{11, 0}
}
-func (m *LookupEcVolumeResponse) Reset() { *m = LookupEcVolumeResponse{} }
-func (m *LookupEcVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*LookupEcVolumeResponse) ProtoMessage() {}
-func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
-
-func (m *LookupEcVolumeResponse) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string {
+ if x != nil {
+ return x.VolumeId
}
- return 0
+ return ""
}
-func (m *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation {
- if m != nil {
- return m.ShardIdLocations
+func (x *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location {
+ if x != nil {
+ return x.Locations
}
return nil
}
-type LookupEcVolumeResponse_EcShardIdLocation struct {
- ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"`
- Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"`
+func (x *LookupVolumeResponse_VolumeIdLocation) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
}
-func (m *LookupEcVolumeResponse_EcShardIdLocation) Reset() {
- *m = LookupEcVolumeResponse_EcShardIdLocation{}
-}
-func (m *LookupEcVolumeResponse_EcShardIdLocation) String() string { return proto.CompactTextString(m) }
-func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {}
-func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{30, 0}
-}
+type LookupEcVolumeResponse_EcShardIdLocation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 {
- if m != nil {
- return m.ShardId
- }
- return 0
+ ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
+ Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"`
}
-func (m *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location {
- if m != nil {
- return m.Locations
+func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() {
+ *x = LookupEcVolumeResponse_EcShardIdLocation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_master_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type GetMasterConfigurationRequest struct {
+func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *GetMasterConfigurationRequest) Reset() { *m = GetMasterConfigurationRequest{} }
-func (m *GetMasterConfigurationRequest) String() string { return proto.CompactTextString(m) }
-func (*GetMasterConfigurationRequest) ProtoMessage() {}
-func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {}
-type GetMasterConfigurationResponse struct {
- MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"`
- MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"`
+func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message {
+ mi := &file_master_proto_msgTypes[49]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} }
-func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) }
-func (*GetMasterConfigurationResponse) ProtoMessage() {}
-func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+// Deprecated: Use LookupEcVolumeResponse_EcShardIdLocation.ProtoReflect.Descriptor instead.
+func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) {
+ return file_master_proto_rawDescGZIP(), []int{30, 0}
+}
-func (m *GetMasterConfigurationResponse) GetMetricsAddress() string {
- if m != nil {
- return m.MetricsAddress
+func (x *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 {
+ if x != nil {
+ return x.ShardId
}
- return ""
+ return 0
}
-func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 {
- if m != nil {
- return m.MetricsIntervalSeconds
+func (x *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location {
+ if x != nil {
+ return x.Locations
}
- return 0
+ return nil
}
-func init() {
- proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat")
- proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse")
- proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage")
- proto.RegisterType((*VolumeShortInformationMessage)(nil), "master_pb.VolumeShortInformationMessage")
- proto.RegisterType((*VolumeEcShardInformationMessage)(nil), "master_pb.VolumeEcShardInformationMessage")
- proto.RegisterType((*StorageBackend)(nil), "master_pb.StorageBackend")
- proto.RegisterType((*Empty)(nil), "master_pb.Empty")
- proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra")
- proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding")
- proto.RegisterType((*KeepConnectedRequest)(nil), "master_pb.KeepConnectedRequest")
- proto.RegisterType((*VolumeLocation)(nil), "master_pb.VolumeLocation")
- proto.RegisterType((*LookupVolumeRequest)(nil), "master_pb.LookupVolumeRequest")
- proto.RegisterType((*LookupVolumeResponse)(nil), "master_pb.LookupVolumeResponse")
- proto.RegisterType((*LookupVolumeResponse_VolumeIdLocation)(nil), "master_pb.LookupVolumeResponse.VolumeIdLocation")
- proto.RegisterType((*Location)(nil), "master_pb.Location")
- proto.RegisterType((*AssignRequest)(nil), "master_pb.AssignRequest")
- proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse")
- proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest")
- proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse")
- proto.RegisterType((*StorageType)(nil), "master_pb.StorageType")
- proto.RegisterType((*Collection)(nil), "master_pb.Collection")
- proto.RegisterType((*CollectionListRequest)(nil), "master_pb.CollectionListRequest")
- proto.RegisterType((*CollectionListResponse)(nil), "master_pb.CollectionListResponse")
- proto.RegisterType((*CollectionDeleteRequest)(nil), "master_pb.CollectionDeleteRequest")
- proto.RegisterType((*CollectionDeleteResponse)(nil), "master_pb.CollectionDeleteResponse")
- proto.RegisterType((*DataNodeInfo)(nil), "master_pb.DataNodeInfo")
- proto.RegisterType((*RackInfo)(nil), "master_pb.RackInfo")
- proto.RegisterType((*DataCenterInfo)(nil), "master_pb.DataCenterInfo")
- proto.RegisterType((*TopologyInfo)(nil), "master_pb.TopologyInfo")
- proto.RegisterType((*VolumeListRequest)(nil), "master_pb.VolumeListRequest")
- proto.RegisterType((*VolumeListResponse)(nil), "master_pb.VolumeListResponse")
- proto.RegisterType((*LookupEcVolumeRequest)(nil), "master_pb.LookupEcVolumeRequest")
- proto.RegisterType((*LookupEcVolumeResponse)(nil), "master_pb.LookupEcVolumeResponse")
- proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation")
- proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest")
- proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse")
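+// File_master_proto is the protoreflect descriptor for master.proto; it is
+// built at package init time from the raw descriptor bytes below.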
+var File_master_proto protoreflect.FileDescriptor
+
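+// file_master_proto_rawDesc holds the wire-format file descriptor for
+// master.proto, embedded as a byte literal.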
+var file_master_proto_rawDesc = []byte{
+ 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0xfc, 0x06, 0x0a, 0x09, 0x48, 0x65,
+ 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x61,
+ 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a,
+ 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63,
+ 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x6f, 0x72, 0x74,
+ 0x12, 0x3d, 0x0a, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12,
+ 0x49, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0a,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0a,
+ 0x6e, 0x65, 0x77, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x0f, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0b, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x64,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a,
+ 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18,
+ 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x09, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x52, 0x08, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0d,
+ 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x11, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
+ 0x0b, 0x6e, 0x65, 0x77, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x56, 0x0a, 0x11,
+ 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x63, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x12, 0x27, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x65,
+ 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d,
+ 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x55, 0x0a,
+ 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x2e, 0x4d,
+ 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61,
+ 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a,
+ 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f,
+ 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65,
+ 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x22, 0x98, 0x04, 0x0a, 0x18,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c,
+ 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61,
+ 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74,
+ 0x74, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a,
+ 0x12, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x73, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x6f, 0x64, 0x69, 0x66,
+ 0x69, 0x65, 0x64, 0x41, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65,
+ 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73,
+ 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69,
+ 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc5, 0x01, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74,
+ 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x92,
+ 0x01, 0x0a, 0x1f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x63, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x62,
+ 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x63, 0x49, 0x6e, 0x64,
+ 0x65, 0x78, 0x42, 0x69, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54,
+ 0x79, 0x70, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42,
+ 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72,
+ 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72,
+ 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xbe, 0x01,
+ 0x0a, 0x0f, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72,
+ 0x61, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64,
+ 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x45, 0x78, 0x74, 0x72, 0x61, 0x2e, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64,
+ 0x69, 0x6e, 0x67, 0x52, 0x0d, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69,
+ 0x6e, 0x67, 0x1a, 0x5a, 0x0a, 0x0d, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64,
+ 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74,
+ 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x47,
+ 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72,
+ 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67,
+ 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e,
+ 0x65, 0x77, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e,
+ 0x65, 0x77, 0x56, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x64, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0b, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x69, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74,
+ 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x60, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3b, 0x0a,
+ 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0xd0, 0x02, 0x0a, 0x0d, 0x41,
+ 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63,
+ 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74,
+ 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64,
+ 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f,
+ 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f,
+ 0x6d, 0x62, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79,
+ 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x32, 0x0a, 0x15,
+ 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x57, 0x72, 0x69,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x93, 0x01,
+ 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x66,
+ 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75,
+ 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
+ 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12,
+ 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61,
+ 0x75, 0x74, 0x68, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69,
+ 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74,
+ 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a,
+ 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74,
+ 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12,
+ 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0a, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a,
+ 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e,
+ 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12,
+ 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x43, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a,
+ 0x17, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18,
+ 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x03, 0x0a, 0x08, 0x44, 0x69, 0x73,
+ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10,
+ 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x50, 0x0a, 0x0e, 0x65, 0x63,
+ 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x07, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0c,
+ 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13,
+ 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xb7, 0x01, 0x0a,
+ 0x0c, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x44, 0x0a,
+ 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74,
+ 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e,
+ 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e,
+ 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x63, 0x6b, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x3f, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65,
+ 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64,
+ 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49,
+ 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x73,
+ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x64, 0x69, 0x73,
+ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e,
+ 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xef, 0x01, 0x0a, 0x0e, 0x44, 0x61,
+ 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x0a,
+ 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63,
+ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x72, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73,
+ 0x12, 0x46, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44,
+ 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x64,
+ 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b,
+ 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x01, 0x0a, 0x0c,
+ 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x45, 0x0a, 0x11,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e,
+ 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e,
+ 0x66, 0x6f, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x2e,
+ 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+ 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73,
+ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66,
+ 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x13, 0x0a, 0x11,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x74, 0x6f, 0x70, 0x6f,
+ 0x6c, 0x6f, 0x67, 0x79, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x6f,
+ 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f,
+ 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x14, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x62, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4d, 0x62, 0x22, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01,
+ 0x0a, 0x16, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69,
+ 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x33, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x11, 0x45, 0x63, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a,
+ 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x42, 0x0a, 0x13, 0x56,
+ 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x68,
+ 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x67,
+ 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x22,
+ 0x16, 0x0a, 0x14, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x92, 0x02, 0x0a, 0x1e, 0x47, 0x65, 0x74,
+ 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44,
+ 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e,
+ 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x65, 0x6e, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x3b, 0x0a,
+ 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x19, 0x4c, 0x69,
+ 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0xab,
+ 0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65,
+ 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72,
+ 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b,
+ 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17,
+ 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a,
+ 0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18,
+ 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76,
+ 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65,
+ 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a,
+ 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65,
+ 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77,
+ 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74,
+ 0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51,
+ 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12,
+ 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70,
+ 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30,
+ 0x01, 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
+ 0x69, 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74,
+ 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
+ 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63,
+ 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63,
+ 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e,
+ 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73,
+ 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24,
+ 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41,
+ 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64,
+ 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d,
+ 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65,
+ 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
+ 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_master_proto_rawDescOnce sync.Once
+ file_master_proto_rawDescData = file_master_proto_rawDesc
+)
+
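+// file_master_proto_rawDescGZIP gzip-compresses the raw descriptor exactly
+// once and returns the cached compressed form on every subsequent call.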
+func file_master_proto_rawDescGZIP() []byte {
+ file_master_proto_rawDescOnce.Do(func() {
+ file_master_proto_rawDescData = protoimpl.X.CompressGZIP(file_master_proto_rawDescData)
+ })
+ return file_master_proto_rawDescData
+}
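
The `rawDescGZIP` helper above lazily gzip-compresses the serialized FileDescriptorProto on first use and caches the result via `sync.Once`. A minimal sketch of the same idiom, independent of `protoimpl` (the descriptor bytes here are a stand-in):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawOnce sync.Once
	rawData = []byte("serialized FileDescriptorProto bytes...") // stand-in payload
)

// compressedRaw mirrors the generated rawDescGZIP helper: the first call
// gzips the raw bytes in place, and every later call returns the cached blob.
func compressedRaw() []byte {
	rawOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		zw.Write(rawData) // writes to a bytes.Buffer cannot fail
		zw.Close()
		rawData = buf.Bytes()
	})
	return rawData
}

func main() {
	fmt.Println(len(compressedRaw()) == len(compressedRaw())) // true; computed once
}
```
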
+
+var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 50)
+var file_master_proto_goTypes = []interface{}{
+ (*Heartbeat)(nil), // 0: master_pb.Heartbeat
+ (*HeartbeatResponse)(nil), // 1: master_pb.HeartbeatResponse
+ (*VolumeInformationMessage)(nil), // 2: master_pb.VolumeInformationMessage
+ (*VolumeShortInformationMessage)(nil), // 3: master_pb.VolumeShortInformationMessage
+ (*VolumeEcShardInformationMessage)(nil), // 4: master_pb.VolumeEcShardInformationMessage
+ (*StorageBackend)(nil), // 5: master_pb.StorageBackend
+ (*Empty)(nil), // 6: master_pb.Empty
+ (*SuperBlockExtra)(nil), // 7: master_pb.SuperBlockExtra
+ (*KeepConnectedRequest)(nil), // 8: master_pb.KeepConnectedRequest
+ (*VolumeLocation)(nil), // 9: master_pb.VolumeLocation
+ (*LookupVolumeRequest)(nil), // 10: master_pb.LookupVolumeRequest
+ (*LookupVolumeResponse)(nil), // 11: master_pb.LookupVolumeResponse
+ (*Location)(nil), // 12: master_pb.Location
+ (*AssignRequest)(nil), // 13: master_pb.AssignRequest
+ (*AssignResponse)(nil), // 14: master_pb.AssignResponse
+ (*StatisticsRequest)(nil), // 15: master_pb.StatisticsRequest
+ (*StatisticsResponse)(nil), // 16: master_pb.StatisticsResponse
+ (*Collection)(nil), // 17: master_pb.Collection
+ (*CollectionListRequest)(nil), // 18: master_pb.CollectionListRequest
+ (*CollectionListResponse)(nil), // 19: master_pb.CollectionListResponse
+ (*CollectionDeleteRequest)(nil), // 20: master_pb.CollectionDeleteRequest
+ (*CollectionDeleteResponse)(nil), // 21: master_pb.CollectionDeleteResponse
+ (*DiskInfo)(nil), // 22: master_pb.DiskInfo
+ (*DataNodeInfo)(nil), // 23: master_pb.DataNodeInfo
+ (*RackInfo)(nil), // 24: master_pb.RackInfo
+ (*DataCenterInfo)(nil), // 25: master_pb.DataCenterInfo
+ (*TopologyInfo)(nil), // 26: master_pb.TopologyInfo
+ (*VolumeListRequest)(nil), // 27: master_pb.VolumeListRequest
+ (*VolumeListResponse)(nil), // 28: master_pb.VolumeListResponse
+ (*LookupEcVolumeRequest)(nil), // 29: master_pb.LookupEcVolumeRequest
+ (*LookupEcVolumeResponse)(nil), // 30: master_pb.LookupEcVolumeResponse
+ (*VacuumVolumeRequest)(nil), // 31: master_pb.VacuumVolumeRequest
+ (*VacuumVolumeResponse)(nil), // 32: master_pb.VacuumVolumeResponse
+ (*GetMasterConfigurationRequest)(nil), // 33: master_pb.GetMasterConfigurationRequest
+ (*GetMasterConfigurationResponse)(nil), // 34: master_pb.GetMasterConfigurationResponse
+ (*ListMasterClientsRequest)(nil), // 35: master_pb.ListMasterClientsRequest
+ (*ListMasterClientsResponse)(nil), // 36: master_pb.ListMasterClientsResponse
+ (*LeaseAdminTokenRequest)(nil), // 37: master_pb.LeaseAdminTokenRequest
+ (*LeaseAdminTokenResponse)(nil), // 38: master_pb.LeaseAdminTokenResponse
+ (*ReleaseAdminTokenRequest)(nil), // 39: master_pb.ReleaseAdminTokenRequest
+ (*ReleaseAdminTokenResponse)(nil), // 40: master_pb.ReleaseAdminTokenResponse
+ nil, // 41: master_pb.Heartbeat.MaxVolumeCountsEntry
+ nil, // 42: master_pb.StorageBackend.PropertiesEntry
+ (*SuperBlockExtra_ErasureCoding)(nil), // 43: master_pb.SuperBlockExtra.ErasureCoding
+ (*LookupVolumeResponse_VolumeIdLocation)(nil), // 44: master_pb.LookupVolumeResponse.VolumeIdLocation
+ nil, // 45: master_pb.DataNodeInfo.DiskInfosEntry
+ nil, // 46: master_pb.RackInfo.DiskInfosEntry
+ nil, // 47: master_pb.DataCenterInfo.DiskInfosEntry
+ nil, // 48: master_pb.TopologyInfo.DiskInfosEntry
+ (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 49: master_pb.LookupEcVolumeResponse.EcShardIdLocation
+}
+var file_master_proto_depIdxs = []int32{
+ 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage
+ 3, // 1: master_pb.Heartbeat.new_volumes:type_name -> master_pb.VolumeShortInformationMessage
+ 3, // 2: master_pb.Heartbeat.deleted_volumes:type_name -> master_pb.VolumeShortInformationMessage
+ 4, // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+ 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+ 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+ 41, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry
+ 5, // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend
+ 42, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry
+ 43, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding
+ 44, // 10: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation
+ 17, // 11: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection
+ 2, // 12: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage
+ 4, // 13: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage
+ 45, // 14: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry
+ 23, // 15: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo
+ 46, // 16: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry
+ 24, // 17: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo
+ 47, // 18: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry
+ 25, // 19: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo
+ 48, // 20: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry
+ 26, // 21: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo
+ 49, // 22: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation
+ 5, // 23: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend
+ 12, // 24: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location
+ 22, // 25: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+ 22, // 26: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+ 22, // 27: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+ 22, // 28: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+ 12, // 29: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location
+ 0, // 30: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat
+ 8, // 31: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest
+ 10, // 32: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest
+ 13, // 33: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest
+ 15, // 34: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest
+ 18, // 35: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest
+ 20, // 36: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest
+ 27, // 37: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest
+ 29, // 38: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest
+ 31, // 39: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest
+ 33, // 40: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest
+ 35, // 41: master_pb.Seaweed.ListMasterClients:input_type -> master_pb.ListMasterClientsRequest
+ 37, // 42: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest
+ 39, // 43: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest
+ 1, // 44: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse
+ 9, // 45: master_pb.Seaweed.KeepConnected:output_type -> master_pb.VolumeLocation
+ 11, // 46: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse
+ 14, // 47: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse
+ 16, // 48: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse
+ 19, // 49: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse
+ 21, // 50: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse
+ 28, // 51: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse
+ 30, // 52: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse
+ 32, // 53: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse
+ 34, // 54: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse
+ 36, // 55: master_pb.Seaweed.ListMasterClients:output_type -> master_pb.ListMasterClientsResponse
+ 38, // 56: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse
+ 40, // 57: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse
+ 44, // [44:58] is the sub-list for method output_type
+ 30, // [30:44] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
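
The trailer comments on `file_master_proto_depIdxs` describe how one flat table is partitioned into sub-lists: indexes `[0:30)` are field `type_name` links, `[30:44)` method input types, and `[44:58)` method output types, with the final five entries recording the boundaries. A hedged sketch of decoding that convention on a toy table:

```go
package main

import "fmt"

// splitDepIdxs decodes the trailer convention used by protoc-gen-go: the last
// five entries of the table are [outputStart, inputStart, extensionTypeStart,
// extendeeStart, 0], and the real links occupy the slice before them.
func splitDepIdxs(t []int32) (fields, inputs, outputs []int32) {
	n := len(t) - 5
	outStart, inStart := int(t[n]), int(t[n+1])
	return t[0:inStart], t[inStart:outStart], t[outStart:n]
}

func main() {
	// Toy table: two field links, one method input, one method output.
	t := []int32{7, 9, 3, 5 /* trailer: */, 3, 2, 2, 2, 0}
	f, in, out := splitDepIdxs(t)
	fmt.Println(f, in, out) // [7 9] [3] [5]
}
```
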
+
+func init() { file_master_proto_init() }
+func file_master_proto_init() {
+ if File_master_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_master_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Heartbeat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeartbeatResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeInformationMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeShortInformationMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardInformationMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StorageBackend); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SuperBlockExtra); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepConnectedRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssignResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatisticsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Collection); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionListRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionListResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DiskInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DataNodeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RackInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DataCenterInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TopologyInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeListRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeListResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupEcVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupEcVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetMasterConfigurationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetMasterConfigurationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListMasterClientsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListMasterClientsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LeaseAdminTokenRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LeaseAdminTokenResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReleaseAdminTokenRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReleaseAdminTokenResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SuperBlockExtra_ErasureCoding); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_master_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_master_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 50,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_master_proto_goTypes,
+ DependencyIndexes: file_master_proto_depIdxs,
+ MessageInfos: file_master_proto_msgTypes,
+ }.Build()
+ File_master_proto = out.File
+ file_master_proto_rawDesc = nil
+ file_master_proto_goTypes = nil
+ file_master_proto_depIdxs = nil
}
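
The `Exporter` closures registered above exist only for the `!protoimpl.UnsafeEnabled` build path: they hand the reflection runtime pointers to the unexported `state`, `sizeCache`, and `unknownFields` fields without resorting to package `unsafe`. A minimal sketch of that contract on a hypothetical message type:

```go
package main

import "fmt"

// demoMsg stands in for a generated message; the lowercase fields play the
// role of protoimpl's state, sizeCache, and unknownFields.
type demoMsg struct {
	state, sizeCache, unknownFields int
	Value                           string
}

// exporter returns a pointer to the i-th internal field, which is what the
// protoimpl runtime needs when it cannot reach them via unsafe.
func exporter(v interface{}, i int) interface{} {
	m := v.(*demoMsg)
	switch i {
	case 0:
		return &m.state
	case 1:
		return &m.sizeCache
	case 2:
		return &m.unknownFields
	default:
		return nil
	}
}

func main() {
	m := &demoMsg{Value: "x"}
	fmt.Println(exporter(m, 0) != nil) // true: the runtime can reach m.state
}
```
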
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for Seaweed service
+const _ = grpc.SupportPackageIsVersion6
+// SeaweedClient is the client API for Seaweed service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SeaweedClient interface {
SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error)
KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error)
@@ -1490,19 +4093,23 @@ type SeaweedClient interface {
CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error)
VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error)
LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error)
+ VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error)
GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error)
+ ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error)
+ LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error)
+ ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error)
}
type seaweedClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewSeaweedClient(cc *grpc.ClientConn) SeaweedClient {
+func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient {
return &seaweedClient{cc}
}
func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[0], c.cc, "/master_pb.Seaweed/SendHeartbeat", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[0], "/master_pb.Seaweed/SendHeartbeat", opts...)
if err != nil {
return nil, err
}
@@ -1533,7 +4140,7 @@ func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) {
}
func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[1], c.cc, "/master_pb.Seaweed/KeepConnected", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[1], "/master_pb.Seaweed/KeepConnected", opts...)
if err != nil {
return nil, err
}
@@ -1565,7 +4172,7 @@ func (x *seaweedKeepConnectedClient) Recv() (*VolumeLocation, error) {
func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
out := new(LookupVolumeResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1574,7 +4181,7 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques
func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) {
out := new(AssignResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1583,7 +4190,7 @@ func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...g
func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) {
out := new(StatisticsResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1592,7 +4199,7 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o
func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) {
out := new(CollectionListResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1601,7 +4208,7 @@ func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRe
func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) {
out := new(CollectionDeleteResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1610,7 +4217,7 @@ func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDele
func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) {
out := new(VolumeListResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1619,7 +4226,16 @@ func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, o
func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) {
out := new(LookupEcVolumeResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) {
+ out := new(VacuumVolumeResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VacuumVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1628,15 +4244,41 @@ func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRe
func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) {
out := new(GetMasterConfigurationResponse)
- err := grpc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-// Server API for Seaweed service
+func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) {
+ out := new(ListMasterClientsResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) {
+ out := new(LeaseAdminTokenResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) {
+ out := new(ReleaseAdminTokenResponse)
+ err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+// SeaweedServer is the server API for Seaweed service.
type SeaweedServer interface {
SendHeartbeat(Seaweed_SendHeartbeatServer) error
KeepConnected(Seaweed_KeepConnectedServer) error
@@ -1647,7 +4289,58 @@ type SeaweedServer interface {
CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error)
VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error)
LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error)
+ VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error)
GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error)
+ ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error)
+ LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error)
+ ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error)
+}
+
+// UnimplementedSeaweedServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedServer struct {
+}
+
+func (*UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error {
+ return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented")
+}
+func (*UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error {
+ return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented")
+}
+func (*UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented")
+}
+func (*UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented")
+}
+func (*UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented")
+}
+func (*UnimplementedSeaweedServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented")
+}
+func (*UnimplementedSeaweedServer) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CollectionDelete not implemented")
+}
+func (*UnimplementedSeaweedServer) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeList not implemented")
+}
+func (*UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LookupEcVolume not implemented")
+}
+func (*UnimplementedSeaweedServer) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolume not implemented")
+}
+func (*UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented")
+}
+func (*UnimplementedSeaweedServer) ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListMasterClients not implemented")
+}
+func (*UnimplementedSeaweedServer) LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LeaseAdminToken not implemented")
+}
+func (*UnimplementedSeaweedServer) ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ReleaseAdminToken not implemented")
}
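
Embedding `UnimplementedSeaweedServer` keeps partial implementations compiling as the service grows; methods you do not override answer `codes.Unimplemented` at runtime instead of breaking the build. A hedged sketch of a server that overrides only `Assign` (the `Count` fields are assumed from master.proto):

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

// myMaster overrides only Assign; every other Seaweed method is inherited
// from UnimplementedSeaweedServer and returns codes.Unimplemented.
type myMaster struct {
	master_pb.UnimplementedSeaweedServer
}

func (s *myMaster) Assign(ctx context.Context, req *master_pb.AssignRequest) (*master_pb.AssignResponse, error) {
	return &master_pb.AssignResponse{Count: req.Count}, nil // echo the requested count
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	master_pb.RegisterSeaweedServer(srv, &myMaster{})
	log.Fatal(srv.Serve(lis))
}
```
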
func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) {
@@ -1832,6 +4525,24 @@ func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec f
return interceptor(ctx, in, info, handler)
}
+func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VacuumVolumeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).VacuumVolume(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/VacuumVolume",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).VacuumVolume(ctx, req.(*VacuumVolumeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetMasterConfigurationRequest)
if err := dec(in); err != nil {
@@ -1850,6 +4561,60 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMasterClientsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).ListMasterClients(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/ListMasterClients",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseAdminTokenRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).LeaseAdminToken(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/LeaseAdminToken",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ReleaseAdminTokenRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).ReleaseAdminToken(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/ReleaseAdminToken",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Seaweed_serviceDesc = grpc.ServiceDesc{
ServiceName: "master_pb.Seaweed",
HandlerType: (*SeaweedServer)(nil),
@@ -1882,10 +4647,26 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
MethodName: "LookupEcVolume",
Handler: _Seaweed_LookupEcVolume_Handler,
},
+ {
+ MethodName: "VacuumVolume",
+ Handler: _Seaweed_VacuumVolume_Handler,
+ },
{
MethodName: "GetMasterConfiguration",
Handler: _Seaweed_GetMasterConfiguration_Handler,
},
+ {
+ MethodName: "ListMasterClients",
+ Handler: _Seaweed_ListMasterClients_Handler,
+ },
+ {
+ MethodName: "LeaseAdminToken",
+ Handler: _Seaweed_LeaseAdminToken_Handler,
+ },
+ {
+ MethodName: "ReleaseAdminToken",
+ Handler: _Seaweed_ReleaseAdminToken_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -1903,141 +4684,3 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
},
Metadata: "master.proto",
}
-
-func init() { proto.RegisterFile("master.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 2102 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7,
- 0x11, 0xd6, 0xec, 0x2e, 0x97, 0xbb, 0xb5, 0xef, 0x26, 0x45, 0xaf, 0xd6, 0x96, 0xb5, 0x1a, 0x07,
- 0x30, 0xa5, 0x38, 0x8c, 0x43, 0x1b, 0x88, 0x91, 0xc4, 0x30, 0x24, 0x8a, 0x76, 0x08, 0x89, 0xb4,
- 0x34, 0x54, 0x64, 0x20, 0x40, 0x30, 0xe9, 0x9d, 0x69, 0x92, 0x03, 0xce, 0x2b, 0xd3, 0xbd, 0x14,
- 0xd7, 0xb9, 0x04, 0xc8, 0x31, 0xa7, 0x20, 0x87, 0xfc, 0x85, 0x5c, 0x72, 0x4a, 0xce, 0xbe, 0xe4,
- 0x47, 0xe4, 0x7f, 0xe4, 0xea, 0x4b, 0xd0, 0xaf, 0x99, 0x9e, 0x7d, 0x90, 0xa6, 0x01, 0x1f, 0x74,
- 0x9b, 0xae, 0xaa, 0xae, 0xae, 0xfe, 0xaa, 0xbb, 0xea, 0xeb, 0x5d, 0x68, 0x47, 0x98, 0x32, 0x92,
- 0xed, 0xa4, 0x59, 0xc2, 0x12, 0xd4, 0x94, 0x23, 0x37, 0x9d, 0xd8, 0x7f, 0xa9, 0x43, 0xf3, 0xd7,
- 0x04, 0x67, 0x6c, 0x42, 0x30, 0x43, 0x5d, 0xa8, 0x04, 0xe9, 0xd0, 0x1a, 0x5b, 0xdb, 0x4d, 0xa7,
- 0x12, 0xa4, 0x08, 0x41, 0x2d, 0x4d, 0x32, 0x36, 0xac, 0x8c, 0xad, 0xed, 0x8e, 0x23, 0xbe, 0xd1,
- 0x5d, 0x80, 0x74, 0x3a, 0x09, 0x03, 0xcf, 0x9d, 0x66, 0xe1, 0xb0, 0x2a, 0x6c, 0x9b, 0x52, 0xf2,
- 0x9b, 0x2c, 0x44, 0xdb, 0xd0, 0x8f, 0xf0, 0xa5, 0x7b, 0x91, 0x84, 0xd3, 0x88, 0xb8, 0x5e, 0x32,
- 0x8d, 0xd9, 0xb0, 0x26, 0xa6, 0x77, 0x23, 0x7c, 0xf9, 0x4a, 0x88, 0xf7, 0xb8, 0x14, 0x8d, 0x79,
- 0x54, 0x97, 0xee, 0x49, 0x10, 0x12, 0xf7, 0x9c, 0xcc, 0x86, 0x6b, 0x63, 0x6b, 0xbb, 0xe6, 0x40,
- 0x84, 0x2f, 0x3f, 0x0f, 0x42, 0xf2, 0x94, 0xcc, 0xd0, 0x3d, 0x68, 0xf9, 0x98, 0x61, 0xd7, 0x23,
- 0x31, 0x23, 0xd9, 0xb0, 0x2e, 0xd6, 0x02, 0x2e, 0xda, 0x13, 0x12, 0x1e, 0x5f, 0x86, 0xbd, 0xf3,
- 0xe1, 0xba, 0xd0, 0x88, 0x6f, 0x1e, 0x1f, 0xf6, 0xa3, 0x20, 0x76, 0x45, 0xe4, 0x0d, 0xb1, 0x74,
- 0x53, 0x48, 0x9e, 0xf3, 0xf0, 0x3f, 0x85, 0x75, 0x19, 0x1b, 0x1d, 0x36, 0xc7, 0xd5, 0xed, 0xd6,
- 0xee, 0x7b, 0x3b, 0x39, 0x1a, 0x3b, 0x32, 0xbc, 0x83, 0xf8, 0x24, 0xc9, 0x22, 0xcc, 0x82, 0x24,
- 0x3e, 0x24, 0x94, 0xe2, 0x53, 0xe2, 0xe8, 0x39, 0xe8, 0x00, 0x5a, 0x31, 0x79, 0xed, 0x6a, 0x17,
- 0x20, 0x5c, 0x6c, 0x2f, 0xb8, 0x38, 0x3e, 0x4b, 0x32, 0xb6, 0xc4, 0x0f, 0xc4, 0xe4, 0xf5, 0x2b,
- 0xe5, 0xea, 0x05, 0xf4, 0x7c, 0x12, 0x12, 0x46, 0xfc, 0xdc, 0x5d, 0xeb, 0x86, 0xee, 0xba, 0xca,
- 0x81, 0x76, 0xf9, 0x23, 0xe8, 0x9e, 0x61, 0xea, 0xc6, 0x49, 0xee, 0xb1, 0x3d, 0xb6, 0xb6, 0x1b,
- 0x4e, 0xfb, 0x0c, 0xd3, 0xa3, 0x44, 0x5b, 0x7d, 0x01, 0x4d, 0xe2, 0xb9, 0xf4, 0x0c, 0x67, 0x3e,
- 0x1d, 0xf6, 0xc5, 0x92, 0x0f, 0x17, 0x96, 0xdc, 0xf7, 0x8e, 0xb9, 0xc1, 0x92, 0x45, 0x1b, 0x44,
- 0xaa, 0x28, 0x3a, 0x82, 0x0e, 0x07, 0xa3, 0x70, 0x36, 0xb8, 0xb1, 0x33, 0x8e, 0xe6, 0xbe, 0xf6,
- 0xf7, 0x0a, 0x06, 0x1a, 0x91, 0xc2, 0x27, 0xba, 0xb1, 0x4f, 0x0d, 0x6b, 0xee, 0xf7, 0x7d, 0xe8,
- 0x2b, 0x58, 0x0a, 0xb7, 0x1b, 0x02, 0x98, 0x8e, 0x00, 0x46, 0x1b, 0xda, 0x7f, 0xaa, 0xc0, 0x20,
- 0xbf, 0x0d, 0x0e, 0xa1, 0x69, 0x12, 0x53, 0x82, 0x1e, 0xc2, 0x40, 0x1d, 0x67, 0x1a, 0x7c, 0x4d,
- 0xdc, 0x30, 0x88, 0x02, 0x26, 0x2e, 0x49, 0xcd, 0xe9, 0x49, 0xc5, 0x71, 0xf0, 0x35, 0x79, 0xc6,
- 0xc5, 0x68, 0x0b, 0xea, 0x21, 0xc1, 0x3e, 0xc9, 0xc4, 0x9d, 0x69, 0x3a, 0x6a, 0x84, 0xde, 0x87,
- 0x5e, 0x44, 0x58, 0x16, 0x78, 0xd4, 0xc5, 0xbe, 0x9f, 0x11, 0x4a, 0xd5, 0xd5, 0xe9, 0x2a, 0xf1,
- 0x23, 0x29, 0x45, 0x9f, 0xc0, 0x50, 0x1b, 0x06, 0xfc, 0x8c, 0x5f, 0xe0, 0xd0, 0xa5, 0xc4, 0x4b,
- 0x62, 0x9f, 0xaa, 0x7b, 0xb4, 0xa5, 0xf4, 0x07, 0x4a, 0x7d, 0x2c, 0xb5, 0xe8, 0x09, 0xf4, 0x29,
- 0x4b, 0x32, 0x7c, 0x4a, 0xdc, 0x09, 0xf6, 0xce, 0x09, 0x9f, 0xb1, 0x26, 0xc0, 0xbb, 0x63, 0x80,
- 0x77, 0x2c, 0x4d, 0x1e, 0x4b, 0x0b, 0xa7, 0x47, 0x4b, 0x63, 0x6a, 0x7f, 0x5b, 0x85, 0xe1, 0xaa,
- 0x6b, 0x20, 0xea, 0x83, 0x2f, 0xb6, 0xde, 0x71, 0x2a, 0x81, 0xcf, 0xef, 0x1f, 0x87, 0x44, 0xec,
- 0xb5, 0xe6, 0x88, 0x6f, 0xf4, 0x2e, 0x80, 0x97, 0x84, 0x21, 0xf1, 0xf8, 0x44, 0xb5, 0x49, 0x43,
- 0xc2, 0xef, 0xa7, 0xb8, 0xf2, 0x45, 0x69, 0xa8, 0x39, 0x4d, 0x2e, 0x91, 0x55, 0xe1, 0x3e, 0xb4,
- 0x65, 0xfa, 0x94, 0x81, 0xac, 0x0a, 0x2d, 0x29, 0x93, 0x26, 0x1f, 0x00, 0xd2, 0xc7, 0x64, 0x32,
- 0xcb, 0x0d, 0xeb, 0xc2, 0xb0, 0xaf, 0x34, 0x8f, 0x67, 0xda, 0xfa, 0x6d, 0x68, 0x66, 0x04, 0xfb,
- 0x6e, 0x12, 0x87, 0x33, 0x51, 0x28, 0x1a, 0x4e, 0x83, 0x0b, 0xbe, 0x8c, 0xc3, 0x19, 0xfa, 0x31,
- 0x0c, 0x32, 0x92, 0x86, 0x81, 0x87, 0xdd, 0x34, 0xc4, 0x1e, 0x89, 0x48, 0xac, 0x6b, 0x46, 0x5f,
- 0x29, 0x9e, 0x6b, 0x39, 0x1a, 0xc2, 0xfa, 0x05, 0xc9, 0x28, 0xdf, 0x56, 0x53, 0x98, 0xe8, 0x21,
- 0xea, 0x43, 0x95, 0xb1, 0x70, 0x08, 0x42, 0xca, 0x3f, 0xd1, 0x03, 0xe8, 0x7b, 0x49, 0x94, 0x62,
- 0x8f, 0xb9, 0x19, 0xb9, 0x08, 0xc4, 0xa4, 0x96, 0x50, 0xf7, 0x94, 0xdc, 0x51, 0x62, 0xbe, 0x9d,
- 0x28, 0xf1, 0x83, 0x93, 0x80, 0xf8, 0x2e, 0x66, 0x2a, 0xd9, 0xe2, 0xe2, 0x56, 0x9d, 0xbe, 0xd6,
- 0x3c, 0x62, 0x32, 0xcd, 0x68, 0x07, 0x36, 0x32, 0x12, 0x25, 0x8c, 0xb8, 0x3a, 0xd9, 0x31, 0x8e,
- 0xc8, 0xb0, 0x23, 0x70, 0x1e, 0x48, 0x95, 0xca, 0xf1, 0x11, 0x8e, 0x08, 0xf7, 0x3e, 0x67, 0xcf,
- 0x6b, 0x6d, 0x57, 0x98, 0xf7, 0x4b, 0xe6, 0x4f, 0xc9, 0xcc, 0xfe, 0x87, 0x05, 0x77, 0xaf, 0x2c,
- 0x39, 0x0b, 0x47, 0xe0, 0xba, 0x74, 0xff, 0x50, 0x08, 0xdb, 0x53, 0xb8, 0x77, 0x4d, 0x21, 0xb8,
- 0x26, 0xd6, 0xca, 0x42, 0xac, 0x36, 0x74, 0x88, 0xe7, 0x06, 0xb1, 0x4f, 0x2e, 0xdd, 0x49, 0xc0,
- 0xe4, 0x15, 0xed, 0x38, 0x2d, 0xe2, 0x1d, 0x70, 0xd9, 0xe3, 0x80, 0x51, 0xfb, 0x1b, 0x0b, 0xba,
- 0xe5, 0x3b, 0xc4, 0x6f, 0x01, 0x9b, 0xa5, 0x44, 0xf5, 0x4d, 0xf1, 0xad, 0x96, 0xae, 0xa8, 0x4e,
- 0xea, 0xa3, 0x03, 0x80, 0x34, 0x4b, 0x52, 0x92, 0xb1, 0x80, 0x70, 0xbf, 0xfc, 0x5a, 0x3e, 0x58,
- 0x79, 0x2d, 0x77, 0x9e, 0xe7, 0xb6, 0xfb, 0x31, 0xcb, 0x66, 0x8e, 0x31, 0x79, 0xf4, 0x29, 0xf4,
- 0xe6, 0xd4, 0x1c, 0x1d, 0x9e, 0x55, 0x19, 0x00, 0xff, 0x44, 0x9b, 0xb0, 0x76, 0x81, 0xc3, 0x29,
- 0x51, 0x21, 0xc8, 0xc1, 0x2f, 0x2a, 0x9f, 0x58, 0xf6, 0x3a, 0xac, 0xed, 0x47, 0x29, 0x9b, 0xf1,
- 0x9d, 0xf4, 0x8e, 0xa7, 0x29, 0xc9, 0x1e, 0x87, 0x89, 0x77, 0xbe, 0x7f, 0xc9, 0x32, 0x8c, 0xbe,
- 0x84, 0x2e, 0xc9, 0x30, 0x9d, 0x66, 0xfc, 0x56, 0xf9, 0x41, 0x7c, 0x2a, 0x7c, 0x96, 0x5b, 0xd2,
- 0xdc, 0x9c, 0x9d, 0x7d, 0x39, 0x61, 0x4f, 0xd8, 0x3b, 0x1d, 0x62, 0x0e, 0x47, 0xbf, 0x85, 0x4e,
- 0x49, 0xcf, 0xc1, 0xe2, 0x0d, 0x5c, 0x65, 0x45, 0x7c, 0xf3, 0xa2, 0x99, 0xe2, 0x2c, 0x60, 0x33,
- 0x45, 0x34, 0xd4, 0x88, 0x97, 0x0a, 0x55, 0x78, 0x03, 0x5f, 0x82, 0xd6, 0x71, 0x9a, 0x52, 0x72,
- 0xe0, 0x53, 0xfb, 0x21, 0x6c, 0x3e, 0x25, 0x24, 0xdd, 0x4b, 0xe2, 0x98, 0x78, 0x8c, 0xf8, 0x0e,
- 0xf9, 0xc3, 0x94, 0x50, 0xc6, 0x97, 0x10, 0x77, 0x42, 0xe5, 0x83, 0x7f, 0xdb, 0x7f, 0xb7, 0xa0,
- 0x2b, 0x8f, 0xcb, 0xb3, 0xc4, 0x13, 0x87, 0x84, 0x83, 0xc6, 0x19, 0x8c, 0x02, 0x6d, 0x9a, 0x85,
- 0x73, 0xd4, 0xa6, 0x32, 0x4f, 0x6d, 0xee, 0x40, 0x43, 0xf4, 0xfe, 0x22, 0x98, 0x75, 0xde, 0xce,
- 0x03, 0x9f, 0x16, 0x55, 0xcb, 0x97, 0xea, 0x9a, 0x50, 0xb7, 0x74, 0x7b, 0xe6, 0x26, 0x45, 0x67,
- 0x58, 0x33, 0x3b, 0x83, 0xfd, 0x12, 0x36, 0x9e, 0x25, 0xc9, 0xf9, 0x34, 0x95, 0xe1, 0xe9, 0x4d,
- 0x94, 0xf7, 0x6e, 0x8d, 0xab, 0x3c, 0x96, 0x7c, 0xef, 0xd7, 0x1d, 0x65, 0xfb, 0x7f, 0x16, 0x6c,
- 0x96, 0xdd, 0xaa, 0x66, 0xf6, 0x7b, 0xd8, 0xc8, 0xfd, 0xba, 0xa1, 0xc2, 0x42, 0x2e, 0xd0, 0xda,
- 0xfd, 0xd0, 0x48, 0xf3, 0xb2, 0xd9, 0x9a, 0x20, 0xf9, 0x1a, 0x44, 0x67, 0x70, 0x31, 0x27, 0xa1,
- 0xa3, 0x4b, 0xe8, 0xcf, 0x9b, 0xf1, 0x22, 0x9c, 0xaf, 0xaa, 0x10, 0x6f, 0xe8, 0x99, 0xe8, 0x67,
- 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4a, 0x81, 0xa8, 0xb5, 0x0a, 0x2b, 0x7e, 0xbc, 0x49,
- 0x96, 0x25, 0x99, 0x2a, 0x38, 0x72, 0x60, 0xff, 0x12, 0x1a, 0xdf, 0x3b, 0xbb, 0xf6, 0xbf, 0x2a,
- 0xd0, 0x79, 0x44, 0x69, 0x70, 0x1a, 0xeb, 0x14, 0x6c, 0xc2, 0x9a, 0x6c, 0x2d, 0xb2, 0xd7, 0xcb,
- 0x01, 0x1a, 0x43, 0x4b, 0xd5, 0x2d, 0x03, 0x7a, 0x53, 0x74, 0x6d, 0x49, 0x54, 0xb5, 0xac, 0x26,
- 0x43, 0xe3, 0xdd, 0x62, 0x8e, 0xe8, 0xae, 0xad, 0x24, 0xba, 0x75, 0x83, 0xe8, 0xbe, 0x0d, 0x4d,
- 0x31, 0x29, 0x4e, 0x7c, 0xa2, 0x18, 0x70, 0x83, 0x0b, 0x8e, 0x12, 0x9f, 0xa0, 0x5d, 0xd8, 0x8a,
- 0x48, 0x94, 0x64, 0x33, 0x37, 0xc2, 0xa9, 0xcb, 0x79, 0xb6, 0xe0, 0x2e, 0xd1, 0x44, 0xd5, 0x5e,
- 0x24, 0xb5, 0x87, 0x38, 0x3d, 0xc4, 0x97, 0x9c, 0xbe, 0x1c, 0x4e, 0xd0, 0x2e, 0xdc, 0xfe, 0x2a,
- 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0xcc, 0xdf, 0x65, 0x2d, 0xde, 0xd0, 0x4a, 0x83, 0xc4, 0xdb, 0x7f,
- 0xb3, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x0f, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x53, 0xe7, 0xa2,
- 0xb2, 0x2a, 0x17, 0x0b, 0x8f, 0x88, 0x1c, 0xf9, 0x9a, 0x89, 0x7c, 0x9e, 0xf4, 0x35, 0x23, 0xe9,
- 0x1c, 0x1a, 0x3c, 0x65, 0x67, 0x1a, 0x1a, 0xfe, 0x6d, 0x9f, 0xc2, 0xe0, 0x98, 0x61, 0x16, 0x50,
- 0x16, 0x78, 0x54, 0xa7, 0x73, 0x2e, 0x71, 0xd6, 0x75, 0x89, 0xab, 0xac, 0x4a, 0x5c, 0x35, 0x4f,
- 0x9c, 0xfd, 0x1f, 0x0b, 0x90, 0xb9, 0x92, 0x82, 0xe0, 0x07, 0x58, 0x8a, 0x43, 0xc6, 0x12, 0xc6,
- 0xd9, 0x20, 0x67, 0x5c, 0x8a, 0x37, 0x09, 0x09, 0x4f, 0x1f, 0x3f, 0x0d, 0x53, 0x4a, 0x7c, 0xa9,
- 0x95, 0xa4, 0xa9, 0xc1, 0x05, 0x42, 0x59, 0xe6, 0x5c, 0xf5, 0x39, 0xce, 0x65, 0x3f, 0x82, 0x96,
- 0xea, 0x3f, 0x2f, 0x79, 0xef, 0xba, 0x3e, 0x7a, 0x15, 0x5d, 0xa5, 0x00, 0x62, 0x0c, 0xb0, 0x57,
- 0x44, 0xbf, 0xac, 0x02, 0xff, 0x11, 0x6e, 0x17, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e, 0x86,
- 0xad, 0x20, 0xf6, 0xc2, 0xa9, 0x4f, 0xdc, 0x98, 0x77, 0xf0, 0x30, 0x7f, 0xbc, 0x58, 0x82, 0xad,
- 0x6d, 0x2a, 0xed, 0x91, 0x50, 0xea, 0x47, 0xcc, 0x07, 0x80, 0xf4, 0x2c, 0xe2, 0xe5, 0x33, 0x2a,
- 0x62, 0x46, 0x5f, 0x69, 0xf6, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xad, 0xf9, 0xc5, 0x55, 0xaa, 0x7e,
- 0x0e, 0xad, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1b, 0xe5, 0xa7, 0x98, 0xe7, 0x98, 0x96, 0xf6, 0x4f,
- 0xe0, 0xad, 0x42, 0xf5, 0x44, 0x14, 0xfa, 0xab, 0x1a, 0xd0, 0x08, 0x86, 0x8b, 0xe6, 0x32, 0x06,
- 0xfb, 0xaf, 0x55, 0x68, 0x3f, 0x51, 0x37, 0x97, 0xd3, 0x18, 0x83, 0xb8, 0x48, 0xf6, 0x70, 0x1f,
- 0xda, 0xa5, 0x0b, 0x29, 0xf9, 0x76, 0xeb, 0xc2, 0x78, 0x4d, 0x2f, 0x7b, 0x77, 0x57, 0x85, 0xd9,
- 0xfc, 0xbb, 0xfb, 0x21, 0x0c, 0x4e, 0x32, 0x42, 0x16, 0x9f, 0xe8, 0x35, 0xa7, 0xc7, 0x15, 0xa6,
- 0xed, 0x0e, 0x6c, 0x60, 0x8f, 0x05, 0x17, 0x73, 0xd6, 0xf2, 0x7c, 0x0d, 0xa4, 0xca, 0xb4, 0xff,
- 0x3c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0x3a, 0xac, 0x7f, 0xf7, 0x27, 0xb6, 0xda, 0x0d, 0xd7, 0x50,
- 0xf4, 0x1c, 0xba, 0xfa, 0xa9, 0xa6, 0x3c, 0xad, 0xdf, 0xf8, 0x19, 0xd8, 0x26, 0x85, 0x8a, 0x1a,
- 0xbc, 0xb9, 0xb4, 0x93, 0x86, 0xdc, 0x89, 0x54, 0x99, 0x85, 0xed, 0xdf, 0x15, 0x68, 0x38, 0xd8,
- 0x3b, 0x7f, 0xb3, 0xf3, 0xf1, 0x19, 0xf4, 0xf2, 0x1e, 0x51, 0x4a, 0xc9, 0x5b, 0x06, 0x90, 0xe6,
- 0xd1, 0x73, 0x3a, 0xbe, 0x31, 0x5a, 0x09, 0xdb, 0xfa, 0x2a, 0xd8, 0xfe, 0x59, 0x81, 0xee, 0x93,
- 0xbc, 0x6f, 0xbd, 0xd9, 0xe0, 0xed, 0x02, 0xf0, 0x46, 0x5b, 0xc2, 0xcd, 0x24, 0x26, 0xfa, 0x78,
- 0x38, 0xcd, 0x4c, 0x7d, 0xdd, 0x1c, 0xaf, 0x6f, 0x2a, 0xd0, 0x7e, 0x99, 0xa4, 0x49, 0x98, 0x9c,
- 0xce, 0xde, 0x6c, 0xb4, 0xf6, 0x61, 0x60, 0x70, 0x98, 0x12, 0x68, 0x77, 0xe6, 0x0e, 0x5b, 0x71,
- 0x38, 0x9c, 0x9e, 0x5f, 0x1a, 0xdf, 0x1c, 0xc0, 0x0d, 0x18, 0x28, 0x5e, 0x5f, 0xb4, 0x14, 0xfb,
- 0xcf, 0x16, 0x20, 0x53, 0xaa, 0x6a, 0xfd, 0xaf, 0xa0, 0xc3, 0x14, 0xd6, 0x22, 0x3e, 0xf5, 0xb8,
- 0x31, 0xef, 0x82, 0x99, 0x0b, 0xa7, 0xcd, 0xcc, 0xcc, 0xfc, 0x14, 0x36, 0x17, 0x7e, 0x06, 0xe2,
- 0x84, 0x4a, 0x66, 0x64, 0x30, 0xf7, 0x4b, 0xd0, 0xe1, 0xc4, 0xfe, 0x18, 0x6e, 0x4b, 0x12, 0xad,
- 0xfb, 0x90, 0xee, 0x0f, 0x0b, 0x6c, 0xb8, 0x53, 0xb0, 0x61, 0xfb, 0x5b, 0x0b, 0xb6, 0xe6, 0xa7,
- 0xa9, 0xf8, 0xaf, 0x9a, 0x87, 0x30, 0x20, 0x55, 0x2f, 0x4d, 0x5e, 0x2f, 0xe9, 0xf4, 0x47, 0x0b,
- 0xbc, 0x7e, 0xde, 0xf7, 0x8e, 0xae, 0xa3, 0x05, 0xb5, 0xef, 0xd3, 0xb2, 0x80, 0x8e, 0x30, 0x0c,
- 0x16, 0xcc, 0xf8, 0xab, 0x48, 0xaf, 0xab, 0x62, 0x5a, 0x57, 0x13, 0xbf, 0x07, 0xb1, 0xb7, 0xef,
- 0xc1, 0xdd, 0x2f, 0x08, 0x3b, 0x14, 0x36, 0x7b, 0x49, 0x7c, 0x12, 0x9c, 0x4e, 0x33, 0x69, 0x54,
- 0xa4, 0xf6, 0xdd, 0x55, 0x16, 0x0a, 0xa6, 0x25, 0xbf, 0xb5, 0x59, 0x37, 0xfe, 0xad, 0xad, 0x72,
- 0xd5, 0x6f, 0x6d, 0xbb, 0xff, 0xad, 0xc3, 0xfa, 0x31, 0xc1, 0xaf, 0x09, 0xe1, 0x4f, 0xfb, 0xce,
- 0x31, 0x89, 0xfd, 0xe2, 0x57, 0xf4, 0x4d, 0x63, 0x8f, 0xb9, 0x74, 0xf4, 0xce, 0x32, 0x69, 0x4e,
- 0x01, 0x6e, 0x6d, 0x5b, 0x1f, 0x5a, 0xe8, 0x05, 0x74, 0x4a, 0x2f, 0x5a, 0x74, 0xcf, 0x98, 0xb4,
- 0xec, 0xad, 0x3b, 0xba, 0xb3, 0xd0, 0x10, 0x35, 0xaa, 0xb9, 0xcb, 0xb6, 0xf9, 0x92, 0x43, 0xef,
- 0xae, 0x7c, 0xe2, 0x49, 0x87, 0xf7, 0xae, 0x79, 0x02, 0xda, 0xb7, 0xd0, 0x67, 0x50, 0x97, 0x94,
- 0x1f, 0x0d, 0x0d, 0xe3, 0xd2, 0xdb, 0xa9, 0x14, 0x57, 0xf9, 0x7d, 0x60, 0xdf, 0x42, 0x4f, 0x01,
- 0x0a, 0xd2, 0x8c, 0xde, 0x29, 0xfd, 0x0c, 0x32, 0xc7, 0xda, 0x47, 0x77, 0x57, 0x68, 0x73, 0x67,
- 0x5f, 0x41, 0xb7, 0x4c, 0xed, 0xd0, 0x78, 0x29, 0x7b, 0x33, 0xea, 0xc3, 0xe8, 0xfe, 0x15, 0x16,
- 0xb9, 0xe3, 0xdf, 0x41, 0x7f, 0x9e, 0xb1, 0x21, 0x7b, 0xe9, 0xc4, 0x12, 0xfb, 0x1b, 0xbd, 0x77,
- 0xa5, 0x8d, 0x09, 0x42, 0x51, 0xa2, 0x4a, 0x20, 0x2c, 0xd4, 0xb3, 0x12, 0x08, 0x8b, 0x75, 0x4d,
- 0x82, 0x50, 0xbe, 0xd7, 0x25, 0x10, 0x96, 0x56, 0xa1, 0x12, 0x08, 0xcb, 0x8b, 0x82, 0x7d, 0x0b,
- 0x25, 0xb0, 0xb5, 0xfc, 0xb6, 0x21, 0xf3, 0x27, 0xa1, 0x2b, 0xaf, 0xec, 0xe8, 0xc1, 0x77, 0xb0,
- 0xd4, 0x0b, 0x4e, 0xea, 0xe2, 0x2f, 0xaa, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x64,
- 0x5c, 0xbc, 0xb2, 0x1a, 0x00, 0x00,
-}
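
With the constructor now taking `grpc.ClientConnInterface`, any conforming connection can back the client. A hedged usage sketch that dials a master and calls one of the RPCs added in this diff (the address is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:9333", grpc.WithInsecure()) // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewSeaweedClient now accepts any grpc.ClientConnInterface.
	client := master_pb.NewSeaweedClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := client.ListMasterClients(ctx, &master_pb.ListMasterClientsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}
```
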
diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto
new file mode 100644
index 000000000..04446ad16
--- /dev/null
+++ b/weed/pb/messaging.proto
@@ -0,0 +1,135 @@
+syntax = "proto3";
+
+package messaging_pb;
+
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb";
+option java_package = "seaweedfs.client";
+option java_outer_classname = "MessagingProto";
+
+//////////////////////////////////////////////////
+
+service SeaweedMessaging {
+
+ rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) {
+ }
+
+ rpc Publish (stream PublishRequest) returns (stream PublishResponse) {
+ }
+
+ rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) {
+ }
+
+ rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
+ }
+
+ rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) {
+ }
+
+ rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) {
+ }
+
+}
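+
+// Subscribe and Publish are bidirectional streams; by convention the first
+// message a subscriber sends carries the InitMessage. A hedged Go sketch of
+// driving Subscribe, assuming the generated messaging_pb names mirror this
+// service (the broker address is illustrative):
+//
+//   conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   defer conn.Close()
+//
+//   client := messaging_pb.NewSeaweedMessagingClient(conn)
+//   stream, err := client.Subscribe(context.Background())
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//
+//   // The first message on the stream carries the InitMessage.
+//   if err := stream.Send(&messaging_pb.SubscriberMessage{
+//       Init: &messaging_pb.SubscriberMessage_InitMessage{
+//           Namespace:     "chat",
+//           Topic:         "events",
+//           Partition:     0,
+//           StartPosition: messaging_pb.SubscriberMessage_InitMessage_EARLIEST,
+//           SubscriberId:  "reader-1",
+//       },
+//   }); err != nil {
+//       log.Fatal(err)
+//   }
+//
+//   for {
+//       msg, err := stream.Recv()
+//       if err != nil {
+//           log.Fatal(err)
+//       }
+//       fmt.Printf("key=%s value=%s\n", msg.GetData().GetKey(), msg.GetData().GetValue())
+//   }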
+
+//////////////////////////////////////////////////
+
+message SubscriberMessage {
+ message InitMessage {
+ string namespace = 1;
+ string topic = 2;
+ int32 partition = 3;
+ enum StartPosition {
+ LATEST = 0; // Start at the newest message
+ EARLIEST = 1; // Start at the oldest message
+ TIMESTAMP = 2; // Start after a specified timestamp, exclusive
+ }
+ StartPosition startPosition = 4; // Where to begin consuming from
+ int64 timestampNs = 5; // timestamp in nanoseconds
+ string subscriber_id = 6; // uniquely identify a subscriber to track consumption
+ }
+ InitMessage init = 1;
+ message AckMessage {
+ int64 message_id = 1;
+ }
+ AckMessage ack = 2;
+ bool is_close = 3;
+}
+
+message Message {
+ int64 event_time_ns = 1 [jstype = JS_STRING];
+ bytes key = 2; // Message key
+ bytes value = 3; // Message payload
+ map<string, bytes> headers = 4; // Message headers
+ bool is_close = 5;
+}
+
+message BrokerMessage {
+ Message data = 1;
+}
+
+message PublishRequest {
+ message InitMessage {
+ string namespace = 1; // only needed on the initial request
+ string topic = 2; // only needed on the initial request
+ int32 partition = 3;
+ }
+ InitMessage init = 1;
+ Message data = 2;
+}
+
+message PublishResponse {
+ message ConfigMessage {
+ int32 partition_count = 1;
+ }
+ ConfigMessage config = 1;
+ message RedirectMessage {
+ string new_broker = 1;
+ }
+ RedirectMessage redirect = 2;
+ bool is_closed = 3;
+}
+
+message DeleteTopicRequest {
+ string namespace = 1;
+ string topic = 2;
+}
+message DeleteTopicResponse {
+}
+
+message ConfigureTopicRequest {
+ string namespace = 1;
+ string topic = 2;
+ TopicConfiguration configuration = 3;
+}
+message ConfigureTopicResponse {
+}
+
+message GetTopicConfigurationRequest {
+ string namespace = 1;
+ string topic = 2;
+}
+message GetTopicConfigurationResponse {
+ TopicConfiguration configuration = 1;
+}
+
+message FindBrokerRequest {
+ string namespace = 1;
+ string topic = 2;
+ int32 parition = 3;
+}
+
+message FindBrokerResponse {
+ string broker = 1;
+}
+
+message TopicConfiguration {
+ int32 partition_count = 1;
+ string collection = 2;
+ string replication = 3;
+ bool is_transient = 4;
+ enum Partitioning {
+ NonNullKeyHash = 0; // If not null, hash by key value. If null, round robin
+ KeyHash = 1; // hash by key value
+ RoundRobin = 2; // round robin pick one partition
+ }
+ Partitioning partitoning = 5;
+}
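
The `Partitioning` enum above only names the policies; the broker-side logic is not part of this diff. One plausible reading of `NonNullKeyHash`, as a sketch only: hash the key when one is present, otherwise fall back to round robin.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// pickPartition sketches the NonNullKeyHash policy from TopicConfiguration:
// hash the key when present (stable routing per key), else round robin.
func pickPartition(key []byte, partitionCount int, rr *int) int {
	if len(key) > 0 {
		h := fnv.New32a()
		h.Write(key)
		return int(h.Sum32() % uint32(partitionCount))
	}
	*rr = (*rr + 1) % partitionCount
	return *rr
}

func main() {
	rr := -1
	fmt.Println(pickPartition([]byte("user-42"), 4, &rr)) // stable for the same key
	fmt.Println(pickPartition(nil, 4, &rr))               // 0, then 1, 2, ...
}
```
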
diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go
new file mode 100644
index 000000000..591406347
--- /dev/null
+++ b/weed/pb/messaging_pb/messaging.pb.go
@@ -0,0 +1,2053 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.12.3
+// source: messaging.proto
+
+package messaging_pb
+
+import (
+ context "context"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type SubscriberMessage_InitMessage_StartPosition int32
+
+const (
+ SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0 // Start at the newest message
+ SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1 // Start at the oldest message
+ SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2 // Start after a specified timestamp, exclusive
+)
+
+// Enum value maps for SubscriberMessage_InitMessage_StartPosition.
+var (
+ SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{
+ 0: "LATEST",
+ 1: "EARLIEST",
+ 2: "TIMESTAMP",
+ }
+ SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{
+ "LATEST": 0,
+ "EARLIEST": 1,
+ "TIMESTAMP": 2,
+ }
+)
+
+func (x SubscriberMessage_InitMessage_StartPosition) Enum() *SubscriberMessage_InitMessage_StartPosition {
+ p := new(SubscriberMessage_InitMessage_StartPosition)
+ *p = x
+ return p
+}
+
+func (x SubscriberMessage_InitMessage_StartPosition) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SubscriberMessage_InitMessage_StartPosition) Descriptor() protoreflect.EnumDescriptor {
+ return file_messaging_proto_enumTypes[0].Descriptor()
+}
+
+func (SubscriberMessage_InitMessage_StartPosition) Type() protoreflect.EnumType {
+ return &file_messaging_proto_enumTypes[0]
+}
+
+func (x SubscriberMessage_InitMessage_StartPosition) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SubscriberMessage_InitMessage_StartPosition.Descriptor instead.
+func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0, 0, 0}
+}
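The three start positions above determine where a subscriber begins reading: the newest message, the oldest retained message, or everything strictly after a timestamp. A short sketch of filling the init message for the Subscribe stream using the generated types; the namespace, topic, and subscriber id are placeholders.

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

// buildInit is an illustrative helper: it fills the first SubscriberMessage
// sent on the Subscribe stream, starting either from the oldest message or
// strictly after a given timestamp.
func buildInit(fromNs int64) *messaging_pb.SubscriberMessage {
	init := &messaging_pb.SubscriberMessage_InitMessage{
		Namespace:    "chat",     // placeholder namespace
		Topic:        "room1",    // placeholder topic
		Partition:    0,
		SubscriberId: "reader-1", // lets the broker track this consumer's progress
	}
	if fromNs > 0 {
		// TIMESTAMP resumes after fromNs, exclusive of fromNs itself.
		init.StartPosition = messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP
		init.TimestampNs = fromNs
	} else {
		init.StartPosition = messaging_pb.SubscriberMessage_InitMessage_EARLIEST
	}
	return &messaging_pb.SubscriberMessage{Init: init}
}

func main() {
	fmt.Println(buildInit(0)) // read from the beginning of the partition
}
```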
+
+type TopicConfiguration_Partitioning int32
+
+const (
+ TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0 // hash by the key value if the key is not null; otherwise round robin
+ TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1 // hash by key value
+ TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2 // pick one partition in round-robin order
+)
+
+// Enum value maps for TopicConfiguration_Partitioning.
+var (
+ TopicConfiguration_Partitioning_name = map[int32]string{
+ 0: "NonNullKeyHash",
+ 1: "KeyHash",
+ 2: "RoundRobin",
+ }
+ TopicConfiguration_Partitioning_value = map[string]int32{
+ "NonNullKeyHash": 0,
+ "KeyHash": 1,
+ "RoundRobin": 2,
+ }
+)
+
+func (x TopicConfiguration_Partitioning) Enum() *TopicConfiguration_Partitioning {
+ p := new(TopicConfiguration_Partitioning)
+ *p = x
+ return p
+}
+
+func (x TopicConfiguration_Partitioning) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (TopicConfiguration_Partitioning) Descriptor() protoreflect.EnumDescriptor {
+ return file_messaging_proto_enumTypes[1].Descriptor()
+}
+
+func (TopicConfiguration_Partitioning) Type() protoreflect.EnumType {
+ return &file_messaging_proto_enumTypes[1]
+}
+
+func (x TopicConfiguration_Partitioning) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use TopicConfiguration_Partitioning.Descriptor instead.
+func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{13, 0}
+}
+
+type SubscriberMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"`
+ Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3" json:"ack,omitempty"`
+ IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"`
+}
+
+func (x *SubscriberMessage) Reset() {
+ *x = SubscriberMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscriberMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscriberMessage) ProtoMessage() {}
+
+func (x *SubscriberMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscriberMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage {
+ if x != nil {
+ return x.Init
+ }
+ return nil
+}
+
+func (x *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage {
+ if x != nil {
+ return x.Ack
+ }
+ return nil
+}
+
+func (x *SubscriberMessage) GetIsClose() bool {
+ if x != nil {
+ return x.IsClose
+ }
+ return false
+}
+
+type Message struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs,proto3" json:"event_time_ns,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // Message key
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` // Message payload
+ Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message headers
+ IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"`
+}
+
+func (x *Message) Reset() {
+ *x = Message{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Message) GetEventTimeNs() int64 {
+ if x != nil {
+ return x.EventTimeNs
+ }
+ return 0
+}
+
+func (x *Message) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Message) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Message) GetHeaders() map[string][]byte {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *Message) GetIsClose() bool {
+ if x != nil {
+ return x.IsClose
+ }
+ return false
+}
+
+type BrokerMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data *Message `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *BrokerMessage) Reset() {
+ *x = BrokerMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BrokerMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BrokerMessage) ProtoMessage() {}
+
+func (x *BrokerMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BrokerMessage.ProtoReflect.Descriptor instead.
+func (*BrokerMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *BrokerMessage) GetData() *Message {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type PublishRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"`
+ Data *Message `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *PublishRequest) Reset() {
+ *x = PublishRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishRequest) ProtoMessage() {}
+
+func (x *PublishRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead.
+func (*PublishRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *PublishRequest) GetInit() *PublishRequest_InitMessage {
+ if x != nil {
+ return x.Init
+ }
+ return nil
+}
+
+func (x *PublishRequest) GetData() *Message {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
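PublishRequest is the client half of the bidirectional Publish stream: per the .proto comments, the namespace and topic are only needed on the initial request, so the first message carries `Init` and later messages carry `Data`. A hedged sketch of that handshake, assuming the generated streaming client method `Publish` (emitted later in this file) and a client connected as in the earlier FindBroker sketch:

```go
package messaging_example

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

// publishOne is an illustrative sketch of the Publish handshake: the first
// request on the stream carries only the init message, later requests carry
// data, and the broker's response may redirect us to another broker.
func publishOne(client messaging_pb.SeaweedMessagingClient) error {
	stream, err := client.Publish(context.Background())
	if err != nil {
		return err
	}
	// 1. Init: identifies the topic partition for the whole stream.
	if err := stream.Send(&messaging_pb.PublishRequest{
		Init: &messaging_pb.PublishRequest_InitMessage{
			Namespace: "chat", Topic: "room1", Partition: 0, // placeholders
		},
	}); err != nil {
		return err
	}
	// 2. Data: one message with a key and payload.
	if err := stream.Send(&messaging_pb.PublishRequest{
		Data: &messaging_pb.Message{Key: []byte("user-42"), Value: []byte("hello")},
	}); err != nil {
		return err
	}
	// 3. The broker may answer with config, or redirect to the owning broker.
	resp, err := stream.Recv()
	if err != nil {
		return err
	}
	if r := resp.GetRedirect(); r != nil {
		return fmt.Errorf("partition moved to broker %s", r.NewBroker)
	}
	return nil
}
```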
+
+type PublishResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+ Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect,proto3" json:"redirect,omitempty"`
+ IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed,proto3" json:"is_closed,omitempty"`
+}
+
+func (x *PublishResponse) Reset() {
+ *x = PublishResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse) ProtoMessage() {}
+
+func (x *PublishResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead.
+func (*PublishResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PublishResponse) GetConfig() *PublishResponse_ConfigMessage {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+func (x *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage {
+ if x != nil {
+ return x.Redirect
+ }
+ return nil
+}
+
+func (x *PublishResponse) GetIsClosed() bool {
+ if x != nil {
+ return x.IsClosed
+ }
+ return false
+}
+
+type DeleteTopicRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+}
+
+func (x *DeleteTopicRequest) Reset() {
+ *x = DeleteTopicRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteTopicRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteTopicRequest) ProtoMessage() {}
+
+func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead.
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteTopicRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *DeleteTopicRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+type DeleteTopicResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteTopicResponse) Reset() {
+ *x = DeleteTopicResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteTopicResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteTopicResponse) ProtoMessage() {}
+
+func (x *DeleteTopicResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteTopicResponse.ProtoReflect.Descriptor instead.
+func (*DeleteTopicResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{6}
+}
+
+type ConfigureTopicRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"`
+}
+
+func (x *ConfigureTopicRequest) Reset() {
+ *x = ConfigureTopicRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigureTopicRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigureTopicRequest) ProtoMessage() {}
+
+func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead.
+func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ConfigureTopicRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *ConfigureTopicRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration {
+ if x != nil {
+ return x.Configuration
+ }
+ return nil
+}
+
+type ConfigureTopicResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ConfigureTopicResponse) Reset() {
+ *x = ConfigureTopicResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigureTopicResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigureTopicResponse) ProtoMessage() {}
+
+func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead.
+func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{8}
+}
+
+type GetTopicConfigurationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+}
+
+func (x *GetTopicConfigurationRequest) Reset() {
+ *x = GetTopicConfigurationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopicConfigurationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopicConfigurationRequest) ProtoMessage() {}
+
+func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead.
+func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *GetTopicConfigurationRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *GetTopicConfigurationRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+type GetTopicConfigurationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration,proto3" json:"configuration,omitempty"`
+}
+
+func (x *GetTopicConfigurationResponse) Reset() {
+ *x = GetTopicConfigurationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopicConfigurationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopicConfigurationResponse) ProtoMessage() {}
+
+func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead.
+func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration {
+ if x != nil {
+ return x.Configuration
+ }
+ return nil
+}
+
+type FindBrokerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ Parition int32 `protobuf:"varint,3,opt,name=parition,proto3" json:"parition,omitempty"`
+}
+
+func (x *FindBrokerRequest) Reset() {
+ *x = FindBrokerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FindBrokerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FindBrokerRequest) ProtoMessage() {}
+
+func (x *FindBrokerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FindBrokerRequest.ProtoReflect.Descriptor instead.
+func (*FindBrokerRequest) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *FindBrokerRequest) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *FindBrokerRequest) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *FindBrokerRequest) GetParition() int32 {
+ if x != nil {
+ return x.Parition
+ }
+ return 0
+}
+
+type FindBrokerResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"`
+}
+
+func (x *FindBrokerResponse) Reset() {
+ *x = FindBrokerResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FindBrokerResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FindBrokerResponse) ProtoMessage() {}
+
+func (x *FindBrokerResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FindBrokerResponse.ProtoReflect.Descriptor instead.
+func (*FindBrokerResponse) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *FindBrokerResponse) GetBroker() string {
+ if x != nil {
+ return x.Broker
+ }
+ return ""
+}
+
+type TopicConfiguration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient,proto3" json:"is_transient,omitempty"`
+ Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,proto3,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"`
+}
+
+func (x *TopicConfiguration) Reset() {
+ *x = TopicConfiguration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TopicConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TopicConfiguration) ProtoMessage() {}
+
+func (x *TopicConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TopicConfiguration.ProtoReflect.Descriptor instead.
+func (*TopicConfiguration) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *TopicConfiguration) GetPartitionCount() int32 {
+ if x != nil {
+ return x.PartitionCount
+ }
+ return 0
+}
+
+func (x *TopicConfiguration) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *TopicConfiguration) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *TopicConfiguration) GetIsTransient() bool {
+ if x != nil {
+ return x.IsTransient
+ }
+ return false
+}
+
+func (x *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning {
+ if x != nil {
+ return x.Partitoning
+ }
+ return TopicConfiguration_NonNullKeyHash
+}
+
+type SubscriberMessage_InitMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"`
+ StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,proto3,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"` // Where to begin consuming from
+ TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs,proto3" json:"timestampNs,omitempty"` // timestamp in nano seconds
+ SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId,proto3" json:"subscriber_id,omitempty"` // uniquely identify a subscriber to track consumption
+}
+
+func (x *SubscriberMessage_InitMessage) Reset() {
+ *x = SubscriberMessage_InitMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscriberMessage_InitMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscriberMessage_InitMessage) ProtoMessage() {}
+
+func (x *SubscriberMessage_InitMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscriberMessage_InitMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *SubscriberMessage_InitMessage) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *SubscriberMessage_InitMessage) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *SubscriberMessage_InitMessage) GetPartition() int32 {
+ if x != nil {
+ return x.Partition
+ }
+ return 0
+}
+
+func (x *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition {
+ if x != nil {
+ return x.StartPosition
+ }
+ return SubscriberMessage_InitMessage_LATEST
+}
+
+func (x *SubscriberMessage_InitMessage) GetTimestampNs() int64 {
+ if x != nil {
+ return x.TimestampNs
+ }
+ return 0
+}
+
+func (x *SubscriberMessage_InitMessage) GetSubscriberId() string {
+ if x != nil {
+ return x.SubscriberId
+ }
+ return ""
+}
+
+type SubscriberMessage_AckMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
+}
+
+func (x *SubscriberMessage_AckMessage) Reset() {
+ *x = SubscriberMessage_AckMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscriberMessage_AckMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscriberMessage_AckMessage) ProtoMessage() {}
+
+func (x *SubscriberMessage_AckMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscriberMessage_AckMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *SubscriberMessage_AckMessage) GetMessageId() int64 {
+ if x != nil {
+ return x.MessageId
+ }
+ return 0
+}
+
+type PublishRequest_InitMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // only needed on the initial request
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // only needed on the initial request
+ Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"`
+}
+
+func (x *PublishRequest_InitMessage) Reset() {
+ *x = PublishRequest_InitMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishRequest_InitMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishRequest_InitMessage) ProtoMessage() {}
+
+func (x *PublishRequest_InitMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishRequest_InitMessage.ProtoReflect.Descriptor instead.
+func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *PublishRequest_InitMessage) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *PublishRequest_InitMessage) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *PublishRequest_InitMessage) GetPartition() int32 {
+ if x != nil {
+ return x.Partition
+ }
+ return 0
+}
+
+type PublishResponse_ConfigMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"`
+}
+
+func (x *PublishResponse_ConfigMessage) Reset() {
+ *x = PublishResponse_ConfigMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishResponse_ConfigMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse_ConfigMessage) ProtoMessage() {}
+
+func (x *PublishResponse_ConfigMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse_ConfigMessage.ProtoReflect.Descriptor instead.
+func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PublishResponse_ConfigMessage) GetPartitionCount() int32 {
+ if x != nil {
+ return x.PartitionCount
+ }
+ return 0
+}
+
+type PublishResponse_RedirectMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker,proto3" json:"new_broker,omitempty"`
+}
+
+func (x *PublishResponse_RedirectMessage) Reset() {
+ *x = PublishResponse_RedirectMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_messaging_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PublishResponse_RedirectMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse_RedirectMessage) ProtoMessage() {}
+
+func (x *PublishResponse_RedirectMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_messaging_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse_RedirectMessage.ProtoReflect.Descriptor instead.
+func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) {
+ return file_messaging_proto_rawDescGZIP(), []int{4, 1}
+}
+
+func (x *PublishResponse_RedirectMessage) GetNewBroker() string {
+ if x != nil {
+ return x.NewBroker
+ }
+ return ""
+}
+
+var File_messaging_proto protoreflect.FileDescriptor
+
+var file_messaging_proto_rawDesc = []byte{
+ 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22,
+ 0x9e, 0x04, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
+ 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
+ 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
+ 0x03, 0x61, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a,
+ 0xc1, 0x02, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x5f, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4e,
+ 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41,
+ 0x54, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x41, 0x52, 0x4c, 0x49, 0x45,
+ 0x53, 0x54, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d,
+ 0x50, 0x10, 0x02, 0x1a, 0x2b, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64,
+ 0x22, 0xee, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0d,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x02, 0x30, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x07,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73,
+ 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73,
+ 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01,
+ 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69,
+ 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x29,
+ 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5f, 0x0a, 0x0b, 0x49, 0x6e, 0x69,
+ 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x02, 0x0a, 0x0f, 0x50,
+ 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43,
+ 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e,
+ 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x08, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x1a, 0x38, 0x0a, 0x0d, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x0f, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f,
+ 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65,
+ 0x77, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0x48, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a,
+ 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74,
+ 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x46, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x18,
+ 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x67, 0x0a, 0x1d,
+ 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67,
+ 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x11, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f,
+ 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1a,
+ 0x0a, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x46, 0x69,
+ 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73,
+ 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a,
+ 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x3f,
+ 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12,
+ 0x0a, 0x0e, 0x4e, 0x6f, 0x6e, 0x4e, 0x75, 0x6c, 0x6c, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68,
+ 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x10, 0x01, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x10, 0x02, 0x32,
+ 0xad, 0x04, 0x0a, 0x10, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62,
+ 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22,
+ 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68,
+ 0x12, 0x1c, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28,
+ 0x01, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67,
+ 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x24, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62,
+ 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a,
+ 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72,
+ 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42,
+ 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42,
+ 0x57, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
+ 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_messaging_proto_rawDescOnce sync.Once
+ file_messaging_proto_rawDescData = file_messaging_proto_rawDesc
+)
+
+func file_messaging_proto_rawDescGZIP() []byte {
+ file_messaging_proto_rawDescOnce.Do(func() {
+ file_messaging_proto_rawDescData = protoimpl.X.CompressGZIP(file_messaging_proto_rawDescData)
+ })
+ return file_messaging_proto_rawDescData
+}
+
+var file_messaging_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_messaging_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
+var file_messaging_proto_goTypes = []interface{}{
+ (SubscriberMessage_InitMessage_StartPosition)(0), // 0: messaging_pb.SubscriberMessage.InitMessage.StartPosition
+ (TopicConfiguration_Partitioning)(0), // 1: messaging_pb.TopicConfiguration.Partitioning
+ (*SubscriberMessage)(nil), // 2: messaging_pb.SubscriberMessage
+ (*Message)(nil), // 3: messaging_pb.Message
+ (*BrokerMessage)(nil), // 4: messaging_pb.BrokerMessage
+ (*PublishRequest)(nil), // 5: messaging_pb.PublishRequest
+ (*PublishResponse)(nil), // 6: messaging_pb.PublishResponse
+ (*DeleteTopicRequest)(nil), // 7: messaging_pb.DeleteTopicRequest
+ (*DeleteTopicResponse)(nil), // 8: messaging_pb.DeleteTopicResponse
+ (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest
+ (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse
+ (*GetTopicConfigurationRequest)(nil), // 11: messaging_pb.GetTopicConfigurationRequest
+ (*GetTopicConfigurationResponse)(nil), // 12: messaging_pb.GetTopicConfigurationResponse
+ (*FindBrokerRequest)(nil), // 13: messaging_pb.FindBrokerRequest
+ (*FindBrokerResponse)(nil), // 14: messaging_pb.FindBrokerResponse
+ (*TopicConfiguration)(nil), // 15: messaging_pb.TopicConfiguration
+ (*SubscriberMessage_InitMessage)(nil), // 16: messaging_pb.SubscriberMessage.InitMessage
+ (*SubscriberMessage_AckMessage)(nil), // 17: messaging_pb.SubscriberMessage.AckMessage
+ nil, // 18: messaging_pb.Message.HeadersEntry
+ (*PublishRequest_InitMessage)(nil), // 19: messaging_pb.PublishRequest.InitMessage
+ (*PublishResponse_ConfigMessage)(nil), // 20: messaging_pb.PublishResponse.ConfigMessage
+ (*PublishResponse_RedirectMessage)(nil), // 21: messaging_pb.PublishResponse.RedirectMessage
+}
+var file_messaging_proto_depIdxs = []int32{
+ 16, // 0: messaging_pb.SubscriberMessage.init:type_name -> messaging_pb.SubscriberMessage.InitMessage
+ 17, // 1: messaging_pb.SubscriberMessage.ack:type_name -> messaging_pb.SubscriberMessage.AckMessage
+ 18, // 2: messaging_pb.Message.headers:type_name -> messaging_pb.Message.HeadersEntry
+ 3, // 3: messaging_pb.BrokerMessage.data:type_name -> messaging_pb.Message
+ 19, // 4: messaging_pb.PublishRequest.init:type_name -> messaging_pb.PublishRequest.InitMessage
+ 3, // 5: messaging_pb.PublishRequest.data:type_name -> messaging_pb.Message
+ 20, // 6: messaging_pb.PublishResponse.config:type_name -> messaging_pb.PublishResponse.ConfigMessage
+ 21, // 7: messaging_pb.PublishResponse.redirect:type_name -> messaging_pb.PublishResponse.RedirectMessage
+ 15, // 8: messaging_pb.ConfigureTopicRequest.configuration:type_name -> messaging_pb.TopicConfiguration
+ 15, // 9: messaging_pb.GetTopicConfigurationResponse.configuration:type_name -> messaging_pb.TopicConfiguration
+ 1, // 10: messaging_pb.TopicConfiguration.partitoning:type_name -> messaging_pb.TopicConfiguration.Partitioning
+ 0, // 11: messaging_pb.SubscriberMessage.InitMessage.startPosition:type_name -> messaging_pb.SubscriberMessage.InitMessage.StartPosition
+ 2, // 12: messaging_pb.SeaweedMessaging.Subscribe:input_type -> messaging_pb.SubscriberMessage
+ 5, // 13: messaging_pb.SeaweedMessaging.Publish:input_type -> messaging_pb.PublishRequest
+ 7, // 14: messaging_pb.SeaweedMessaging.DeleteTopic:input_type -> messaging_pb.DeleteTopicRequest
+ 9, // 15: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest
+ 11, // 16: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest
+ 13, // 17: messaging_pb.SeaweedMessaging.FindBroker:input_type -> messaging_pb.FindBrokerRequest
+ 4, // 18: messaging_pb.SeaweedMessaging.Subscribe:output_type -> messaging_pb.BrokerMessage
+ 6, // 19: messaging_pb.SeaweedMessaging.Publish:output_type -> messaging_pb.PublishResponse
+ 8, // 20: messaging_pb.SeaweedMessaging.DeleteTopic:output_type -> messaging_pb.DeleteTopicResponse
+ 10, // 21: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse
+ 12, // 22: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse
+ 14, // 23: messaging_pb.SeaweedMessaging.FindBroker:output_type -> messaging_pb.FindBrokerResponse
+ 18, // [18:24] is the sub-list for method output_type
+ 12, // [12:18] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_messaging_proto_init() }
+func file_messaging_proto_init() {
+ if File_messaging_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_messaging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscriberMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BrokerMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteTopicRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteTopicResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigureTopicRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigureTopicResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTopicConfigurationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTopicConfigurationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FindBrokerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FindBrokerResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TopicConfiguration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscriberMessage_InitMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscriberMessage_AckMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishRequest_InitMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishResponse_ConfigMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_messaging_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublishResponse_RedirectMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_messaging_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 20,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_messaging_proto_goTypes,
+ DependencyIndexes: file_messaging_proto_depIdxs,
+ EnumInfos: file_messaging_proto_enumTypes,
+ MessageInfos: file_messaging_proto_msgTypes,
+ }.Build()
+ File_messaging_proto = out.File
+ file_messaging_proto_rawDesc = nil
+ file_messaging_proto_goTypes = nil
+ file_messaging_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// SeaweedMessagingClient is the client API for SeaweedMessaging service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type SeaweedMessagingClient interface {
+ Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error)
+ Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error)
+ DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error)
+ ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error)
+ GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error)
+ FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error)
+}
+
+type seaweedMessagingClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient {
+ return &seaweedMessagingClient{cc}
+}
+
+func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedMessagingSubscribeClient{stream}
+ return x, nil
+}
+
+type SeaweedMessaging_SubscribeClient interface {
+ Send(*SubscriberMessage) error
+ Recv() (*BrokerMessage, error)
+ grpc.ClientStream
+}
+
+type seaweedMessagingSubscribeClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
+ m := new(BrokerMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedMessagingPublishClient{stream}
+ return x, nil
+}
+
+type SeaweedMessaging_PublishClient interface {
+ Send(*PublishRequest) error
+ Recv() (*PublishResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedMessagingPublishClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
+ m := new(PublishResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) {
+ out := new(DeleteTopicResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
+ out := new(ConfigureTopicResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
+ out := new(GetTopicConfigurationResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) {
+ out := new(FindBrokerResponse)
+ err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SeaweedMessagingServer is the server API for SeaweedMessaging service.
+type SeaweedMessagingServer interface {
+ Subscribe(SeaweedMessaging_SubscribeServer) error
+ Publish(SeaweedMessaging_PublishServer) error
+ DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error)
+ ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error)
+ GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error)
+ FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error)
+}
+
+// UnimplementedSeaweedMessagingServer can be embedded to have forward compatible implementations.
+type UnimplementedSeaweedMessagingServer struct {
+}
+
+func (*UnimplementedSeaweedMessagingServer) Subscribe(SeaweedMessaging_SubscribeServer) error {
+ return status.Errorf(codes.Unimplemented, "method Subscribe not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) Publish(SeaweedMessaging_PublishServer) error {
+ return status.Errorf(codes.Unimplemented, "method Publish not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented")
+}
+func (*UnimplementedSeaweedMessagingServer) FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FindBroker not implemented")
+}
+
+func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) {
+ s.RegisterService(&_SeaweedMessaging_serviceDesc, srv)
+}
+
+func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream})
+}
+
+type SeaweedMessaging_SubscribeServer interface {
+ Send(*BrokerMessage) error
+ Recv() (*SubscriberMessage, error)
+ grpc.ServerStream
+}
+
+type seaweedMessagingSubscribeServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) {
+ m := new(SubscriberMessage)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream})
+}
+
+type SeaweedMessaging_PublishServer interface {
+ Send(*PublishResponse) error
+ Recv() (*PublishRequest, error)
+ grpc.ServerStream
+}
+
+type seaweedMessagingPublishServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) {
+ m := new(PublishRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteTopicRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ConfigureTopicRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetTopicConfigurationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FindBrokerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).FindBroker(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "messaging_pb.SeaweedMessaging",
+ HandlerType: (*SeaweedMessagingServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "DeleteTopic",
+ Handler: _SeaweedMessaging_DeleteTopic_Handler,
+ },
+ {
+ MethodName: "ConfigureTopic",
+ Handler: _SeaweedMessaging_ConfigureTopic_Handler,
+ },
+ {
+ MethodName: "GetTopicConfiguration",
+ Handler: _SeaweedMessaging_GetTopicConfiguration_Handler,
+ },
+ {
+ MethodName: "FindBroker",
+ Handler: _SeaweedMessaging_FindBroker_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Subscribe",
+ Handler: _SeaweedMessaging_Subscribe_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "Publish",
+ Handler: _SeaweedMessaging_Publish_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "messaging.proto",
+}
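
Editorial sketch (not part of the patch): a minimal Go example of driving the bidirectional Publish stream through the client API generated above. The broker address is a placeholder and the PublishRequest is sent empty, since its payload fields are defined elsewhere in this diff; only the stream plumbing visible above is exercised.

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"google.golang.org/grpc"
)

func main() {
	// Dial a message broker; the address and credentials are placeholders.
	conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := messaging_pb.NewSeaweedMessagingClient(conn)

	// Publish opens a bidirectional stream (see _SeaweedMessaging_serviceDesc above).
	stream, err := client.Publish(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Send one empty request and read the broker's reply.
	if err := stream.Send(&messaging_pb.PublishRequest{}); err != nil {
		log.Fatal(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("broker replied: %v", resp)
}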
diff --git a/weed/pb/shared_values.go b/weed/pb/shared_values.go
new file mode 100644
index 000000000..1af19e51a
--- /dev/null
+++ b/weed/pb/shared_values.go
@@ -0,0 +1,5 @@
+package pb
+
+const (
+ AdminShellClient = "adminShell"
+)
diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go
index b2edf9c5e..cae9e018f 100644
--- a/weed/pb/volume_info.go
+++ b/weed/pb/volume_info.go
@@ -15,39 +15,49 @@ import (
)
// MaybeLoadVolumeInfo loads the file data as a *volume_server_pb.VolumeInfo; the returned volumeInfo will not be nil
-func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool) {
+func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeInfo, hasRemoteFile bool, hasVolumeInfoFile bool, err error) {
- volumeInfo := &volume_server_pb.VolumeInfo{}
+ volumeInfo = &volume_server_pb.VolumeInfo{}
glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
if !exists {
- return volumeInfo, false
+ return
}
+ hasVolumeInfoFile = true
if !canRead {
glog.Warningf("can not read %s", fileName)
+ err = fmt.Errorf("can not read %s", fileName)
+ return
}
- return volumeInfo, false
+ return
}
+ hasVolumeInfoFile = true
+
glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
tierData, readErr := ioutil.ReadFile(fileName)
if readErr != nil {
glog.Warningf("fail to read %s : %v", fileName, readErr)
- return volumeInfo, false
+ err = fmt.Errorf("fail to read %s : %v", fileName, readErr)
+ return
+
}
glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
- if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
+ if err = jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
glog.Warningf("unmarshal error: %v", err)
- return volumeInfo, false
+ err = fmt.Errorf("unmarshal error: %v", err)
+ return
}
if len(volumeInfo.GetFiles()) == 0 {
- return volumeInfo, false
+ return
}
- return volumeInfo, true
+ hasRemoteFile = true
+
+ return
}
func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error {
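
Editorial sketch (not part of the patch): the refactor above widens MaybeLoadVolumeInfo from two return values to four, so callers can distinguish "no .vif file on disk", "file present but unreadable or unparsable" (err != nil), and "file present and referencing remote-tier files". A hedged usage example, with an illustrative file path:

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb"
)

func main() {
	// The .vif path is illustrative only.
	volumeInfo, hasRemoteFile, hasVolumeInfoFile, err := pb.MaybeLoadVolumeInfo("/data/1.vif")
	switch {
	case err != nil:
		// The file exists but could not be read or unmarshalled.
		log.Printf("volume info unusable: %v", err)
	case !hasVolumeInfoFile:
		// No .vif file on disk; volumeInfo is still a non-nil empty struct.
		log.Println("no volume info file")
	case hasRemoteFile:
		// At least one remote-tier file is recorded.
		log.Printf("remote files: %v", volumeInfo.GetFiles())
	default:
		log.Println("volume info present, no remote files")
	}
}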
diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto
index 9cf7272ef..f9836c402 100644
--- a/weed/pb/volume_server.proto
+++ b/weed/pb/volume_server.proto
@@ -1,6 +1,7 @@
syntax = "proto3";
package volume_server_pb;
+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb";
//////////////////////////////////////////////////
@@ -8,6 +9,7 @@ service VolumeServer {
// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
}
+
rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
}
rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) {
@@ -35,6 +37,12 @@ service VolumeServer {
}
rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
}
+ rpc VolumeMarkWritable (VolumeMarkWritableRequest) returns (VolumeMarkWritableResponse) {
+ }
+ rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) {
+ }
+ rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) {
+ }
// copy the .idx .dat files, and mount this volume
rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) {
@@ -44,6 +52,11 @@ service VolumeServer {
rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) {
}
+ rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) {
+ }
+ rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) {
+ }
+
rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) {
}
rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) {
@@ -75,16 +88,24 @@ service VolumeServer {
rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
}
- // query
+ rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) {
+ }
+ rpc VolumeServerLeave (VolumeServerLeaveRequest) returns (VolumeServerLeaveResponse) {
+ }
+
+ // query
rpc Query (QueryRequest) returns (stream QueriedStripe) {
}
+ rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) {
+ }
}
//////////////////////////////////////////////////
message BatchDeleteRequest {
repeated string file_ids = 1;
+ bool skip_cookie_check = 2;
}
message BatchDeleteResponse {
@@ -119,6 +140,7 @@ message VacuumVolumeCommitRequest {
uint32 volume_id = 1;
}
message VacuumVolumeCommitResponse {
+ bool is_read_only = 1;
}
message VacuumVolumeCleanupRequest {
@@ -140,6 +162,7 @@ message AllocateVolumeRequest {
string replication = 4;
string ttl = 5;
uint32 memory_map_max_size_mb = 6;
+ string disk_type = 7;
}
message AllocateVolumeResponse {
}
@@ -189,12 +212,34 @@ message VolumeMarkReadonlyRequest {
message VolumeMarkReadonlyResponse {
}
+message VolumeMarkWritableRequest {
+ uint32 volume_id = 1;
+}
+message VolumeMarkWritableResponse {
+}
+
+message VolumeConfigureRequest {
+ uint32 volume_id = 1;
+ string replication = 2;
+}
+message VolumeConfigureResponse {
+ string error = 1;
+}
+
+message VolumeStatusRequest {
+ uint32 volume_id = 1;
+}
+message VolumeStatusResponse {
+ bool is_read_only = 1;
+}
+
message VolumeCopyRequest {
uint32 volume_id = 1;
string collection = 2;
string replication = 3;
string ttl = 4;
string source_data_node = 5;
+ string disk_type = 6;
}
message VolumeCopyResponse {
uint64 last_append_at_ns = 1;
@@ -213,6 +258,25 @@ message CopyFileResponse {
bytes file_content = 1;
}
+message ReadNeedleBlobRequest {
+ uint32 volume_id = 1;
+ uint64 needle_id = 2;
+ int64 offset = 3; // actual offset
+ int32 size = 4;
+}
+message ReadNeedleBlobResponse {
+ bytes needle_blob = 1;
+}
+
+message WriteNeedleBlobRequest {
+ uint32 volume_id = 1;
+ uint64 needle_id = 2;
+ int32 size = 3;
+ bytes needle_blob = 4;
+}
+message WriteNeedleBlobResponse {
+}
+
message VolumeTailSenderRequest {
uint32 volume_id = 1;
uint64 since_ns = 2;
@@ -323,6 +387,7 @@ message ReadVolumeFileStatusResponse {
uint64 file_count = 6;
uint32 compaction_revision = 7;
string collection = 8;
+ string disk_type = 9;
}
message DiskStatus {
@@ -330,6 +395,9 @@ message DiskStatus {
uint64 all = 2;
uint64 used = 3;
uint64 free = 4;
+ float percent_free = 5;
+ float percent_used = 6;
+ string disk_type = 7;
}
message MemStatus {
@@ -355,6 +423,7 @@ message RemoteFile {
message VolumeInfo {
repeated RemoteFile files = 1;
uint32 version = 2;
+ string replication = 3;
}
message VolumeTierMoveDatToRemoteRequest {
@@ -378,6 +447,19 @@ message VolumeTierMoveDatFromRemoteResponse {
float processedPercentage = 2;
}
+message VolumeServerStatusRequest {
+
+}
+message VolumeServerStatusResponse {
+ repeated DiskStatus disk_statuses = 1;
+ MemStatus memory_status = 2;
+}
+
+message VolumeServerLeaveRequest {
+}
+message VolumeServerLeaveResponse {
+}
+
// select on volume servers
message QueryRequest {
repeated string selections = 1;
@@ -435,3 +517,16 @@ message QueryRequest {
message QueriedStripe {
bytes records = 1;
}
+
+message VolumeNeedleStatusRequest {
+ uint32 volume_id = 1;
+ uint64 needle_id = 2;
+}
+message VolumeNeedleStatusResponse {
+ uint64 needle_id = 1;
+ uint32 cookie = 2;
+ uint32 size = 3;
+ uint64 last_modified = 4;
+ uint32 crc = 5;
+ string ttl = 6;
+}
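
Editorial sketch (not part of the patch): once the Go stubs for this proto are regenerated, the new unary VolumeStatus RPC should be callable as below. The client constructor follows the existing protoc-gen-go naming for the VolumeServer service; the address and volume id are placeholders.

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)

	// Query the read-only flag introduced by VolumeStatusRequest/Response above.
	resp, err := client.VolumeStatus(context.Background(),
		&volume_server_pb.VolumeStatusRequest{VolumeId: 1})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("volume 1 read-only: %v", resp.GetIsReadOnly())
}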
diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go
index 1c2e10d8e..c642142ba 100644
--- a/weed/pb/volume_server_pb/volume_server.pb.go
+++ b/weed/pb/volume_server_pb/volume_server.pb.go
@@ -1,2114 +1,7262 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.12.3
// source: volume_server.proto
-// DO NOT EDIT!
-
-/*
-Package volume_server_pb is a generated protocol buffer package.
-
-It is generated from these files:
- volume_server.proto
-
-It has these top-level messages:
- BatchDeleteRequest
- BatchDeleteResponse
- DeleteResult
- Empty
- VacuumVolumeCheckRequest
- VacuumVolumeCheckResponse
- VacuumVolumeCompactRequest
- VacuumVolumeCompactResponse
- VacuumVolumeCommitRequest
- VacuumVolumeCommitResponse
- VacuumVolumeCleanupRequest
- VacuumVolumeCleanupResponse
- DeleteCollectionRequest
- DeleteCollectionResponse
- AllocateVolumeRequest
- AllocateVolumeResponse
- VolumeSyncStatusRequest
- VolumeSyncStatusResponse
- VolumeIncrementalCopyRequest
- VolumeIncrementalCopyResponse
- VolumeMountRequest
- VolumeMountResponse
- VolumeUnmountRequest
- VolumeUnmountResponse
- VolumeDeleteRequest
- VolumeDeleteResponse
- VolumeMarkReadonlyRequest
- VolumeMarkReadonlyResponse
- VolumeCopyRequest
- VolumeCopyResponse
- CopyFileRequest
- CopyFileResponse
- VolumeTailSenderRequest
- VolumeTailSenderResponse
- VolumeTailReceiverRequest
- VolumeTailReceiverResponse
- VolumeEcShardsGenerateRequest
- VolumeEcShardsGenerateResponse
- VolumeEcShardsRebuildRequest
- VolumeEcShardsRebuildResponse
- VolumeEcShardsCopyRequest
- VolumeEcShardsCopyResponse
- VolumeEcShardsDeleteRequest
- VolumeEcShardsDeleteResponse
- VolumeEcShardsMountRequest
- VolumeEcShardsMountResponse
- VolumeEcShardsUnmountRequest
- VolumeEcShardsUnmountResponse
- VolumeEcShardReadRequest
- VolumeEcShardReadResponse
- VolumeEcBlobDeleteRequest
- VolumeEcBlobDeleteResponse
- VolumeEcShardsToVolumeRequest
- VolumeEcShardsToVolumeResponse
- ReadVolumeFileStatusRequest
- ReadVolumeFileStatusResponse
- DiskStatus
- MemStatus
- RemoteFile
- VolumeInfo
- VolumeTierMoveDatToRemoteRequest
- VolumeTierMoveDatToRemoteResponse
- VolumeTierMoveDatFromRemoteRequest
- VolumeTierMoveDatFromRemoteResponse
- QueryRequest
- QueriedStripe
-*/
-package volume_server_pb
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package volume_server_pb
import (
- context "golang.org/x/net/context"
+ context "context"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
type BatchDeleteRequest struct {
- FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"`
+ SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"`
+}
+
+func (x *BatchDeleteRequest) Reset() {
+ *x = BatchDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BatchDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BatchDeleteRequest) ProtoMessage() {}
+
+func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *BatchDeleteRequest) Reset() { *m = BatchDeleteRequest{} }
-func (m *BatchDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*BatchDeleteRequest) ProtoMessage() {}
-func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+// Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead.
+func (*BatchDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{0}
+}
-func (m *BatchDeleteRequest) GetFileIds() []string {
- if m != nil {
- return m.FileIds
+func (x *BatchDeleteRequest) GetFileIds() []string {
+ if x != nil {
+ return x.FileIds
}
return nil
}
+func (x *BatchDeleteRequest) GetSkipCookieCheck() bool {
+ if x != nil {
+ return x.SkipCookieCheck
+ }
+ return false
+}
+
type BatchDeleteResponse struct {
- Results []*DeleteResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *BatchDeleteResponse) Reset() {
+ *x = BatchDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BatchDeleteResponse) Reset() { *m = BatchDeleteResponse{} }
-func (m *BatchDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*BatchDeleteResponse) ProtoMessage() {}
-func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (x *BatchDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BatchDeleteResponse) ProtoMessage() {}
+
+func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead.
+func (*BatchDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{1}
+}
-func (m *BatchDeleteResponse) GetResults() []*DeleteResult {
- if m != nil {
- return m.Results
+func (x *BatchDeleteResponse) GetResults() []*DeleteResult {
+ if x != nil {
+ return x.Results
}
return nil
}
type DeleteResult struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"`
- Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"`
- Version uint32 `protobuf:"varint,5,opt,name=version" json:"version,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"`
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
+ Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
}
-func (m *DeleteResult) Reset() { *m = DeleteResult{} }
-func (m *DeleteResult) String() string { return proto.CompactTextString(m) }
-func (*DeleteResult) ProtoMessage() {}
-func (*DeleteResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (x *DeleteResult) Reset() {
+ *x = DeleteResult{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteResult) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteResult) ProtoMessage() {}
+
+func (x *DeleteResult) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-func (m *DeleteResult) GetFileId() string {
- if m != nil {
- return m.FileId
+// Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead.
+func (*DeleteResult) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DeleteResult) GetFileId() string {
+ if x != nil {
+ return x.FileId
}
return ""
}
-func (m *DeleteResult) GetStatus() int32 {
- if m != nil {
- return m.Status
+func (x *DeleteResult) GetStatus() int32 {
+ if x != nil {
+ return x.Status
}
return 0
}
-func (m *DeleteResult) GetError() string {
- if m != nil {
- return m.Error
+func (x *DeleteResult) GetError() string {
+ if x != nil {
+ return x.Error
}
return ""
}
-func (m *DeleteResult) GetSize() uint32 {
- if m != nil {
- return m.Size
+func (x *DeleteResult) GetSize() uint32 {
+ if x != nil {
+ return x.Size
}
return 0
}
-func (m *DeleteResult) GetVersion() uint32 {
- if m != nil {
- return m.Version
+func (x *DeleteResult) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
}
return 0
}
type Empty struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Empty) Reset() {
+ *x = Empty{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Empty) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{3}
+}
type VacuumVolumeCheckRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VacuumVolumeCheckRequest) Reset() {
+ *x = VacuumVolumeCheckRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} }
-func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCheckRequest) ProtoMessage() {}
-func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (x *VacuumVolumeCheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCheckRequest) ProtoMessage() {}
+
+func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{4}
+}
-func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
type VacuumVolumeCheckResponse struct {
- GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio" json:"garbage_ratio,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"`
}
-func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} }
-func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCheckResponse) ProtoMessage() {}
-func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (x *VacuumVolumeCheckResponse) Reset() {
+ *x = VacuumVolumeCheckResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VacuumVolumeCheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCheckResponse) ProtoMessage() {}
+
+func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 {
- if m != nil {
- return m.GarbageRatio
+// Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 {
+ if x != nil {
+ return x.GarbageRatio
}
return 0
}
type VacuumVolumeCompactRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"`
+}
+
+func (x *VacuumVolumeCompactRequest) Reset() {
+ *x = VacuumVolumeCompactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VacuumVolumeCompactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} }
-func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCompactRequest) ProtoMessage() {}
-func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*VacuumVolumeCompactRequest) ProtoMessage() {}
-func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *VacuumVolumeCompactRequest) GetPreallocate() int64 {
- if m != nil {
- return m.Preallocate
+func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 {
+ if x != nil {
+ return x.Preallocate
}
return 0
}
type VacuumVolumeCompactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} }
-func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCompactResponse) ProtoMessage() {}
-func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (x *VacuumVolumeCompactResponse) Reset() {
+ *x = VacuumVolumeCompactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VacuumVolumeCommitRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+func (x *VacuumVolumeCompactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} }
-func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCommitRequest) ProtoMessage() {}
-func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*VacuumVolumeCompactResponse) ProtoMessage() {}
-func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VacuumVolumeCommitResponse struct {
+// Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{7}
}
-func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} }
-func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCommitResponse) ProtoMessage() {}
-func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+type VacuumVolumeCommitRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type VacuumVolumeCleanupRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
}
-func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} }
-func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
-func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
-
-func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VacuumVolumeCommitRequest) Reset() {
+ *x = VacuumVolumeCommitRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VacuumVolumeCleanupResponse struct {
+func (x *VacuumVolumeCommitRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} }
-func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
-func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*VacuumVolumeCommitRequest) ProtoMessage() {}
-type DeleteCollectionRequest struct {
- Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
+func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
-func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+// Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{8}
+}
-func (m *DeleteCollectionRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
- return ""
+ return 0
}
-type DeleteCollectionResponse struct {
+type VacuumVolumeCommitResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"`
}
-func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
-func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (x *VacuumVolumeCommitResponse) Reset() {
+ *x = VacuumVolumeCommitResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type AllocateVolumeRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"`
- Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"`
- MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"`
+func (x *VacuumVolumeCommitResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} }
-func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateVolumeRequest) ProtoMessage() {}
-func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*VacuumVolumeCommitResponse) ProtoMessage() {}
-func (m *AllocateVolumeRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *AllocateVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{9}
}
-func (m *AllocateVolumeRequest) GetPreallocate() int64 {
- if m != nil {
- return m.Preallocate
+func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool {
+ if x != nil {
+ return x.IsReadOnly
}
- return 0
+ return false
+}
+
+type VacuumVolumeCleanupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
}
-func (m *AllocateVolumeRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *VacuumVolumeCleanupRequest) Reset() {
+ *x = VacuumVolumeCleanupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *AllocateVolumeRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *VacuumVolumeCleanupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
+
+func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 {
- if m != nil {
- return m.MemoryMapMaxSizeMb
+// Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-type AllocateVolumeResponse struct {
+type VacuumVolumeCleanupResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} }
-func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateVolumeResponse) ProtoMessage() {}
-func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (x *VacuumVolumeCleanupResponse) Reset() {
+ *x = VacuumVolumeCleanupResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VolumeSyncStatusRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+func (x *VacuumVolumeCleanupResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} }
-func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncStatusRequest) ProtoMessage() {}
-func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
-func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VolumeSyncStatusResponse struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"`
- TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset" json:"tail_offset,omitempty"`
- CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"`
- IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"`
+// Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{11}
}
-func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} }
-func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncStatusResponse) ProtoMessage() {}
-func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+type DeleteCollectionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
- }
- return 0
+ Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"`
}
-func (m *VolumeSyncStatusResponse) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *DeleteCollectionRequest) Reset() {
+ *x = DeleteCollectionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-func (m *VolumeSyncStatusResponse) GetReplication() string {
- if m != nil {
- return m.Replication
+func (x *DeleteCollectionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionRequest) ProtoMessage() {}
+
+func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{12}
}
-func (m *VolumeSyncStatusResponse) GetTtl() string {
- if m != nil {
- return m.Ttl
+func (x *DeleteCollectionRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeSyncStatusResponse) GetTailOffset() uint64 {
- if m != nil {
- return m.TailOffset
+type DeleteCollectionResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteCollectionResponse) Reset() {
+ *x = DeleteCollectionResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *VolumeSyncStatusResponse) GetCompactRevision() uint32 {
- if m != nil {
- return m.CompactRevision
+func (x *DeleteCollectionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteCollectionResponse) ProtoMessage() {}
+
+func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{13}
+}
+
+type AllocateVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"`
+ Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"`
+ DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
}
-func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 {
- if m != nil {
- return m.IdxFileSize
+
+func (x *AllocateVolumeRequest) Reset() {
+ *x = AllocateVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VolumeIncrementalCopyRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
+
+func (x *AllocateVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} }
-func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeIncrementalCopyRequest) ProtoMessage() {}
-func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (*AllocateVolumeRequest) ProtoMessage() {}
+
-func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VolumeIncrementalCopyRequest) GetSinceNs() uint64 {
- if m != nil {
- return m.SinceNs
+
+// Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead.
+func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *AllocateVolumeRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-type VolumeIncrementalCopyResponse struct {
- FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
+
+func (x *AllocateVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
}
-func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} }
-func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeIncrementalCopyResponse) ProtoMessage() {}
-func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte {
- if m != nil {
- return m.FileContent
+
+func (x *AllocateVolumeRequest) GetPreallocate() int64 {
+ if x != nil {
+ return x.Preallocate
}
- return nil
+ return 0
}
-type VolumeMountRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+
+func (x *AllocateVolumeRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
}
-func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} }
-func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeMountRequest) ProtoMessage() {}
-func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (x *AllocateVolumeRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
-func (m *VolumeMountRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+
+func (x *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 {
+ if x != nil {
+ return x.MemoryMapMaxSizeMb
}
return 0
}
-type VolumeMountResponse struct {
+
+func (x *AllocateVolumeRequest) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type AllocateVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} }
-func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeMountResponse) ProtoMessage() {}
-func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+func (x *AllocateVolumeResponse) Reset() {
+ *x = AllocateVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VolumeUnmountRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+
+func (x *AllocateVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} }
-func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeUnmountRequest) ProtoMessage() {}
-func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (*AllocateVolumeResponse) ProtoMessage() {}
+
-func (m *VolumeUnmountRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VolumeUnmountResponse struct {
+
+// Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead.
+func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{15}
}
-func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} }
-func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeUnmountResponse) ProtoMessage() {}
-func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+type VolumeSyncStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type VolumeDeleteRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
}
-func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} }
-func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeDeleteRequest) ProtoMessage() {}
-func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
-
-func (m *VolumeDeleteRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+
+func (x *VolumeSyncStatusRequest) Reset() {
+ *x = VolumeSyncStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VolumeDeleteResponse struct {
+
+func (x *VolumeSyncStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} }
-func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeDeleteResponse) ProtoMessage() {}
-func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (*VolumeSyncStatusRequest) ProtoMessage() {}
+
-type VolumeMarkReadonlyRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} }
-func (m *VolumeMarkReadonlyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeMarkReadonlyRequest) ProtoMessage() {}
-func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+// Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{16}
+}
-func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+
+func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-type VolumeMarkReadonlyResponse struct {
+
+type VolumeSyncStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"`
+ CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"`
}
-func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} }
-func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeMarkReadonlyResponse) ProtoMessage() {}
-func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+func (x *VolumeSyncStatusResponse) Reset() {
+ *x = VolumeSyncStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VolumeCopyRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"`
- SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"`
+
+func (x *VolumeSyncStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} }
-func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeCopyRequest) ProtoMessage() {}
-func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (*VolumeSyncStatusResponse) ProtoMessage() {}
+
-func (m *VolumeCopyRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeSyncStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{17}
}
-func (m *VolumeCopyRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+
+func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
- return ""
+ return 0
}
-func (m *VolumeCopyRequest) GetReplication() string {
- if m != nil {
- return m.Replication
+
+func (x *VolumeSyncStatusResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeCopyRequest) GetTtl() string {
- if m != nil {
- return m.Ttl
+
+func (x *VolumeSyncStatusResponse) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
return ""
}
-func (m *VolumeCopyRequest) GetSourceDataNode() string {
- if m != nil {
- return m.SourceDataNode
+
+func (x *VolumeSyncStatusResponse) GetTtl() string {
+ if x != nil {
+ return x.Ttl
}
return ""
}
-type VolumeCopyResponse struct {
- LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs" json:"last_append_at_ns,omitempty"`
+
+func (x *VolumeSyncStatusResponse) GetTailOffset() uint64 {
+ if x != nil {
+ return x.TailOffset
+ }
+ return 0
}
-func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} }
-func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeCopyResponse) ProtoMessage() {}
-func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
-
-func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 {
- if m != nil {
- return m.LastAppendAtNs
+
+func (x *VolumeSyncStatusResponse) GetCompactRevision() uint32 {
+ if x != nil {
+ return x.CompactRevision
}
return 0
}
-type CopyFileRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"`
- CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"`
- StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"`
- Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"`
- IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"`
- IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound" json:"ignore_source_file_not_found,omitempty"`
+
+func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 {
+ if x != nil {
+ return x.IdxFileSize
+ }
+ return 0
}
-func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} }
-func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) }
-func (*CopyFileRequest) ProtoMessage() {}
-func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+type VolumeIncrementalCopyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+}
-func (m *CopyFileRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+
+func (x *VolumeIncrementalCopyRequest) Reset() {
+ *x = VolumeIncrementalCopyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *CopyFileRequest) GetExt() string {
- if m != nil {
- return m.Ext
+
+func (x *VolumeIncrementalCopyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeIncrementalCopyRequest) ProtoMessage() {}
+
+func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *CopyFileRequest) GetCompactionRevision() uint32 {
- if m != nil {
- return m.CompactionRevision
+
+// Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *CopyFileRequest) GetStopOffset() uint64 {
- if m != nil {
- return m.StopOffset
+
+func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 {
+ if x != nil {
+ return x.SinceNs
}
return 0
}
-func (m *CopyFileRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+
+type VolumeIncrementalCopyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
}
-func (m *CopyFileRequest) GetIsEcVolume() bool {
- if m != nil {
- return m.IsEcVolume
+
+func (x *VolumeIncrementalCopyResponse) Reset() {
+ *x = VolumeIncrementalCopyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return false
}
-func (m *CopyFileRequest) GetIgnoreSourceFileNotFound() bool {
- if m != nil {
- return m.IgnoreSourceFileNotFound
- }
- return false
+
+func (x *VolumeIncrementalCopyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type CopyFileResponse struct {
- FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
+
+func (*VolumeIncrementalCopyResponse) ProtoMessage() {}
+
+func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} }
-func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) }
-func (*CopyFileResponse) ProtoMessage() {}
-func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+
+// Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{19}
+}
-func (m *CopyFileResponse) GetFileContent() []byte {
- if m != nil {
- return m.FileContent
+
+func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte {
+ if x != nil {
+ return x.FileContent
}
return nil
}
-type VolumeTailSenderRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
- IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"`
+
+type VolumeMountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMountRequest) Reset() {
+ *x = VolumeMountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} }
-func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeTailSenderRequest) ProtoMessage() {}
-func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+
+func (*VolumeMountRequest) ProtoMessage() {}
+
-func (m *VolumeTailSenderRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VolumeTailSenderRequest) GetSinceNs() uint64 {
- if m != nil {
- return m.SinceNs
+
+// Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *VolumeMountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 {
- if m != nil {
- return m.IdleTimeoutSeconds
+
+type VolumeMountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMountResponse) Reset() {
+ *x = VolumeMountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-type VolumeTailSenderResponse struct {
- NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"`
- NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"`
- IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk" json:"is_last_chunk,omitempty"`
+
+func (x *VolumeMountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} }
-func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeTailSenderResponse) ProtoMessage() {}
-func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func (*VolumeMountResponse) ProtoMessage() {}
+
-func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte {
- if m != nil {
- return m.NeedleHeader
+func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{21}
+}
+
+type VolumeUnmountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
}
-func (m *VolumeTailSenderResponse) GetNeedleBody() []byte {
- if m != nil {
- return m.NeedleBody
+
+func (x *VolumeUnmountRequest) Reset() {
+ *x = VolumeUnmountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeUnmountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeUnmountRequest) ProtoMessage() {}
+
+func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *VolumeUnmountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeUnmountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeUnmountResponse) Reset() {
+ *x = VolumeUnmountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeUnmountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeUnmountResponse) ProtoMessage() {}
+
+func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{23}
+}
+
+type VolumeDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeDeleteRequest) Reset() {
+ *x = VolumeDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *VolumeDeleteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeDeleteResponse) Reset() {
+ *x = VolumeDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{25}
+}
+
+type VolumeMarkReadonlyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMarkReadonlyRequest) Reset() {
+ *x = VolumeMarkReadonlyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMarkReadonlyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkReadonlyRequest) ProtoMessage() {}
+
+func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeMarkReadonlyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMarkReadonlyResponse) Reset() {
+ *x = VolumeMarkReadonlyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMarkReadonlyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkReadonlyResponse) ProtoMessage() {}
+
+func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{27}
+}
+
+type VolumeMarkWritableRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMarkWritableRequest) Reset() {
+ *x = VolumeMarkWritableRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMarkWritableRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkWritableRequest) ProtoMessage() {}
+
+func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkWritableRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMarkWritableRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *VolumeMarkWritableRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeMarkWritableResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMarkWritableResponse) Reset() {
+ *x = VolumeMarkWritableResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeMarkWritableResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkWritableResponse) ProtoMessage() {}
+
+func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkWritableResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMarkWritableResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{29}
+}
+
+type VolumeConfigureRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+}
+
+func (x *VolumeConfigureRequest) Reset() {
+ *x = VolumeConfigureRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeConfigureRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeConfigureRequest) ProtoMessage() {}
+
+func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead.
+func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *VolumeConfigureRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeConfigureRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+type VolumeConfigureResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *VolumeConfigureResponse) Reset() {
+ *x = VolumeConfigureResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeConfigureResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeConfigureResponse) ProtoMessage() {}
+
+func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead.
+func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *VolumeConfigureResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+type VolumeStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeStatusRequest) Reset() {
+ *x = VolumeStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeStatusRequest) ProtoMessage() {}
+
+func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *VolumeStatusRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type VolumeStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"`
+}
+
+func (x *VolumeStatusResponse) Reset() {
+ *x = VolumeStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeStatusResponse) ProtoMessage() {}
+
+func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *VolumeStatusResponse) GetIsReadOnly() bool {
+ if x != nil {
+ return x.IsReadOnly
+ }
+ return false
+}
+
+type VolumeCopyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"`
+ DiskType string `protobuf:"bytes,6,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *VolumeCopyRequest) Reset() {
+ *x = VolumeCopyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeCopyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeCopyRequest) ProtoMessage() {}
+
+func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeCopyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *VolumeCopyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeCopyRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetReplication() string {
+ if x != nil {
+ return x.Replication
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetTtl() string {
+ if x != nil {
+ return x.Ttl
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetSourceDataNode() string {
+ if x != nil {
+ return x.SourceDataNode
+ }
+ return ""
+}
+
+func (x *VolumeCopyRequest) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type VolumeCopyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"`
+}
+
+func (x *VolumeCopyResponse) Reset() {
+ *x = VolumeCopyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeCopyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeCopyResponse) ProtoMessage() {}
+
+func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeCopyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 {
+ if x != nil {
+ return x.LastAppendAtNs
+ }
+ return 0
+}
+
+type CopyFileRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"`
+ CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"`
+ StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"`
+ Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"`
+ IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"`
+ IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"`
+}
+
+func (x *CopyFileRequest) Reset() {
+ *x = CopyFileRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CopyFileRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CopyFileRequest) ProtoMessage() {}
+
+func (x *CopyFileRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead.
+func (*CopyFileRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *CopyFileRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *CopyFileRequest) GetExt() string {
+ if x != nil {
+ return x.Ext
+ }
+ return ""
+}
+
+func (x *CopyFileRequest) GetCompactionRevision() uint32 {
+ if x != nil {
+ return x.CompactionRevision
+ }
+ return 0
+}
+
+func (x *CopyFileRequest) GetStopOffset() uint64 {
+ if x != nil {
+ return x.StopOffset
+ }
+ return 0
+}
+
+func (x *CopyFileRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *CopyFileRequest) GetIsEcVolume() bool {
+ if x != nil {
+ return x.IsEcVolume
+ }
+ return false
+}
+
+func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool {
+ if x != nil {
+ return x.IgnoreSourceFileNotFound
+ }
+ return false
+}
+
+type CopyFileResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
+}
+
+func (x *CopyFileResponse) Reset() {
+ *x = CopyFileResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CopyFileResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CopyFileResponse) ProtoMessage() {}
+
+func (x *CopyFileResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead.
+func (*CopyFileResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *CopyFileResponse) GetFileContent() []byte {
+ if x != nil {
+ return x.FileContent
+ }
+ return nil
+}
+
+type ReadNeedleBlobRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"`
+ Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset
+ Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+}
+
+func (x *ReadNeedleBlobRequest) Reset() {
+ *x = ReadNeedleBlobRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadNeedleBlobRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadNeedleBlobRequest) ProtoMessage() {}
+
+func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead.
+func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *ReadNeedleBlobRequest) GetNeedleId() uint64 {
+ if x != nil {
+ return x.NeedleId
+ }
+ return 0
+}
+
+func (x *ReadNeedleBlobRequest) GetOffset() int64 {
+ if x != nil {
+ return x.Offset
+ }
+ return 0
+}
+
+func (x *ReadNeedleBlobRequest) GetSize() int32 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+type ReadNeedleBlobResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"`
+}
+
+func (x *ReadNeedleBlobResponse) Reset() {
+ *x = ReadNeedleBlobResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadNeedleBlobResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadNeedleBlobResponse) ProtoMessage() {}
+
+func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead.
+func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{39}
+}
+
+func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte {
+ if x != nil {
+ return x.NeedleBlob
+ }
+ return nil
+}
+
+type WriteNeedleBlobRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"`
+ Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+ NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"`
+}
+
+func (x *WriteNeedleBlobRequest) Reset() {
+ *x = WriteNeedleBlobRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *WriteNeedleBlobRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteNeedleBlobRequest) ProtoMessage() {}
+
+func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead.
+func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{40}
+}
+
+func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *WriteNeedleBlobRequest) GetNeedleId() uint64 {
+ if x != nil {
+ return x.NeedleId
+ }
+ return 0
+}
+
+func (x *WriteNeedleBlobRequest) GetSize() int32 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *WriteNeedleBlobRequest) GetNeedleBlob() []byte {
+ if x != nil {
+ return x.NeedleBlob
+ }
+ return nil
+}
+
+type WriteNeedleBlobResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *WriteNeedleBlobResponse) Reset() {
+ *x = WriteNeedleBlobResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *WriteNeedleBlobResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteNeedleBlobResponse) ProtoMessage() {}
+
+func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead.
+func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{41}
+}
+
+type VolumeTailSenderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+ IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"`
+}
+
+func (x *VolumeTailSenderRequest) Reset() {
+ *x = VolumeTailSenderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailSenderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailSenderRequest) ProtoMessage() {}
+
+func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{42}
+}
+
+func (x *VolumeTailSenderRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeTailSenderRequest) GetSinceNs() uint64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 {
+ if x != nil {
+ return x.IdleTimeoutSeconds
+ }
+ return 0
+}
+
+type VolumeTailSenderResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"`
+ NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"`
+ IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"`
+}
+
+func (x *VolumeTailSenderResponse) Reset() {
+ *x = VolumeTailSenderResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailSenderResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailSenderResponse) ProtoMessage() {}
+
+func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{43}
+}
+
+func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte {
+ if x != nil {
+ return x.NeedleHeader
+ }
+ return nil
+}
+
+func (x *VolumeTailSenderResponse) GetNeedleBody() []byte {
+ if x != nil {
+ return x.NeedleBody
+ }
+ return nil
+}
+
+func (x *VolumeTailSenderResponse) GetIsLastChunk() bool {
+ if x != nil {
+ return x.IsLastChunk
+ }
+ return false
+}
+
+type VolumeTailReceiverRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
+ IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"`
+ SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"`
+}
+
+func (x *VolumeTailReceiverRequest) Reset() {
+ *x = VolumeTailReceiverRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailReceiverRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailReceiverRequest) ProtoMessage() {}
+
+func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{44}
+}
+
+func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeTailReceiverRequest) GetSinceNs() uint64 {
+ if x != nil {
+ return x.SinceNs
+ }
+ return 0
+}
+
+func (x *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 {
+ if x != nil {
+ return x.IdleTimeoutSeconds
+ }
+ return 0
+}
+
+func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string {
+ if x != nil {
+ return x.SourceVolumeServer
+ }
+ return ""
+}
+
+type VolumeTailReceiverResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeTailReceiverResponse) Reset() {
+ *x = VolumeTailReceiverResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeTailReceiverResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeTailReceiverResponse) ProtoMessage() {}
+
+func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[45]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{45}
+}
+
+type VolumeEcShardsGenerateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *VolumeEcShardsGenerateRequest) Reset() {
+ *x = VolumeEcShardsGenerateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsGenerateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsGenerateRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[46]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{46}
+}
+
+func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsGenerateRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type VolumeEcShardsGenerateResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsGenerateResponse) Reset() {
+ *x = VolumeEcShardsGenerateResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsGenerateResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsGenerateResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[47]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{47}
+}
+
+type VolumeEcShardsRebuildRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *VolumeEcShardsRebuildRequest) Reset() {
+ *x = VolumeEcShardsRebuildRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsRebuildRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsRebuildRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[48]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{48}
+}
+
+func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsRebuildRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type VolumeEcShardsRebuildResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsRebuildResponse) Reset() {
+ *x = VolumeEcShardsRebuildResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsRebuildResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsRebuildResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[49]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{49}
+}
+
+func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 {
+ if x != nil {
+ return x.RebuiltShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsCopyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+ CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"`
+ SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"`
+ CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"`
+ CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"`
+}
+
+func (x *VolumeEcShardsCopyRequest) Reset() {
+ *x = VolumeEcShardsCopyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsCopyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsCopyRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[50]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{50}
+}
+
+func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsCopyRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool {
+ if x != nil {
+ return x.CopyEcxFile
+ }
+ return false
+}
+
+func (x *VolumeEcShardsCopyRequest) GetSourceDataNode() string {
+ if x != nil {
+ return x.SourceDataNode
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool {
+ if x != nil {
+ return x.CopyEcjFile
+ }
+ return false
+}
+
+func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool {
+ if x != nil {
+ return x.CopyVifFile
+ }
+ return false
+}
+
+type VolumeEcShardsCopyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsCopyResponse) Reset() {
+ *x = VolumeEcShardsCopyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsCopyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsCopyResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{51}
+}
+
+type VolumeEcShardsDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsDeleteRequest) Reset() {
+ *x = VolumeEcShardsDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[52]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{52}
+}
+
+func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsDeleteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsDeleteResponse) Reset() {
+ *x = VolumeEcShardsDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[53]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{53}
+}
+
+type VolumeEcShardsMountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsMountRequest) Reset() {
+ *x = VolumeEcShardsMountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsMountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsMountRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[54]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{54}
+}
+
+func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsMountRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsMountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsMountResponse) Reset() {
+ *x = VolumeEcShardsMountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsMountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsMountResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[55]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{55}
+}
+
+type VolumeEcShardsUnmountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"`
+}
+
+func (x *VolumeEcShardsUnmountRequest) Reset() {
+ *x = VolumeEcShardsUnmountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsUnmountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsUnmountRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[56]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{56}
+}
+
+func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 {
+ if x != nil {
+ return x.ShardIds
+ }
+ return nil
+}
+
+type VolumeEcShardsUnmountResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsUnmountResponse) Reset() {
+ *x = VolumeEcShardsUnmountResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsUnmountResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsUnmountResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[57]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{57}
+}
+
+type VolumeEcShardReadRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
+ Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
+ Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"`
+}
+
+func (x *VolumeEcShardReadRequest) Reset() {
+ *x = VolumeEcShardReadRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardReadRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardReadRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[58]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{58}
+}
+
+func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetShardId() uint32 {
+ if x != nil {
+ return x.ShardId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetOffset() int64 {
+ if x != nil {
+ return x.Offset
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *VolumeEcShardReadRequest) GetFileKey() uint64 {
+ if x != nil {
+ return x.FileKey
+ }
+ return 0
+}
+
+type VolumeEcShardReadResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"`
+}
+
+func (x *VolumeEcShardReadResponse) Reset() {
+ *x = VolumeEcShardReadResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardReadResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardReadResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[59]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{59}
+}
+
+func (x *VolumeEcShardReadResponse) GetData() []byte {
+ if x != nil {
+ return x.Data
}
return nil
}
-func (m *VolumeTailSenderResponse) GetIsLastChunk() bool {
- if m != nil {
- return m.IsLastChunk
+func (x *VolumeEcShardReadResponse) GetIsDeleted() bool {
+ if x != nil {
+ return x.IsDeleted
+ }
+ return false
+}
+
+type VolumeEcBlobDeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"`
+ Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *VolumeEcBlobDeleteRequest) Reset() {
+ *x = VolumeEcBlobDeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[60]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcBlobDeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcBlobDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[60]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{60}
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetFileKey() uint64 {
+ if x != nil {
+ return x.FileKey
+ }
+ return 0
+}
+
+func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+type VolumeEcBlobDeleteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcBlobDeleteResponse) Reset() {
+ *x = VolumeEcBlobDeleteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[61]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcBlobDeleteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcBlobDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[61]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{61}
+}
+
+type VolumeEcShardsToVolumeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *VolumeEcShardsToVolumeRequest) Reset() {
+ *x = VolumeEcShardsToVolumeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsToVolumeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {}
+
+func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[62]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{62}
+}
+
+func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *VolumeEcShardsToVolumeRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+type VolumeEcShardsToVolumeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeEcShardsToVolumeResponse) Reset() {
+ *x = VolumeEcShardsToVolumeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[63]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VolumeEcShardsToVolumeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {}
+
+func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[63]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead.
+func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{63}
+}
+
+type ReadVolumeFileStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *ReadVolumeFileStatusRequest) Reset() {
+ *x = ReadVolumeFileStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadVolumeFileStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadVolumeFileStatusRequest) ProtoMessage() {}
+
+func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[64]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead.
+func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{64}
+}
+
+func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+type ReadVolumeFileStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"`
+ IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"`
+ DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"`
+ DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"`
+ FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"`
+ CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"`
+ Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"`
+ DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *ReadVolumeFileStatusResponse) Reset() {
+ *x = ReadVolumeFileStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[65]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadVolumeFileStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadVolumeFileStatusResponse) ProtoMessage() {}
+
+func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[65]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead.
+func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{65}
+}
+
+func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 {
+ if x != nil {
+ return x.IdxFileTimestampSeconds
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 {
+ if x != nil {
+ return x.IdxFileSize
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 {
+ if x != nil {
+ return x.DatFileTimestampSeconds
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 {
+ if x != nil {
+ return x.DatFileSize
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetFileCount() uint64 {
+ if x != nil {
+ return x.FileCount
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 {
+ if x != nil {
+ return x.CompactionRevision
+ }
+ return 0
+}
+
+func (x *ReadVolumeFileStatusResponse) GetCollection() string {
+ if x != nil {
+ return x.Collection
+ }
+ return ""
+}
+
+func (x *ReadVolumeFileStatusResponse) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type DiskStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
+ All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
+ Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"`
+ Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"`
+ PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"`
+ PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"`
+ DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *DiskStatus) Reset() {
+ *x = DiskStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[66]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DiskStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DiskStatus) ProtoMessage() {}
+
+func (x *DiskStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[66]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead.
+func (*DiskStatus) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{66}
+}
+
+func (x *DiskStatus) GetDir() string {
+ if x != nil {
+ return x.Dir
+ }
+ return ""
+}
+
+func (x *DiskStatus) GetAll() uint64 {
+ if x != nil {
+ return x.All
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetUsed() uint64 {
+ if x != nil {
+ return x.Used
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetFree() uint64 {
+ if x != nil {
+ return x.Free
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetPercentFree() float32 {
+ if x != nil {
+ return x.PercentFree
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetPercentUsed() float32 {
+ if x != nil {
+ return x.PercentUsed
+ }
+ return 0
+}
+
+func (x *DiskStatus) GetDiskType() string {
+ if x != nil {
+ return x.DiskType
+ }
+ return ""
+}
+
+type MemStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"`
+ All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
+ Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"`
+ Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"`
+ Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"`
+ Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"`
+ Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"`
+}
+
+func (x *MemStatus) Reset() {
+ *x = MemStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[67]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemStatus) ProtoMessage() {}
+
+func (x *MemStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[67]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemStatus.ProtoReflect.Descriptor instead.
+func (*MemStatus) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{67}
+}
+
+func (x *MemStatus) GetGoroutines() int32 {
+ if x != nil {
+ return x.Goroutines
+ }
+ return 0
+}
+
+func (x *MemStatus) GetAll() uint64 {
+ if x != nil {
+ return x.All
+ }
+ return 0
+}
+
+func (x *MemStatus) GetUsed() uint64 {
+ if x != nil {
+ return x.Used
+ }
+ return 0
+}
+
+func (x *MemStatus) GetFree() uint64 {
+ if x != nil {
+ return x.Free
+ }
+ return 0
+}
+
+func (x *MemStatus) GetSelf() uint64 {
+ if x != nil {
+ return x.Self
+ }
+ return 0
+}
+
+func (x *MemStatus) GetHeap() uint64 {
+ if x != nil {
+ return x.Heap
+ }
+ return 0
+}
+
+func (x *MemStatus) GetStack() uint64 {
+ if x != nil {
+ return x.Stack
+ }
+ return 0
+}
+
+// tiered storage on volume servers
+type RemoteFile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"`
+ BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"`
+ Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+ FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+ ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"`
+ Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"`
+}
+
+func (x *RemoteFile) Reset() {
+ *x = RemoteFile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[68]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoteFile) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoteFile) ProtoMessage() {}
+
+func (x *RemoteFile) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[68]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead.
+func (*RemoteFile) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{68}
+}
+
+func (x *RemoteFile) GetBackendType() string {
+ if x != nil {
+ return x.BackendType
+ }
+ return ""
+}
+
+func (x *RemoteFile) GetBackendId() string {
+ if x != nil {
+ return x.BackendId
}
- return false
+ return ""
}
-type VolumeTailReceiverRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
- IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"`
- SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer" json:"source_volume_server,omitempty"`
+func (x *RemoteFile) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
}
-func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} }
-func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeTailReceiverRequest) ProtoMessage() {}
-func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
-
-func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *RemoteFile) GetOffset() uint64 {
+ if x != nil {
+ return x.Offset
}
return 0
}
-func (m *VolumeTailReceiverRequest) GetSinceNs() uint64 {
- if m != nil {
- return m.SinceNs
+func (x *RemoteFile) GetFileSize() uint64 {
+ if x != nil {
+ return x.FileSize
}
return 0
}
-func (m *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 {
- if m != nil {
- return m.IdleTimeoutSeconds
+func (x *RemoteFile) GetModifiedTime() uint64 {
+ if x != nil {
+ return x.ModifiedTime
}
return 0
}
-func (m *VolumeTailReceiverRequest) GetSourceVolumeServer() string {
- if m != nil {
- return m.SourceVolumeServer
+func (x *RemoteFile) GetExtension() string {
+ if x != nil {
+ return x.Extension
}
return ""
}
-type VolumeTailReceiverResponse struct {
-}
-
-func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} }
-func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeTailReceiverResponse) ProtoMessage() {}
-func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+type VolumeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type VolumeEcShardsGenerateRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+ Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
}
-func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} }
-func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsGenerateRequest) ProtoMessage() {}
-func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
-
-func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeInfo) Reset() {
+ *x = VolumeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[69]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *VolumeEcShardsGenerateRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+func (x *VolumeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type VolumeEcShardsGenerateResponse struct {
-}
+func (*VolumeInfo) ProtoMessage() {}
-func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} }
-func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsGenerateResponse) ProtoMessage() {}
-func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+func (x *VolumeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[69]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-type VolumeEcShardsRebuildRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+// Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead.
+func (*VolumeInfo) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{69}
}
-func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} }
-func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsRebuildRequest) ProtoMessage() {}
-func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+func (x *VolumeInfo) GetFiles() []*RemoteFile {
+ if x != nil {
+ return x.Files
+ }
+ return nil
+}
-func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeInfo) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
}
return 0
}
-func (m *VolumeEcShardsRebuildRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeInfo) GetReplication() string {
+ if x != nil {
+ return x.Replication
}
return ""
}
-type VolumeEcShardsRebuildResponse struct {
- RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds" json:"rebuilt_shard_ids,omitempty"`
-}
+type VolumeTierMoveDatToRemoteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} }
-func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsRebuildResponse) ProtoMessage() {}
-func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"`
+ KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"`
+}
-func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 {
- if m != nil {
- return m.RebuiltShardIds
+func (x *VolumeTierMoveDatToRemoteRequest) Reset() {
+ *x = VolumeTierMoveDatToRemoteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type VolumeEcShardsCopyRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"`
- CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile" json:"copy_ecx_file,omitempty"`
- SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"`
- CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile" json:"copy_ecj_file,omitempty"`
- CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile" json:"copy_vif_file,omitempty"`
+func (x *VolumeTierMoveDatToRemoteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} }
-func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsCopyRequest) ProtoMessage() {}
-func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
+func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {}
-func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[70]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VolumeEcShardsCopyRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{70}
}
-func (m *VolumeEcShardsCopyRequest) GetShardIds() []uint32 {
- if m != nil {
- return m.ShardIds
+func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
- return nil
+ return 0
}
-func (m *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool {
- if m != nil {
- return m.CopyEcxFile
+func (x *VolumeTierMoveDatToRemoteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
- return false
+ return ""
}
-func (m *VolumeEcShardsCopyRequest) GetSourceDataNode() string {
- if m != nil {
- return m.SourceDataNode
+func (x *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string {
+ if x != nil {
+ return x.DestinationBackendName
}
return ""
}
-func (m *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool {
- if m != nil {
- return m.CopyEcjFile
+func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool {
+ if x != nil {
+ return x.KeepLocalDatFile
}
return false
}
-func (m *VolumeEcShardsCopyRequest) GetCopyVifFile() bool {
- if m != nil {
- return m.CopyVifFile
+type VolumeTierMoveDatToRemoteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"`
+ ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"`
+}
+
+func (x *VolumeTierMoveDatToRemoteResponse) Reset() {
+ *x = VolumeTierMoveDatToRemoteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return false
}
-type VolumeEcShardsCopyResponse struct {
+func (x *VolumeTierMoveDatToRemoteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} }
-func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsCopyResponse) ProtoMessage() {}
-func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
+func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {}
-type VolumeEcShardsDeleteRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"`
+func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[71]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} }
-func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsDeleteRequest) ProtoMessage() {}
-func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
+// Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{71}
+}
-func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
+ if x != nil {
+ return x.Processed
}
return 0
}
-func (m *VolumeEcShardsDeleteRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 {
+ if x != nil {
+ return x.ProcessedPercentage
}
- return ""
+ return 0
+}
+
+type VolumeTierMoveDatFromRemoteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"`
}
-func (m *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 {
- if m != nil {
- return m.ShardIds
+func (x *VolumeTierMoveDatFromRemoteRequest) Reset() {
+ *x = VolumeTierMoveDatFromRemoteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[72]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type VolumeEcShardsDeleteResponse struct {
+func (x *VolumeTierMoveDatFromRemoteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} }
-func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsDeleteResponse) ProtoMessage() {}
-func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
+func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {}
-type VolumeEcShardsMountRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"`
+func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[72]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} }
-func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsMountRequest) ProtoMessage() {}
-func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }
+// Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{72}
+}
-func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *VolumeEcShardsMountRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeTierMoveDatFromRemoteRequest) GetCollection() string {
+ if x != nil {
+ return x.Collection
}
return ""
}
-func (m *VolumeEcShardsMountRequest) GetShardIds() []uint32 {
- if m != nil {
- return m.ShardIds
+func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool {
+ if x != nil {
+ return x.KeepRemoteDatFile
}
- return nil
-}
-
-type VolumeEcShardsMountResponse struct {
-}
-
-func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} }
-func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsMountResponse) ProtoMessage() {}
-func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }
-
-type VolumeEcShardsUnmountRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"`
+ return false
}
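
Every message regenerated in this hunk follows the same shape: the struct gains the protoimpl state/sizeCache/unknownFields header, Reset and String are delegated to protoimpl, a ProtoReflect method becomes the new reflection entry point, and Descriptor() survives only as a deprecated alias. A minimal sketch of the new path, assuming the usual SeaweedFS import path (github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	req := &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{VolumeId: 7}

	// ProtoReflect is the APIv2 entry point; the deprecated Descriptor()
	// above remains only for callers still using the APIv1-style lookup.
	md := req.ProtoReflect().Descriptor()
	fmt.Println(md.FullName())     // volume_server_pb.VolumeTierMoveDatFromRemoteRequest
	fmt.Println(md.Fields().Len()) // 3: volume_id, collection, keep_remote_dat_file
}
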
-func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} }
-func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsUnmountRequest) ProtoMessage() {}
-func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} }
+type VolumeTierMoveDatFromRemoteResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
- }
- return 0
+ Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"`
+ ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"`
}
-func (m *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 {
- if m != nil {
- return m.ShardIds
+func (x *VolumeTierMoveDatFromRemoteResponse) Reset() {
+ *x = VolumeTierMoveDatFromRemoteResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type VolumeEcShardsUnmountResponse struct {
+func (x *VolumeTierMoveDatFromRemoteResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} }
-func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsUnmountResponse) ProtoMessage() {}
-func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} }
+func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {}
-type VolumeEcShardReadRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"`
- Offset int64 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"`
- Size int64 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"`
- FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey" json:"file_key,omitempty"`
+func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[73]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} }
-func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardReadRequest) ProtoMessage() {}
-func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} }
+// Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{73}
+}
-func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 {
+ if x != nil {
+ return x.Processed
}
return 0
}
-func (m *VolumeEcShardReadRequest) GetShardId() uint32 {
- if m != nil {
- return m.ShardId
+func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 {
+ if x != nil {
+ return x.ProcessedPercentage
}
return 0
}
-func (m *VolumeEcShardReadRequest) GetOffset() int64 {
- if m != nil {
- return m.Offset
- }
- return 0
+type VolumeServerStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VolumeEcShardReadRequest) GetSize() int64 {
- if m != nil {
- return m.Size
+func (x *VolumeServerStatusRequest) Reset() {
+ *x = VolumeServerStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[74]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *VolumeEcShardReadRequest) GetFileKey() uint64 {
- if m != nil {
- return m.FileKey
+func (x *VolumeServerStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeServerStatusRequest) ProtoMessage() {}
+
+func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[74]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type VolumeEcShardReadResponse struct {
- Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
- IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted" json:"is_deleted,omitempty"`
+// Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{74}
}
-func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} }
-func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardReadResponse) ProtoMessage() {}
-func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} }
+type VolumeServerStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeEcShardReadResponse) GetData() []byte {
- if m != nil {
- return m.Data
- }
- return nil
+ DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"`
+ MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"`
}
-func (m *VolumeEcShardReadResponse) GetIsDeleted() bool {
- if m != nil {
- return m.IsDeleted
+func (x *VolumeServerStatusResponse) Reset() {
+ *x = VolumeServerStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[75]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return false
}
-type VolumeEcBlobDeleteRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey" json:"file_key,omitempty"`
- Version uint32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+func (x *VolumeServerStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} }
-func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcBlobDeleteRequest) ProtoMessage() {}
-func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} }
+func (*VolumeServerStatusResponse) ProtoMessage() {}
-func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[75]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VolumeEcBlobDeleteRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{75}
}
-func (m *VolumeEcBlobDeleteRequest) GetFileKey() uint64 {
- if m != nil {
- return m.FileKey
+func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus {
+ if x != nil {
+ return x.DiskStatuses
}
- return 0
+ return nil
}
-func (m *VolumeEcBlobDeleteRequest) GetVersion() uint32 {
- if m != nil {
- return m.Version
+func (x *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus {
+ if x != nil {
+ return x.MemoryStatus
}
- return 0
+ return nil
}
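
VolumeServerStatusRequest/Response back the VolumeServerStatus RPC. A hedged sketch of calling it through the generated client stub; NewVolumeServerClient and the RPC method live elsewhere in this file and are assumed here, and the address and port are illustrative only:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// localhost:18080 is only an example gRPC address for a volume server.
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)
	resp, err := client.VolumeServerStatus(context.Background(), &volume_server_pb.VolumeServerStatusRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range resp.GetDiskStatuses() {
		fmt.Printf("%s: used %d of %d bytes\n", d.GetDir(), d.GetUsed(), d.GetAll())
	}
}
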
-type VolumeEcBlobDeleteResponse struct {
+type VolumeServerLeaveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} }
-func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcBlobDeleteResponse) ProtoMessage() {}
-func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} }
+func (x *VolumeServerLeaveRequest) Reset() {
+ *x = VolumeServerLeaveRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[76]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type VolumeEcShardsToVolumeRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+func (x *VolumeServerLeaveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} }
-func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {}
-func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} }
+func (*VolumeServerLeaveRequest) ProtoMessage() {}
-func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[76]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *VolumeEcShardsToVolumeRequest) GetCollection() string {
- if m != nil {
- return m.Collection
- }
- return ""
+// Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead.
+func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{76}
}
-type VolumeEcShardsToVolumeResponse struct {
+type VolumeServerLeaveResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} }
-func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {}
-func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} }
+func (x *VolumeServerLeaveResponse) Reset() {
+ *x = VolumeServerLeaveResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[77]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
-type ReadVolumeFileStatusRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+func (x *VolumeServerLeaveResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} }
-func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*ReadVolumeFileStatusRequest) ProtoMessage() {}
-func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} }
+func (*VolumeServerLeaveResponse) ProtoMessage() {}
-func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[77]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-type ReadVolumeFileStatusResponse struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds" json:"idx_file_timestamp_seconds,omitempty"`
- IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"`
- DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds" json:"dat_file_timestamp_seconds,omitempty"`
- DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize" json:"dat_file_size,omitempty"`
- FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
- CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"`
- Collection string `protobuf:"bytes,8,opt,name=collection" json:"collection,omitempty"`
+// Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead.
+func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{77}
}
-func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} }
-func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) }
-func (*ReadVolumeFileStatusResponse) ProtoMessage() {}
-func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
+// select on volume servers
+type QueryRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
- }
- return 0
+ Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"`
+ FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"`
+ Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
+ InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"`
+ OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"`
}
-func (m *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 {
- if m != nil {
- return m.IdxFileTimestampSeconds
+func (x *QueryRequest) Reset() {
+ *x = QueryRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[78]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 {
- if m != nil {
- return m.IdxFileSize
- }
- return 0
+func (x *QueryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 {
- if m != nil {
- return m.DatFileTimestampSeconds
- }
- return 0
-}
+func (*QueryRequest) ProtoMessage() {}
-func (m *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 {
- if m != nil {
- return m.DatFileSize
+func (x *QueryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[78]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *ReadVolumeFileStatusResponse) GetFileCount() uint64 {
- if m != nil {
- return m.FileCount
- }
- return 0
+// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead.
+func (*QueryRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{78}
}
-func (m *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 {
- if m != nil {
- return m.CompactionRevision
+func (x *QueryRequest) GetSelections() []string {
+ if x != nil {
+ return x.Selections
}
- return 0
+ return nil
}
-func (m *ReadVolumeFileStatusResponse) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *QueryRequest) GetFromFileIds() []string {
+ if x != nil {
+ return x.FromFileIds
}
- return ""
-}
-
-type DiskStatus struct {
- Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
- All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
- Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"`
- Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"`
+ return nil
}
-func (m *DiskStatus) Reset() { *m = DiskStatus{} }
-func (m *DiskStatus) String() string { return proto.CompactTextString(m) }
-func (*DiskStatus) ProtoMessage() {}
-func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} }
-
-func (m *DiskStatus) GetDir() string {
- if m != nil {
- return m.Dir
+func (x *QueryRequest) GetFilter() *QueryRequest_Filter {
+ if x != nil {
+ return x.Filter
}
- return ""
+ return nil
}
-func (m *DiskStatus) GetAll() uint64 {
- if m != nil {
- return m.All
+func (x *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization {
+ if x != nil {
+ return x.InputSerialization
}
- return 0
+ return nil
}
-func (m *DiskStatus) GetUsed() uint64 {
- if m != nil {
- return m.Used
+func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization {
+ if x != nil {
+ return x.OutputSerialization
}
- return 0
+ return nil
}
-func (m *DiskStatus) GetFree() uint64 {
- if m != nil {
- return m.Free
- }
- return 0
-}
+type QueriedStripe struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type MemStatus struct {
- Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"`
- All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
- Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"`
- Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"`
- Self uint64 `protobuf:"varint,5,opt,name=self" json:"self,omitempty"`
- Heap uint64 `protobuf:"varint,6,opt,name=heap" json:"heap,omitempty"`
- Stack uint64 `protobuf:"varint,7,opt,name=stack" json:"stack,omitempty"`
+ Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"`
}
-func (m *MemStatus) Reset() { *m = MemStatus{} }
-func (m *MemStatus) String() string { return proto.CompactTextString(m) }
-func (*MemStatus) ProtoMessage() {}
-func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} }
-
-func (m *MemStatus) GetGoroutines() int32 {
- if m != nil {
- return m.Goroutines
+func (x *QueriedStripe) Reset() {
+ *x = QueriedStripe{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[79]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *MemStatus) GetAll() uint64 {
- if m != nil {
- return m.All
- }
- return 0
+func (x *QueriedStripe) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MemStatus) GetUsed() uint64 {
- if m != nil {
- return m.Used
+func (*QueriedStripe) ProtoMessage() {}
+
+func (x *QueriedStripe) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[79]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *MemStatus) GetFree() uint64 {
- if m != nil {
- return m.Free
- }
- return 0
+// Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead.
+func (*QueriedStripe) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{79}
}
-func (m *MemStatus) GetSelf() uint64 {
- if m != nil {
- return m.Self
+func (x *QueriedStripe) GetRecords() []byte {
+ if x != nil {
+ return x.Records
}
- return 0
+ return nil
}
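
QueriedStripe is just a carrier for raw result bytes streamed back by the Query RPC. A hypothetical helper (collectRecords is not part of the generated code) that concatenates a batch of stripes:

package main

import "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"

// collectRecords is a hypothetical helper: it concatenates the raw record
// bytes from a slice of stripes, e.g. ones drained from the Query stream.
func collectRecords(stripes []*volume_server_pb.QueriedStripe) []byte {
	var out []byte
	for _, s := range stripes {
		out = append(out, s.GetRecords()...)
	}
	return out
}
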
-func (m *MemStatus) GetHeap() uint64 {
- if m != nil {
- return m.Heap
- }
- return 0
+type VolumeNeedleStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"`
}
-func (m *MemStatus) GetStack() uint64 {
- if m != nil {
- return m.Stack
+func (x *VolumeNeedleStatusRequest) Reset() {
+ *x = VolumeNeedleStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[80]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-// tired storage on volume servers
-type RemoteFile struct {
- BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType" json:"backend_type,omitempty"`
- BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId" json:"backend_id,omitempty"`
- Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"`
- Offset uint64 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"`
- FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
- ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime" json:"modified_time,omitempty"`
- Extension string `protobuf:"bytes,7,opt,name=extension" json:"extension,omitempty"`
+func (x *VolumeNeedleStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *RemoteFile) Reset() { *m = RemoteFile{} }
-func (m *RemoteFile) String() string { return proto.CompactTextString(m) }
-func (*RemoteFile) ProtoMessage() {}
-func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} }
+func (*VolumeNeedleStatusRequest) ProtoMessage() {}
-func (m *RemoteFile) GetBackendType() string {
- if m != nil {
- return m.BackendType
+func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[80]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
-}
-
-func (m *RemoteFile) GetBackendId() string {
- if m != nil {
- return m.BackendId
- }
- return ""
+ return mi.MessageOf(x)
}
-func (m *RemoteFile) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
+// Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{80}
}
-func (m *RemoteFile) GetOffset() uint64 {
- if m != nil {
- return m.Offset
+func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 {
+ if x != nil {
+ return x.VolumeId
}
return 0
}
-func (m *RemoteFile) GetFileSize() uint64 {
- if m != nil {
- return m.FileSize
+func (x *VolumeNeedleStatusRequest) GetNeedleId() uint64 {
+ if x != nil {
+ return x.NeedleId
}
return 0
}
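
Because the regenerated types implement proto.Message through ProtoReflect, they marshal directly with the new google.golang.org/protobuf/proto package. A small round-trip sketch, again assuming the SeaweedFS import path:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	in := &volume_server_pb.VolumeNeedleStatusRequest{VolumeId: 3, NeedleId: 0x1234}

	// proto.Marshal dispatches through the ProtoReflect method added above.
	b, err := proto.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	out := &volume_server_pb.VolumeNeedleStatusRequest{}
	if err := proto.Unmarshal(b, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetVolumeId(), out.GetNeedleId()) // 3 4660
}
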
-func (m *RemoteFile) GetModifiedTime() uint64 {
- if m != nil {
- return m.ModifiedTime
- }
- return 0
+type VolumeNeedleStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NeedleId uint64 `protobuf:"varint,1,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"`
+ Cookie uint32 `protobuf:"varint,2,opt,name=cookie,proto3" json:"cookie,omitempty"`
+ Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+ LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
+ Crc uint32 `protobuf:"varint,5,opt,name=crc,proto3" json:"crc,omitempty"`
+ Ttl string `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"`
}
-func (m *RemoteFile) GetExtension() string {
- if m != nil {
- return m.Extension
+func (x *VolumeNeedleStatusResponse) Reset() {
+ *x = VolumeNeedleStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[81]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type VolumeInfo struct {
- Files []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"`
- Version uint32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
+func (x *VolumeNeedleStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *VolumeInfo) Reset() { *m = VolumeInfo{} }
-func (m *VolumeInfo) String() string { return proto.CompactTextString(m) }
-func (*VolumeInfo) ProtoMessage() {}
-func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} }
+func (*VolumeNeedleStatusResponse) ProtoMessage() {}
-func (m *VolumeInfo) GetFiles() []*RemoteFile {
- if m != nil {
- return m.Files
+func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[81]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (m *VolumeInfo) GetVersion() uint32 {
- if m != nil {
- return m.Version
- }
- return 0
+// Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{81}
}
-type VolumeTierMoveDatToRemoteRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName" json:"destination_backend_name,omitempty"`
- KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile" json:"keep_local_dat_file,omitempty"`
+func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 {
+ if x != nil {
+ return x.NeedleId
+ }
+ return 0
}
-func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMoveDatToRemoteRequest{} }
-func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {}
-func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{60}
+func (x *VolumeNeedleStatusResponse) GetCookie() uint32 {
+ if x != nil {
+ return x.Cookie
+ }
+ return 0
}
-func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
+func (x *VolumeNeedleStatusResponse) GetSize() uint32 {
+ if x != nil {
+ return x.Size
}
return 0
}
-func (m *VolumeTierMoveDatToRemoteRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *VolumeNeedleStatusResponse) GetLastModified() uint64 {
+ if x != nil {
+ return x.LastModified
}
- return ""
+ return 0
}
-func (m *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string {
- if m != nil {
- return m.DestinationBackendName
+func (x *VolumeNeedleStatusResponse) GetCrc() uint32 {
+ if x != nil {
+ return x.Crc
}
- return ""
+ return 0
}
-func (m *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool {
- if m != nil {
- return m.KeepLocalDatFile
+func (x *VolumeNeedleStatusResponse) GetTtl() string {
+ if x != nil {
+ return x.Ttl
}
- return false
+ return ""
}
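
The rewritten getters keep the old nil-receiver guard, only with x in place of m, so reading through a nil message still yields zero values rather than a panic:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// Calling getters on a typed nil pointer is safe by construction.
	var resp *volume_server_pb.VolumeNeedleStatusResponse
	fmt.Println(resp.GetNeedleId(), resp.GetSize(), resp.GetTtl()) // 0 0 ""

	resp = &volume_server_pb.VolumeNeedleStatusResponse{NeedleId: 42, Ttl: "3d"}
	fmt.Println(resp.GetNeedleId(), resp.GetTtl()) // 42 3d
}
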
-type VolumeTierMoveDatToRemoteResponse struct {
- Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"`
- ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"`
-}
+type QueryRequest_Filter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMoveDatToRemoteResponse{} }
-func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {}
-func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{61}
+ Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+ Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"`
+ Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
- if m != nil {
- return m.Processed
+func (x *QueryRequest_Filter) Reset() {
+ *x = QueryRequest_Filter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[82]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (m *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 {
- if m != nil {
- return m.ProcessedPercentage
- }
- return 0
+func (x *QueryRequest_Filter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type VolumeTierMoveDatFromRemoteRequest struct {
- VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
- KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile" json:"keep_remote_dat_file,omitempty"`
-}
+func (*QueryRequest_Filter) ProtoMessage() {}
-func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMoveDatFromRemoteRequest{} }
-func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {}
-func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{62}
+func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[82]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
- if m != nil {
- return m.VolumeId
- }
- return 0
+// Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead.
+func (*QueryRequest_Filter) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 0}
}
-func (m *VolumeTierMoveDatFromRemoteRequest) GetCollection() string {
- if m != nil {
- return m.Collection
+func (x *QueryRequest_Filter) GetField() string {
+ if x != nil {
+ return x.Field
}
return ""
}
-func (m *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool {
- if m != nil {
- return m.KeepRemoteDatFile
+func (x *QueryRequest_Filter) GetOperand() string {
+ if x != nil {
+ return x.Operand
}
- return false
+ return ""
}
-type VolumeTierMoveDatFromRemoteResponse struct {
- Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"`
- ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"`
+func (x *QueryRequest_Filter) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
}
-func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierMoveDatFromRemoteResponse{} }
-func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {}
-func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{63}
-}
+type QueryRequest_InputSerialization struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 {
- if m != nil {
- return m.Processed
- }
- return 0
+ // NONE | GZIP | BZIP2
+ CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"`
+ CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"`
+ JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" json:"json_input,omitempty"`
+ ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"`
}
-func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 {
- if m != nil {
- return m.ProcessedPercentage
+func (x *QueryRequest_InputSerialization) Reset() {
+ *x = QueryRequest_InputSerialization{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[83]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-// select on volume servers
-type QueryRequest struct {
- Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"`
- FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds" json:"from_file_ids,omitempty"`
- Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter" json:"filter,omitempty"`
- InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization" json:"input_serialization,omitempty"`
- OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization" json:"output_serialization,omitempty"`
+func (x *QueryRequest_InputSerialization) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *QueryRequest) Reset() { *m = QueryRequest{} }
-func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest) ProtoMessage() {}
-func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} }
+func (*QueryRequest_InputSerialization) ProtoMessage() {}
-func (m *QueryRequest) GetSelections() []string {
- if m != nil {
- return m.Selections
+func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[83]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (m *QueryRequest) GetFromFileIds() []string {
- if m != nil {
- return m.FromFileIds
- }
- return nil
+// Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 1}
}
-func (m *QueryRequest) GetFilter() *QueryRequest_Filter {
- if m != nil {
- return m.Filter
+func (x *QueryRequest_InputSerialization) GetCompressionType() string {
+ if x != nil {
+ return x.CompressionType
}
- return nil
+ return ""
}
-func (m *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization {
- if m != nil {
- return m.InputSerialization
+func (x *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput {
+ if x != nil {
+ return x.CsvInput
}
return nil
}
-func (m *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization {
- if m != nil {
- return m.OutputSerialization
+func (x *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput {
+ if x != nil {
+ return x.JsonInput
}
return nil
}
-type QueryRequest_Filter struct {
- Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"`
- Operand string `protobuf:"bytes,2,opt,name=operand" json:"operand,omitempty"`
- Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} }
-func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_Filter) ProtoMessage() {}
-func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64, 0} }
-
-func (m *QueryRequest_Filter) GetField() string {
- if m != nil {
- return m.Field
+func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput {
+ if x != nil {
+ return x.ParquetInput
}
- return ""
+ return nil
}
-func (m *QueryRequest_Filter) GetOperand() string {
- if m != nil {
- return m.Operand
- }
- return ""
+type QueryRequest_OutputSerialization struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"`
+ JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"`
}
-func (m *QueryRequest_Filter) GetValue() string {
- if m != nil {
- return m.Value
+func (x *QueryRequest_OutputSerialization) Reset() {
+ *x = QueryRequest_OutputSerialization{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[84]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return ""
}
-type QueryRequest_InputSerialization struct {
- // NONE | GZIP | BZIP2
- CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType" json:"compression_type,omitempty"`
- CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput" json:"csv_input,omitempty"`
- JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput" json:"json_input,omitempty"`
- ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput" json:"parquet_input,omitempty"`
+func (x *QueryRequest_OutputSerialization) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_InputSerialization{} }
-func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_InputSerialization) ProtoMessage() {}
-func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 1}
-}
+func (*QueryRequest_OutputSerialization) ProtoMessage() {}
-func (m *QueryRequest_InputSerialization) GetCompressionType() string {
- if m != nil {
- return m.CompressionType
+func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[84]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput {
- if m != nil {
- return m.CsvInput
- }
- return nil
+// Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead.
+func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 2}
}
-func (m *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput {
- if m != nil {
- return m.JsonInput
+func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
+ if x != nil {
+ return x.CsvOutput
}
return nil
}
-func (m *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput {
- if m != nil {
- return m.ParquetInput
+func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput {
+ if x != nil {
+ return x.JsonOutput
}
return nil
}
type QueryRequest_InputSerialization_CSVInput struct {
- FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo" json:"file_header_info,omitempty"`
- RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"`
- FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter" json:"field_delimiter,omitempty"`
- QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer" json:"quote_charactoer,omitempty"`
- QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter" json:"quote_escape_character,omitempty"`
- Comments string `protobuf:"bytes,6,opt,name=comments" json:"comments,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE
+ RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n
+ FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: ,
+ QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: "
+ QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: "
+ Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: #
// If true, records might contain record delimiters within quote characters
- AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter" json:"allow_quoted_record_delimiter,omitempty"`
+ AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // default False.
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) Reset() {
+ *x = QueryRequest_InputSerialization_CSVInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[85]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *QueryRequest_InputSerialization_CSVInput) Reset() {
- *m = QueryRequest_InputSerialization_CSVInput{}
+func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[85]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
+
+// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 1, 0}
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 0}
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
+ if x != nil {
+ return x.FileHeaderInfo
+ }
+ return ""
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
- if m != nil {
- return m.FileHeaderInfo
+func (x *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string {
+ if x != nil {
+ return x.RecordDelimiter
}
return ""
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string {
- if m != nil {
- return m.RecordDelimiter
+func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string {
+ if x != nil {
+ return x.FieldDelimiter
}
return ""
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string {
- if m != nil {
- return m.FieldDelimiter
+func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string {
+ if x != nil {
+ return x.QuoteCharactoer
}
return ""
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string {
- if m != nil {
- return m.QuoteCharactoer
+func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string {
+ if x != nil {
+ return x.QuoteEscapeCharacter
}
return ""
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string {
- if m != nil {
- return m.QuoteEscapeCharacter
+func (x *QueryRequest_InputSerialization_CSVInput) GetComments() string {
+ if x != nil {
+ return x.Comments
}
return ""
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetComments() string {
- if m != nil {
- return m.Comments
- }
- return ""
+func (x *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool {
+ if x != nil {
+ return x.AllowQuotedRecordDelimiter
+ }
+ return false
+}
+
+type QueryRequest_InputSerialization_JSONInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES
}
-func (m *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool {
- if m != nil {
- return m.AllowQuotedRecordDelimiter
+func (x *QueryRequest_InputSerialization_JSONInput) Reset() {
+ *x = QueryRequest_InputSerialization_JSONInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[86]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return false
}
-type QueryRequest_InputSerialization_JSONInput struct {
- Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+func (x *QueryRequest_InputSerialization_JSONInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *QueryRequest_InputSerialization_JSONInput) Reset() {
- *m = QueryRequest_InputSerialization_JSONInput{}
+func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[86]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
+
+// Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 1, 1}
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 1}
}
-func (m *QueryRequest_InputSerialization_JSONInput) GetType() string {
- if m != nil {
- return m.Type
+func (x *QueryRequest_InputSerialization_JSONInput) GetType() string {
+ if x != nil {
+ return x.Type
}
return ""
}
type QueryRequest_InputSerialization_ParquetInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
}
-func (m *QueryRequest_InputSerialization_ParquetInput) Reset() {
- *m = QueryRequest_InputSerialization_ParquetInput{}
-}
-func (m *QueryRequest_InputSerialization_ParquetInput) String() string {
- return proto.CompactTextString(m)
-}
-func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
-func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 1, 2}
+func (x *QueryRequest_InputSerialization_ParquetInput) Reset() {
+ *x = QueryRequest_InputSerialization_ParquetInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[87]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-type QueryRequest_OutputSerialization struct {
- CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput" json:"csv_output,omitempty"`
- JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput" json:"json_output,omitempty"`
+func (x *QueryRequest_InputSerialization_ParquetInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_OutputSerialization{} }
-func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_OutputSerialization) ProtoMessage() {}
-func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 2}
-}
+func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
-func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
- if m != nil {
- return m.CsvOutput
+func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[87]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (m *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput {
- if m != nil {
- return m.JsonOutput
- }
- return nil
+// Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 2}
}
type QueryRequest_OutputSerialization_CSVOutput struct {
- QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields" json:"quote_fields,omitempty"`
- RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"`
- FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter" json:"field_delimiter,omitempty"`
- QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer" json:"quote_charactoer,omitempty"`
- QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter" json:"quote_escape_character,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED
+ RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n
+ FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: ,
+ QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: "
+ QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: "
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) Reset() {
- *m = QueryRequest_OutputSerialization_CSVOutput{}
+func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() {
+ *x = QueryRequest_OutputSerialization_CSVOutput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[88]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) String() string {
- return proto.CompactTextString(m)
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
+
func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[88]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead.
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 2, 0}
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 0}
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
- if m != nil {
- return m.QuoteFields
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
+ if x != nil {
+ return x.QuoteFields
}
return ""
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string {
- if m != nil {
- return m.RecordDelimiter
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string {
+ if x != nil {
+ return x.RecordDelimiter
}
return ""
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string {
- if m != nil {
- return m.FieldDelimiter
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string {
+ if x != nil {
+ return x.FieldDelimiter
}
return ""
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string {
- if m != nil {
- return m.QuoteCharactoer
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string {
+ if x != nil {
+ return x.QuoteCharactoer
}
return ""
}
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string {
- if m != nil {
- return m.QuoteEscapeCharacter
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string {
+ if x != nil {
+ return x.QuoteEscapeCharacter
}
return ""
}
type QueryRequest_OutputSerialization_JSONOutput struct {
- RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"`
}
-func (m *QueryRequest_OutputSerialization_JSONOutput) Reset() {
- *m = QueryRequest_OutputSerialization_JSONOutput{}
+func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() {
+ *x = QueryRequest_OutputSerialization_JSONOutput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_volume_server_proto_msgTypes[89]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *QueryRequest_OutputSerialization_JSONOutput) String() string {
- return proto.CompactTextString(m)
+
+func (x *QueryRequest_OutputSerialization_JSONOutput) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
+
func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {}
-func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64, 2, 1}
-}
-func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
- if m != nil {
- return m.RecordDelimiter
+func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message {
+ mi := &file_volume_server_proto_msgTypes[89]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-type QueriedStripe struct {
- Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"`
+// Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
+ return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 1}
}
-func (m *QueriedStripe) Reset() { *m = QueriedStripe{} }
-func (m *QueriedStripe) String() string { return proto.CompactTextString(m) }
-func (*QueriedStripe) ProtoMessage() {}
-func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} }
-
-func (m *QueriedStripe) GetRecords() []byte {
- if m != nil {
- return m.Records
+func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
+ if x != nil {
+ return x.RecordDelimiter
}
- return nil
+ return ""
}
-func init() {
- proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest")
- proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse")
- proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult")
- proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty")
- proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest")
- proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse")
- proto.RegisterType((*VacuumVolumeCompactRequest)(nil), "volume_server_pb.VacuumVolumeCompactRequest")
- proto.RegisterType((*VacuumVolumeCompactResponse)(nil), "volume_server_pb.VacuumVolumeCompactResponse")
- proto.RegisterType((*VacuumVolumeCommitRequest)(nil), "volume_server_pb.VacuumVolumeCommitRequest")
- proto.RegisterType((*VacuumVolumeCommitResponse)(nil), "volume_server_pb.VacuumVolumeCommitResponse")
- proto.RegisterType((*VacuumVolumeCleanupRequest)(nil), "volume_server_pb.VacuumVolumeCleanupRequest")
- proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse")
- proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest")
- proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse")
- proto.RegisterType((*AllocateVolumeRequest)(nil), "volume_server_pb.AllocateVolumeRequest")
- proto.RegisterType((*AllocateVolumeResponse)(nil), "volume_server_pb.AllocateVolumeResponse")
- proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest")
- proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse")
- proto.RegisterType((*VolumeIncrementalCopyRequest)(nil), "volume_server_pb.VolumeIncrementalCopyRequest")
- proto.RegisterType((*VolumeIncrementalCopyResponse)(nil), "volume_server_pb.VolumeIncrementalCopyResponse")
- proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest")
- proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse")
- proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest")
- proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse")
- proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest")
- proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse")
- proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest")
- proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse")
- proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest")
- proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse")
- proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest")
- proto.RegisterType((*CopyFileResponse)(nil), "volume_server_pb.CopyFileResponse")
- proto.RegisterType((*VolumeTailSenderRequest)(nil), "volume_server_pb.VolumeTailSenderRequest")
- proto.RegisterType((*VolumeTailSenderResponse)(nil), "volume_server_pb.VolumeTailSenderResponse")
- proto.RegisterType((*VolumeTailReceiverRequest)(nil), "volume_server_pb.VolumeTailReceiverRequest")
- proto.RegisterType((*VolumeTailReceiverResponse)(nil), "volume_server_pb.VolumeTailReceiverResponse")
- proto.RegisterType((*VolumeEcShardsGenerateRequest)(nil), "volume_server_pb.VolumeEcShardsGenerateRequest")
- proto.RegisterType((*VolumeEcShardsGenerateResponse)(nil), "volume_server_pb.VolumeEcShardsGenerateResponse")
- proto.RegisterType((*VolumeEcShardsRebuildRequest)(nil), "volume_server_pb.VolumeEcShardsRebuildRequest")
- proto.RegisterType((*VolumeEcShardsRebuildResponse)(nil), "volume_server_pb.VolumeEcShardsRebuildResponse")
- proto.RegisterType((*VolumeEcShardsCopyRequest)(nil), "volume_server_pb.VolumeEcShardsCopyRequest")
- proto.RegisterType((*VolumeEcShardsCopyResponse)(nil), "volume_server_pb.VolumeEcShardsCopyResponse")
- proto.RegisterType((*VolumeEcShardsDeleteRequest)(nil), "volume_server_pb.VolumeEcShardsDeleteRequest")
- proto.RegisterType((*VolumeEcShardsDeleteResponse)(nil), "volume_server_pb.VolumeEcShardsDeleteResponse")
- proto.RegisterType((*VolumeEcShardsMountRequest)(nil), "volume_server_pb.VolumeEcShardsMountRequest")
- proto.RegisterType((*VolumeEcShardsMountResponse)(nil), "volume_server_pb.VolumeEcShardsMountResponse")
- proto.RegisterType((*VolumeEcShardsUnmountRequest)(nil), "volume_server_pb.VolumeEcShardsUnmountRequest")
- proto.RegisterType((*VolumeEcShardsUnmountResponse)(nil), "volume_server_pb.VolumeEcShardsUnmountResponse")
- proto.RegisterType((*VolumeEcShardReadRequest)(nil), "volume_server_pb.VolumeEcShardReadRequest")
- proto.RegisterType((*VolumeEcShardReadResponse)(nil), "volume_server_pb.VolumeEcShardReadResponse")
- proto.RegisterType((*VolumeEcBlobDeleteRequest)(nil), "volume_server_pb.VolumeEcBlobDeleteRequest")
- proto.RegisterType((*VolumeEcBlobDeleteResponse)(nil), "volume_server_pb.VolumeEcBlobDeleteResponse")
- proto.RegisterType((*VolumeEcShardsToVolumeRequest)(nil), "volume_server_pb.VolumeEcShardsToVolumeRequest")
- proto.RegisterType((*VolumeEcShardsToVolumeResponse)(nil), "volume_server_pb.VolumeEcShardsToVolumeResponse")
- proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest")
- proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse")
- proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus")
- proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus")
- proto.RegisterType((*RemoteFile)(nil), "volume_server_pb.RemoteFile")
- proto.RegisterType((*VolumeInfo)(nil), "volume_server_pb.VolumeInfo")
- proto.RegisterType((*VolumeTierMoveDatToRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteRequest")
- proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse")
- proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest")
- proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse")
- proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest")
- proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter")
- proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization")
- proto.RegisterType((*QueryRequest_InputSerialization_CSVInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.CSVInput")
- proto.RegisterType((*QueryRequest_InputSerialization_JSONInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.JSONInput")
- proto.RegisterType((*QueryRequest_InputSerialization_ParquetInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.ParquetInput")
- proto.RegisterType((*QueryRequest_OutputSerialization)(nil), "volume_server_pb.QueryRequest.OutputSerialization")
- proto.RegisterType((*QueryRequest_OutputSerialization_CSVOutput)(nil), "volume_server_pb.QueryRequest.OutputSerialization.CSVOutput")
- proto.RegisterType((*QueryRequest_OutputSerialization_JSONOutput)(nil), "volume_server_pb.QueryRequest.OutputSerialization.JSONOutput")
- proto.RegisterType((*QueriedStripe)(nil), "volume_server_pb.QueriedStripe")
+var File_volume_server_proto protoreflect.FileDescriptor
+
+var file_volume_server_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a,
+ 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x07, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70,
+ 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a,
+ 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a,
+ 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a,
+ 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61,
+ 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22,
+ 0x5b, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a,
+ 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72,
+ 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56,
+ 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfb, 0x01, 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a,
+ 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12,
+ 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61,
+ 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61,
+ 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b,
+ 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36,
+ 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6c, 0x5f, 0x6f, 0x66, 0x66,
+ 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6c, 0x4f,
+ 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74,
+ 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e,
+ 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49,
+ 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, 0x42, 0x0a, 0x1d,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+ 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a,
+ 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75,
+ 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x14, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22,
+ 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61,
+ 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c,
+ 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64,
+ 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a,
+ 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x32,
+ 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x49, 0x64, 0x22, 0x38, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73,
+ 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xcb, 0x01, 0x0a,
+ 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12,
+ 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61,
+ 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a,
+ 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f,
+ 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73,
+ 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f,
+ 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03,
+ 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f,
+ 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d,
+ 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75,
+ 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69,
+ 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x15, 0x52, 0x65, 0x61,
+ 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66,
+ 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x39, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64,
+ 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f,
+ 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42,
+ 0x6c, 0x6f, 0x62, 0x22, 0x87, 0x01, 0x0a, 0x16, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65,
+ 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e,
+ 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08,
+ 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x19, 0x0a,
+ 0x17, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49,
+ 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14,
+ 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65,
+ 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x84,
+ 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e,
+ 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e,
+ 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, 0x64,
+ 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x75,
+ 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, 0x74,
+ 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69,
+ 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a,
+ 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22,
+ 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63,
+ 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a,
+ 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a,
+ 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a,
+ 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x12,
+ 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, 0x46,
+ 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61,
+ 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a,
+ 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, 0x6c,
+ 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, 0x69,
+ 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, 0x69,
+ 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45,
+ 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12,
+ 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x1a,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f,
+ 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, 0x0a,
+ 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55,
+ 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99,
+ 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12,
+ 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69,
+ 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65,
+ 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79,
+ 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, 0x64,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x49, 0x64, 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12,
+ 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
+ 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70,
+ 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, 0x73,
+ 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22,
+ 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a,
+ 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12,
+ 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75,
+ 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68,
+ 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12,
+ 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
+ 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b,
+ 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a,
+ 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32,
+ 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c,
+ 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8,
+ 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76,
+ 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65,
+ 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f,
+ 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c,
+ 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13,
+ 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
+ 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65,
+ 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92,
+ 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76,
+ 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46,
+ 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65,
+ 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72,
+ 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70,
+ 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63,
+ 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64,
+ 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73,
+ 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d,
+ 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d,
+ 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69,
+ 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72,
+ 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75,
+ 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53,
+ 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14,
+ 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75,
+ 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75,
+ 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+ 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72,
+ 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f,
+ 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70,
+ 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65,
+ 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49,
+ 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a,
+ 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52,
+ 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61,
+ 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75,
+ 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a,
+ 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10,
+ 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65,
+ 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75,
+ 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61,
+ 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65,
+ 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61,
+ 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64,
+ 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53,
+ 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50,
+ 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13,
+ 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65,
+ 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f,
+ 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74,
+ 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75,
+ 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74,
+ 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21,
+ 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63,
+ 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63,
+ 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72,
+ 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65,
+ 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61,
+ 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75,
+ 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64,
+ 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22,
+ 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49,
+ 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64,
+ 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a,
+ 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63,
+ 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73,
+ 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x10,
+ 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, 0x63,
+ 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74,
+ 0x74, 0x6c, 0x32, 0xa9, 0x21, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63,
+ 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75,
+ 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x61, 0x63, 0x75, 0x75,
+ 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d,
+ 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61,
+ 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61,
+ 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75,
+ 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a,
+ 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12,
+ 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79,
+ 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e,
+ 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43,
+ 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43,
+ 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12,
+ 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f,
+ 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a,
+ 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b,
+ 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d,
+ 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d,
+ 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70,
+ 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x77,
+ 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, 0x46,
+ 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69,
+ 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65,
+ 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62,
+ 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c,
+ 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61,
+ 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65,
+ 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c,
+ 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e,
+ 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69,
+ 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71,
+ 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65,
+ 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61,
+ 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52,
+ 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69,
+ 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69,
+ 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f,
+ 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d,
+ 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a,
+ 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x12,
+ 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c,
+ 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88,
+ 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76,
+ 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x76,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61,
+ 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f,
+ 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46,
+ 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72,
+ 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76,
+ 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a,
+ 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61,
+ 0x76, 0x65, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65,
+ 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a,
+ 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65,
+ 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c,
+ 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x39,
+ 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72,
+ 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
+ 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
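+// The byte slice above is the serialized FileDescriptorProto for
+// volume_server.proto. The Once/Data pair below caches a compressed copy of
+// it, so the descriptor is gzipped at most once, on first request.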
+var (
+ file_volume_server_proto_rawDescOnce sync.Once
+ file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc
+)
+
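+// file_volume_server_proto_rawDescGZIP returns the GZIP-compressed raw
+// descriptor, replacing the cached plain bytes with the compressed form the
+// first time it is called.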
+func file_volume_server_proto_rawDescGZIP() []byte {
+ file_volume_server_proto_rawDescOnce.Do(func() {
+ file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData)
+ })
+ return file_volume_server_proto_rawDescData
+}
+
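+// msgTypes holds the protoimpl metadata for each of the 90 messages defined
+// in volume_server.proto; goTypes maps the descriptor's type indices (0-89)
+// to the corresponding generated Go types.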
+var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 90)
+var file_volume_server_proto_goTypes = []interface{}{
+ (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest
+ (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse
+ (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult
+ (*Empty)(nil), // 3: volume_server_pb.Empty
+ (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest
+ (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse
+ (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest
+ (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse
+ (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest
+ (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse
+ (*VacuumVolumeCleanupRequest)(nil), // 10: volume_server_pb.VacuumVolumeCleanupRequest
+ (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse
+ (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest
+ (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse
+ (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest
+ (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse
+ (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest
+ (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse
+ (*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest
+ (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse
+ (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest
+ (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse
+ (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest
+ (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse
+ (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest
+ (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse
+ (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest
+ (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse
+ (*VolumeMarkWritableRequest)(nil), // 28: volume_server_pb.VolumeMarkWritableRequest
+ (*VolumeMarkWritableResponse)(nil), // 29: volume_server_pb.VolumeMarkWritableResponse
+ (*VolumeConfigureRequest)(nil), // 30: volume_server_pb.VolumeConfigureRequest
+ (*VolumeConfigureResponse)(nil), // 31: volume_server_pb.VolumeConfigureResponse
+ (*VolumeStatusRequest)(nil), // 32: volume_server_pb.VolumeStatusRequest
+ (*VolumeStatusResponse)(nil), // 33: volume_server_pb.VolumeStatusResponse
+ (*VolumeCopyRequest)(nil), // 34: volume_server_pb.VolumeCopyRequest
+ (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse
+ (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest
+ (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse
+ (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest
+ (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse
+ (*WriteNeedleBlobRequest)(nil), // 40: volume_server_pb.WriteNeedleBlobRequest
+ (*WriteNeedleBlobResponse)(nil), // 41: volume_server_pb.WriteNeedleBlobResponse
+ (*VolumeTailSenderRequest)(nil), // 42: volume_server_pb.VolumeTailSenderRequest
+ (*VolumeTailSenderResponse)(nil), // 43: volume_server_pb.VolumeTailSenderResponse
+ (*VolumeTailReceiverRequest)(nil), // 44: volume_server_pb.VolumeTailReceiverRequest
+ (*VolumeTailReceiverResponse)(nil), // 45: volume_server_pb.VolumeTailReceiverResponse
+ (*VolumeEcShardsGenerateRequest)(nil), // 46: volume_server_pb.VolumeEcShardsGenerateRequest
+ (*VolumeEcShardsGenerateResponse)(nil), // 47: volume_server_pb.VolumeEcShardsGenerateResponse
+ (*VolumeEcShardsRebuildRequest)(nil), // 48: volume_server_pb.VolumeEcShardsRebuildRequest
+ (*VolumeEcShardsRebuildResponse)(nil), // 49: volume_server_pb.VolumeEcShardsRebuildResponse
+ (*VolumeEcShardsCopyRequest)(nil), // 50: volume_server_pb.VolumeEcShardsCopyRequest
+ (*VolumeEcShardsCopyResponse)(nil), // 51: volume_server_pb.VolumeEcShardsCopyResponse
+ (*VolumeEcShardsDeleteRequest)(nil), // 52: volume_server_pb.VolumeEcShardsDeleteRequest
+ (*VolumeEcShardsDeleteResponse)(nil), // 53: volume_server_pb.VolumeEcShardsDeleteResponse
+ (*VolumeEcShardsMountRequest)(nil), // 54: volume_server_pb.VolumeEcShardsMountRequest
+ (*VolumeEcShardsMountResponse)(nil), // 55: volume_server_pb.VolumeEcShardsMountResponse
+ (*VolumeEcShardsUnmountRequest)(nil), // 56: volume_server_pb.VolumeEcShardsUnmountRequest
+ (*VolumeEcShardsUnmountResponse)(nil), // 57: volume_server_pb.VolumeEcShardsUnmountResponse
+ (*VolumeEcShardReadRequest)(nil), // 58: volume_server_pb.VolumeEcShardReadRequest
+ (*VolumeEcShardReadResponse)(nil), // 59: volume_server_pb.VolumeEcShardReadResponse
+ (*VolumeEcBlobDeleteRequest)(nil), // 60: volume_server_pb.VolumeEcBlobDeleteRequest
+ (*VolumeEcBlobDeleteResponse)(nil), // 61: volume_server_pb.VolumeEcBlobDeleteResponse
+ (*VolumeEcShardsToVolumeRequest)(nil), // 62: volume_server_pb.VolumeEcShardsToVolumeRequest
+ (*VolumeEcShardsToVolumeResponse)(nil), // 63: volume_server_pb.VolumeEcShardsToVolumeResponse
+ (*ReadVolumeFileStatusRequest)(nil), // 64: volume_server_pb.ReadVolumeFileStatusRequest
+ (*ReadVolumeFileStatusResponse)(nil), // 65: volume_server_pb.ReadVolumeFileStatusResponse
+ (*DiskStatus)(nil), // 66: volume_server_pb.DiskStatus
+ (*MemStatus)(nil), // 67: volume_server_pb.MemStatus
+ (*RemoteFile)(nil), // 68: volume_server_pb.RemoteFile
+ (*VolumeInfo)(nil), // 69: volume_server_pb.VolumeInfo
+ (*VolumeTierMoveDatToRemoteRequest)(nil), // 70: volume_server_pb.VolumeTierMoveDatToRemoteRequest
+ (*VolumeTierMoveDatToRemoteResponse)(nil), // 71: volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ (*VolumeTierMoveDatFromRemoteRequest)(nil), // 72: volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+ (*VolumeTierMoveDatFromRemoteResponse)(nil), // 73: volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ (*VolumeServerStatusRequest)(nil), // 74: volume_server_pb.VolumeServerStatusRequest
+ (*VolumeServerStatusResponse)(nil), // 75: volume_server_pb.VolumeServerStatusResponse
+ (*VolumeServerLeaveRequest)(nil), // 76: volume_server_pb.VolumeServerLeaveRequest
+ (*VolumeServerLeaveResponse)(nil), // 77: volume_server_pb.VolumeServerLeaveResponse
+ (*QueryRequest)(nil), // 78: volume_server_pb.QueryRequest
+ (*QueriedStripe)(nil), // 79: volume_server_pb.QueriedStripe
+ (*VolumeNeedleStatusRequest)(nil), // 80: volume_server_pb.VolumeNeedleStatusRequest
+ (*VolumeNeedleStatusResponse)(nil), // 81: volume_server_pb.VolumeNeedleStatusResponse
+ (*QueryRequest_Filter)(nil), // 82: volume_server_pb.QueryRequest.Filter
+ (*QueryRequest_InputSerialization)(nil), // 83: volume_server_pb.QueryRequest.InputSerialization
+ (*QueryRequest_OutputSerialization)(nil), // 84: volume_server_pb.QueryRequest.OutputSerialization
+ (*QueryRequest_InputSerialization_CSVInput)(nil), // 85: volume_server_pb.QueryRequest.InputSerialization.CSVInput
+ (*QueryRequest_InputSerialization_JSONInput)(nil), // 86: volume_server_pb.QueryRequest.InputSerialization.JSONInput
+ (*QueryRequest_InputSerialization_ParquetInput)(nil), // 87: volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+ (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 88: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+ (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 89: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+}
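+// depIdxs lists, slot by slot, the goTypes index of each dependency: first
+// the message-field type_names, then every RPC input_type and output_type.
+// The trailing [x:y] markers at the end partition the slice into those
+// sub-lists. For example, depIdxs[12] == 0: the first method input_type
+// (BatchDelete) resolves to goTypes[0], *BatchDeleteRequest.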
+var file_volume_server_proto_depIdxs = []int32{
+ 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult
+ 68, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile
+ 66, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus
+ 67, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus
+ 82, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter
+ 83, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization
+ 84, // 6: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization
+ 85, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput
+ 86, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput
+ 87, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+ 88, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+ 89, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+ 0, // 12: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest
+ 4, // 13: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest
+ 6, // 14: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest
+ 8, // 15: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest
+ 10, // 16: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest
+ 12, // 17: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest
+ 14, // 18: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest
+ 16, // 19: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest
+ 18, // 20: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest
+ 20, // 21: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest
+ 22, // 22: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest
+ 24, // 23: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest
+ 26, // 24: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest
+ 28, // 25: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest
+ 30, // 26: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest
+ 32, // 27: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest
+ 34, // 28: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest
+ 64, // 29: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest
+ 36, // 30: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest
+ 38, // 31: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest
+ 40, // 32: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest
+ 42, // 33: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest
+ 44, // 34: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest
+ 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest
+ 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest
+ 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest
+ 52, // 38: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest
+ 54, // 39: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest
+ 56, // 40: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest
+ 58, // 41: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest
+ 60, // 42: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest
+ 62, // 43: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest
+ 70, // 44: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest
+ 72, // 45: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+ 74, // 46: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest
+ 76, // 47: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest
+ 78, // 48: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest
+ 80, // 49: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest
+ 1, // 50: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse
+ 5, // 51: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse
+ 7, // 52: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse
+ 9, // 53: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse
+ 11, // 54: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse
+ 13, // 55: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse
+ 15, // 56: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse
+ 17, // 57: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse
+ 19, // 58: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse
+ 21, // 59: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse
+ 23, // 60: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse
+ 25, // 61: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse
+ 27, // 62: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse
+ 29, // 63: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse
+ 31, // 64: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse
+ 33, // 65: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse
+ 35, // 66: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse
+ 65, // 67: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse
+ 37, // 68: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse
+ 39, // 69: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse
+ 41, // 70: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse
+ 43, // 71: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse
+ 45, // 72: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse
+ 47, // 73: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse
+ 49, // 74: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse
+ 51, // 75: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse
+ 53, // 76: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse
+ 55, // 77: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse
+ 57, // 78: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse
+ 59, // 79: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse
+ 61, // 80: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse
+ 63, // 81: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse
+ 71, // 82: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ 73, // 83: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ 75, // 84: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse
+ 77, // 85: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse
+ 79, // 86: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe
+ 81, // 87: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse
+ 50, // [50:88] is the sub-list for method output_type
+ 12, // [12:50] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
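+// file_volume_server_proto_init registers this file with the protobuf
+// runtime. When the unsafe-based fast path is unavailable
+// (protoimpl.UnsafeEnabled is false), it installs an exporter per message so
+// the runtime can reflectively reach each message's internal state,
+// sizeCache and unknownFields.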
+func init() { file_volume_server_proto_init() }
+func file_volume_server_proto_init() {
+ if File_volume_server_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BatchDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BatchDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteResult); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Empty); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCheckRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCheckResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCompactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCompactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCommitRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCommitResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCleanupRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VacuumVolumeCleanupResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteCollectionResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllocateVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllocateVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeSyncStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeSyncStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeIncrementalCopyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeIncrementalCopyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeUnmountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeUnmountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMarkReadonlyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMarkReadonlyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMarkWritableRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeMarkWritableResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeConfigureRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeConfigureResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeCopyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeCopyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CopyFileRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CopyFileResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadNeedleBlobRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadNeedleBlobResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*WriteNeedleBlobRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*WriteNeedleBlobResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailSenderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailSenderResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailReceiverRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTailReceiverResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsGenerateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsGenerateResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsRebuildRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsRebuildResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsCopyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsCopyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsMountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsMountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsUnmountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsUnmountResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardReadRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardReadResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcBlobDeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcBlobDeleteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsToVolumeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeEcShardsToVolumeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadVolumeFileStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadVolumeFileStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DiskStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoteFile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatToRemoteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatToRemoteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeServerStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeServerStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeServerLeaveRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeServerLeaveResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueriedStripe); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeNeedleStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VolumeNeedleStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_Filter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_OutputSerialization); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization_CSVInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization_JSONInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_volume_server_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_volume_server_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 90,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_volume_server_proto_goTypes,
+ DependencyIndexes: file_volume_server_proto_depIdxs,
+ MessageInfos: file_volume_server_proto_msgTypes,
+ }.Build()
+ File_volume_server_proto = out.File
+ file_volume_server_proto_rawDesc = nil
+ file_volume_server_proto_goTypes = nil
+ file_volume_server_proto_depIdxs = nil
}
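
Aside — a note on the generated init above (not part of the diff): the per-message Exporter closures only run when protoimpl.UnsafeEnabled is false, and each one simply exposes the three internal bookkeeping fields (state, sizeCache, unknownFields) to the reflection runtime. Once init() has run, the descriptor is registered and messages behave like any other protobuf type. A minimal sketch, assuming the repository's import path and the field names in volume_server.proto:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"

        "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" // assumed import path
    )

    func main() {
        // Importing the package has already triggered file_volume_server_proto_init().
        req := &volume_server_pb.VolumeMountRequest{VolumeId: 7} // field name assumed
        b, err := proto.Marshal(req)
        if err != nil {
            panic(err)
        }
        fmt.Printf("marshaled %d bytes\n", len(b))
    }
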
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for VolumeServer service
+const _ = grpc.SupportPackageIsVersion6
+// VolumeServerClient is the client API for VolumeServer service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VolumeServerClient interface {
- // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+ //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error)
@@ -2122,10 +7270,15 @@ type VolumeServerClient interface {
VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error)
VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error)
VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error)
+ VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error)
+ VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error)
+ VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error)
// copy the .idx .dat files, and mount this volume
VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error)
ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error)
CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error)
+ ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error)
+ WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error)
VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error)
VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error)
// erasure coding
@@ -2141,21 +7294,24 @@ type VolumeServerClient interface {
// tiered storage
VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error)
VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error)
- // query
+ VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error)
+ VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error)
+ // query
Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error)
+ VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error)
}
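
Aside — a minimal caller for this interface (sketch, not part of the diff; assumes a volume server on localhost:18080, plaintext gRPC, and field names taken from volume_server.proto):

    conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := volume_server_pb.NewVolumeServerClient(conn)
    // VolumeStatus is one of the RPCs added in this change.
    resp, err := client.VolumeStatus(context.Background(),
        &volume_server_pb.VolumeStatusRequest{VolumeId: 7})
    if err != nil {
        log.Fatal(err)
    }
    log.Println("read-only:", resp.GetIsReadOnly())

Note the signature change right below: NewVolumeServerClient now accepts the grpc.ClientConnInterface abstraction rather than a concrete *grpc.ClientConn, so any conforming connection type can be passed.
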
type volumeServerClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewVolumeServerClient(cc *grpc.ClientConn) VolumeServerClient {
+func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient {
return &volumeServerClient{cc}
}
func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) {
out := new(BatchDeleteResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2164,7 +7320,7 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq
func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) {
out := new(VacuumVolumeCheckResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2173,7 +7329,7 @@ func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVo
func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) {
out := new(VacuumVolumeCompactResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2182,7 +7338,7 @@ func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *Vacuum
func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) {
out := new(VacuumVolumeCommitResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2191,7 +7347,7 @@ func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumV
func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) {
out := new(VacuumVolumeCleanupResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2200,7 +7356,7 @@ func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *Vacuum
func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
out := new(DeleteCollectionResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2209,7 +7365,7 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol
func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) {
out := new(AllocateVolumeResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2218,7 +7374,7 @@ func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVol
func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) {
out := new(VolumeSyncStatusResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2226,7 +7382,7 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn
}
func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...)
if err != nil {
return nil, err
}
@@ -2259,7 +7415,7 @@ func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopy
func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) {
out := new(VolumeMountResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2268,7 +7424,7 @@ func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountReq
func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) {
out := new(VolumeUnmountResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2277,7 +7433,7 @@ func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmoun
func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) {
out := new(VolumeDeleteResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2286,7 +7442,34 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR
func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) {
out := new(VolumeMarkReadonlyResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) {
+ out := new(VolumeMarkWritableResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkWritable", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) {
+ out := new(VolumeConfigureResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) {
+ out := new(VolumeStatusResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeStatus", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2295,7 +7478,7 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM
func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) {
out := new(VolumeCopyResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2304,7 +7487,7 @@ func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyReque
func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) {
out := new(ReadVolumeFileStatusResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2312,7 +7495,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV
}
func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/CopyFile", opts...)
if err != nil {
return nil, err
}
@@ -2343,8 +7526,26 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) {
return m, nil
}
+func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) {
+ out := new(ReadNeedleBlobResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) {
+ out := new(WriteNeedleBlobResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...)
if err != nil {
return nil, err
}
@@ -2377,7 +7578,7 @@ func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse,
func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) {
out := new(VolumeTailReceiverResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2386,7 +7587,7 @@ func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeT
func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) {
out := new(VolumeEcShardsGenerateResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2395,7 +7596,7 @@ func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *Vol
func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) {
out := new(VolumeEcShardsRebuildResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2404,7 +7605,7 @@ func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *Volu
func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) {
out := new(VolumeEcShardsCopyResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2413,7 +7614,7 @@ func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeE
func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) {
out := new(VolumeEcShardsDeleteResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2422,7 +7623,7 @@ func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *Volum
func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) {
out := new(VolumeEcShardsMountResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2431,7 +7632,7 @@ func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *Volume
func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) {
out := new(VolumeEcShardsUnmountResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2439,7 +7640,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu
}
func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...)
if err != nil {
return nil, err
}
@@ -2472,7 +7673,7 @@ func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse
func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) {
out := new(VolumeEcBlobDeleteResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2481,7 +7682,7 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE
func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) {
out := new(VolumeEcShardsToVolumeResponse)
- err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, c.cc, opts...)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2489,7 +7690,7 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol
}
func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...)
if err != nil {
return nil, err
}
@@ -2521,7 +7722,7 @@ func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDat
}
func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...)
if err != nil {
return nil, err
}
@@ -2552,8 +7753,26 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveD
return m, nil
}
+func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) {
+ out := new(VolumeServerStatusResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) {
+ out := new(VolumeServerLeaveResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerLeave", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/Query", opts...)
+ stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/Query", opts...)
if err != nil {
return nil, err
}
@@ -2584,10 +7803,18 @@ func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) {
return m, nil
}
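
Aside — the server-streaming methods above (VolumeIncrementalCopy, CopyFile, VolumeTailSender, Query, ...) are all consumed with the usual Recv loop; a sketch, not part of the diff, with the response field name assumed from volume_server.proto:

    stream, err := client.Query(context.Background(), &volume_server_pb.QueryRequest{})
    if err != nil {
        log.Fatal(err)
    }
    for {
        stripe, err := stream.Recv()
        if err == io.EOF {
            break // server finished the stream
        }
        if err != nil {
            log.Fatal(err)
        }
        _ = stripe.GetRecords() // QueriedStripe payload; accessor name assumed
    }
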
-// Server API for VolumeServer service
+func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) {
+ out := new(VolumeNeedleStatusResponse)
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeNeedleStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+// VolumeServerServer is the server API for VolumeServer service.
type VolumeServerServer interface {
- // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+ //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error)
@@ -2601,10 +7828,15 @@ type VolumeServerServer interface {
VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error)
VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error)
VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error)
+ VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error)
+ VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error)
+ VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error)
// copy the .idx .dat files, and mount this volume
VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error)
ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error)
CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error
+ ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error)
+ WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error)
VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error
VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error)
// erasure coding
@@ -2620,8 +7852,130 @@ type VolumeServerServer interface {
// tiered storage
VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error
VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error
- // query
+ VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error)
+ VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error)
+ // query
Query(*QueryRequest, VolumeServer_QueryServer) error
+ VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error)
+}
+
+// UnimplementedVolumeServerServer can be embedded to have forward compatible implementations.
+type UnimplementedVolumeServerServer struct {
+}
+
+func (*UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCommit not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCleanup not implemented")
+}
+func (*UnimplementedVolumeServerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented")
+}
+func (*UnimplementedVolumeServerServer) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AllocateVolume not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeMount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeUnmount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkWritable not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeConfigure not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error {
+ return status.Errorf(codes.Unimplemented, "method CopyFile not implemented")
+}
+func (*UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented")
+}
+func (*UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeTailReceiver not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsGenerate not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsRebuild not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsMount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcBlobDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error {
+ return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeServerStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeServerLeave not implemented")
+}
+func (*UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error {
+ return status.Errorf(codes.Unimplemented, "method Query not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VolumeNeedleStatus not implemented")
}
func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) {
@@ -2865,6 +8219,60 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte
return interceptor(ctx, in, info, handler)
}
+func _VolumeServer_VolumeMarkWritable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeMarkWritableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeMarkWritable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkWritable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeConfigureRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeConfigure(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VolumeStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeStatus(ctx, req.(*VolumeStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VolumeCopyRequest)
if err := dec(in); err != nil {
@@ -2922,6 +8330,42 @@ func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error {
return x.ServerStream.SendMsg(m)
}
+func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ReadNeedleBlobRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).ReadNeedleBlob(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(WriteNeedleBlobRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).WriteNeedleBlob(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(VolumeTailSenderRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -3168,6 +8612,42 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDa
return x.ServerStream.SendMsg(m)
}
+func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeServerStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeServerStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolumeServer_VolumeServerLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeServerLeaveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeServerLeave(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeServerLeave",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeServerLeave(ctx, req.(*VolumeServerLeaveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(QueryRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -3189,6 +8669,24 @@ func (x *volumeServerQueryServer) Send(m *QueriedStripe) error {
return x.ServerStream.SendMsg(m)
}
+func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeNeedleStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeNeedleStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, req.(*VolumeNeedleStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _VolumeServer_serviceDesc = grpc.ServiceDesc{
ServiceName: "volume_server_pb.VolumeServer",
HandlerType: (*VolumeServerServer)(nil),
@@ -3241,6 +8739,18 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
MethodName: "VolumeMarkReadonly",
Handler: _VolumeServer_VolumeMarkReadonly_Handler,
},
+ {
+ MethodName: "VolumeMarkWritable",
+ Handler: _VolumeServer_VolumeMarkWritable_Handler,
+ },
+ {
+ MethodName: "VolumeConfigure",
+ Handler: _VolumeServer_VolumeConfigure_Handler,
+ },
+ {
+ MethodName: "VolumeStatus",
+ Handler: _VolumeServer_VolumeStatus_Handler,
+ },
{
MethodName: "VolumeCopy",
Handler: _VolumeServer_VolumeCopy_Handler,
@@ -3249,6 +8759,14 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
MethodName: "ReadVolumeFileStatus",
Handler: _VolumeServer_ReadVolumeFileStatus_Handler,
},
+ {
+ MethodName: "ReadNeedleBlob",
+ Handler: _VolumeServer_ReadNeedleBlob_Handler,
+ },
+ {
+ MethodName: "WriteNeedleBlob",
+ Handler: _VolumeServer_WriteNeedleBlob_Handler,
+ },
{
MethodName: "VolumeTailReceiver",
Handler: _VolumeServer_VolumeTailReceiver_Handler,
@@ -3285,6 +8803,18 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
MethodName: "VolumeEcShardsToVolume",
Handler: _VolumeServer_VolumeEcShardsToVolume_Handler,
},
+ {
+ MethodName: "VolumeServerStatus",
+ Handler: _VolumeServer_VolumeServerStatus_Handler,
+ },
+ {
+ MethodName: "VolumeServerLeave",
+ Handler: _VolumeServer_VolumeServerLeave_Handler,
+ },
+ {
+ MethodName: "VolumeNeedleStatus",
+ Handler: _VolumeServer_VolumeNeedleStatus_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -3325,191 +8855,3 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
},
Metadata: "volume_server.proto",
}
-
-func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 2905 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3a, 0x4d, 0x73, 0xdc, 0xc6,
- 0xb1, 0x5c, 0x2e, 0x3f, 0x76, 0x7b, 0x49, 0x91, 0x1a, 0xd2, 0xd4, 0x1a, 0xa2, 0x24, 0x1a, 0xf2,
- 0x87, 0x24, 0x5b, 0x94, 0x4c, 0xdb, 0xcf, 0x7e, 0xf6, 0xb3, 0xdf, 0x93, 0x28, 0xe9, 0x45, 0xb1,
- 0x45, 0xd9, 0xa0, 0xac, 0x38, 0xb6, 0x2b, 0xa8, 0x21, 0x30, 0x2b, 0xc2, 0x04, 0x30, 0x10, 0x30,
- 0x4b, 0x6b, 0x55, 0xc9, 0xc9, 0x39, 0xa4, 0x2a, 0x95, 0x1c, 0x52, 0xb9, 0xe4, 0x9c, 0x7b, 0xae,
- 0xf9, 0x03, 0x39, 0xf8, 0x0f, 0xa4, 0x2a, 0xa7, 0x5c, 0x72, 0xce, 0x21, 0xb7, 0x54, 0xe5, 0x92,
- 0x9a, 0x2f, 0x2c, 0x3e, 0xb9, 0xa0, 0xc5, 0x54, 0x2a, 0xb7, 0x41, 0x4f, 0x7f, 0x4c, 0xf7, 0x74,
- 0xf7, 0x4c, 0x4f, 0x03, 0x56, 0x0e, 0xa9, 0x3f, 0x0c, 0x88, 0x9d, 0x90, 0xf8, 0x90, 0xc4, 0x9b,
- 0x51, 0x4c, 0x19, 0x45, 0xcb, 0x39, 0xa0, 0x1d, 0xed, 0x99, 0xd7, 0x00, 0xdd, 0xc4, 0xcc, 0xd9,
- 0xbf, 0x45, 0x7c, 0xc2, 0x88, 0x45, 0x1e, 0x0f, 0x49, 0xc2, 0xd0, 0xf3, 0xd0, 0x19, 0x78, 0x3e,
- 0xb1, 0x3d, 0x37, 0xe9, 0xb7, 0x36, 0xda, 0x97, 0xba, 0xd6, 0x3c, 0xff, 0xbe, 0xeb, 0x26, 0xe6,
- 0x7d, 0x58, 0xc9, 0x11, 0x24, 0x11, 0x0d, 0x13, 0x82, 0xde, 0x81, 0xf9, 0x98, 0x24, 0x43, 0x9f,
- 0x49, 0x82, 0xde, 0xd6, 0xf9, 0xcd, 0xa2, 0xac, 0xcd, 0x94, 0x64, 0xe8, 0x33, 0x4b, 0xa3, 0x9b,
- 0xdf, 0xb4, 0x60, 0x21, 0x3b, 0x83, 0xce, 0xc0, 0xbc, 0x12, 0xde, 0x6f, 0x6d, 0xb4, 0x2e, 0x75,
- 0xad, 0x39, 0x29, 0x1b, 0xad, 0xc1, 0x5c, 0xc2, 0x30, 0x1b, 0x26, 0xfd, 0xe9, 0x8d, 0xd6, 0xa5,
- 0x59, 0x4b, 0x7d, 0xa1, 0x55, 0x98, 0x25, 0x71, 0x4c, 0xe3, 0x7e, 0x5b, 0xa0, 0xcb, 0x0f, 0x84,
- 0x60, 0x26, 0xf1, 0x9e, 0x92, 0xfe, 0xcc, 0x46, 0xeb, 0xd2, 0xa2, 0x25, 0xc6, 0xa8, 0x0f, 0xf3,
- 0x87, 0x24, 0x4e, 0x3c, 0x1a, 0xf6, 0x67, 0x05, 0x58, 0x7f, 0x9a, 0xf3, 0x30, 0x7b, 0x3b, 0x88,
- 0xd8, 0xc8, 0x7c, 0x1b, 0xfa, 0x0f, 0xb1, 0x33, 0x1c, 0x06, 0x0f, 0xc5, 0xf2, 0xb7, 0xf7, 0x89,
- 0x73, 0xa0, 0xcd, 0x72, 0x16, 0xba, 0x4a, 0x29, 0xb5, 0xb6, 0x45, 0xab, 0x23, 0x01, 0x77, 0x5d,
- 0xf3, 0xff, 0xe0, 0xf9, 0x0a, 0x42, 0x65, 0x9e, 0x8b, 0xb0, 0xf8, 0x08, 0xc7, 0x7b, 0xf8, 0x11,
- 0xb1, 0x63, 0xcc, 0x3c, 0x2a, 0xa8, 0x5b, 0xd6, 0x82, 0x02, 0x5a, 0x1c, 0x66, 0x7e, 0x01, 0x46,
- 0x8e, 0x03, 0x0d, 0x22, 0xec, 0xb0, 0x26, 0xc2, 0xd1, 0x06, 0xf4, 0xa2, 0x98, 0x60, 0xdf, 0xa7,
- 0x0e, 0x66, 0x44, 0xd8, 0xa7, 0x6d, 0x65, 0x41, 0xe6, 0x39, 0x38, 0x5b, 0xc9, 0x5c, 0x2e, 0xd0,
- 0x7c, 0xa7, 0xb0, 0x7a, 0x1a, 0x04, 0x5e, 0x23, 0xd1, 0xe6, 0x7a, 0x69, 0xd5, 0x82, 0x52, 0xf1,
- 0xfd, 0xef, 0xc2, 0xac, 0x4f, 0x70, 0x38, 0x8c, 0x1a, 0x31, 0x2e, 0xae, 0x58, 0x93, 0xa6, 0x9c,
- 0xcf, 0x48, 0xb7, 0xd9, 0xa6, 0xbe, 0x4f, 0x1c, 0xe6, 0xd1, 0x50, 0xb3, 0x3d, 0x0f, 0xe0, 0xa4,
- 0x40, 0xe5, 0x44, 0x19, 0x88, 0x69, 0x40, 0xbf, 0x4c, 0xaa, 0xd8, 0xfe, 0xb9, 0x05, 0xcf, 0xdd,
- 0x50, 0x46, 0x93, 0x82, 0x1b, 0x6d, 0x40, 0x5e, 0xe4, 0x74, 0x51, 0x64, 0x71, 0x83, 0xda, 0xa5,
- 0x0d, 0xe2, 0x18, 0x31, 0x89, 0x7c, 0xcf, 0xc1, 0x82, 0xc5, 0x8c, 0x60, 0x91, 0x05, 0xa1, 0x65,
- 0x68, 0x33, 0xe6, 0x0b, 0xcf, 0xed, 0x5a, 0x7c, 0x88, 0xb6, 0x60, 0x2d, 0x20, 0x01, 0x8d, 0x47,
- 0x76, 0x80, 0x23, 0x3b, 0xc0, 0x4f, 0x6c, 0xee, 0xe6, 0x76, 0xb0, 0xd7, 0x9f, 0x13, 0xeb, 0x43,
- 0x72, 0xf6, 0x1e, 0x8e, 0xee, 0xe1, 0x27, 0xbb, 0xde, 0x53, 0x72, 0x6f, 0xcf, 0xec, 0xc3, 0x5a,
- 0x51, 0x3f, 0xa5, 0xfa, 0x7f, 0xc1, 0x19, 0x09, 0xd9, 0x1d, 0x85, 0xce, 0xae, 0x88, 0xad, 0x46,
- 0x1b, 0xf5, 0x8f, 0x16, 0xf4, 0xcb, 0x84, 0xca, 0xf3, 0x9f, 0xd5, 0x6a, 0xc7, 0xb6, 0xc9, 0x05,
- 0xe8, 0x31, 0xec, 0xf9, 0x36, 0x1d, 0x0c, 0x12, 0xc2, 0x84, 0x21, 0x66, 0x2c, 0xe0, 0xa0, 0xfb,
- 0x02, 0x82, 0x2e, 0xc3, 0xb2, 0x23, 0xbd, 0xdf, 0x8e, 0xc9, 0xa1, 0x27, 0xb2, 0xc1, 0xbc, 0x58,
- 0xd8, 0x92, 0xa3, 0xa3, 0x42, 0x82, 0x91, 0x09, 0x8b, 0x9e, 0xfb, 0xc4, 0x16, 0xe9, 0x48, 0x24,
- 0x93, 0x8e, 0xe0, 0xd6, 0xf3, 0xdc, 0x27, 0x77, 0x3c, 0x9f, 0x70, 0x8b, 0x9a, 0x0f, 0x61, 0x5d,
- 0x2a, 0x7f, 0x37, 0x74, 0x62, 0x12, 0x90, 0x90, 0x61, 0x7f, 0x9b, 0x46, 0xa3, 0x46, 0x6e, 0xf3,
- 0x3c, 0x74, 0x12, 0x2f, 0x74, 0x88, 0x1d, 0xca, 0xa4, 0x36, 0x63, 0xcd, 0x8b, 0xef, 0x9d, 0xc4,
- 0xbc, 0x09, 0xe7, 0x6a, 0xf8, 0x2a, 0xcb, 0xbe, 0x00, 0x0b, 0x62, 0x61, 0x0e, 0x0d, 0x19, 0x09,
- 0x99, 0xe0, 0xbd, 0x60, 0xf5, 0x38, 0x6c, 0x5b, 0x82, 0xcc, 0xd7, 0x01, 0x49, 0x1e, 0xf7, 0xe8,
- 0x30, 0x6c, 0x16, 0xce, 0xcf, 0xc1, 0x4a, 0x8e, 0x44, 0xf9, 0xc6, 0x1b, 0xb0, 0x2a, 0xc1, 0x9f,
- 0x86, 0x41, 0x63, 0x5e, 0x67, 0xe0, 0xb9, 0x02, 0x91, 0xe2, 0xb6, 0xa5, 0x85, 0xe4, 0x8f, 0x9d,
- 0x23, 0x99, 0xad, 0xe9, 0x15, 0xe4, 0x4f, 0x1e, 0x91, 0xb9, 0xe4, 0x82, 0x71, 0x7c, 0x60, 0x11,
- 0xec, 0xd2, 0xd0, 0x1f, 0x35, 0xce, 0x5c, 0x15, 0x94, 0x8a, 0xef, 0xef, 0x5a, 0x70, 0x5a, 0xa7,
- 0xb4, 0x86, 0xbb, 0x79, 0x4c, 0x77, 0x6e, 0xd7, 0xba, 0xf3, 0xcc, 0xd8, 0x9d, 0x2f, 0xc1, 0x72,
- 0x42, 0x87, 0xb1, 0x43, 0x6c, 0x17, 0x33, 0x6c, 0x87, 0xd4, 0x25, 0xca, 0xdb, 0x4f, 0x49, 0xf8,
- 0x2d, 0xcc, 0xf0, 0x0e, 0x75, 0x89, 0xf9, 0xbf, 0x7a, 0xb3, 0x73, 0x5e, 0x72, 0x19, 0x4e, 0xfb,
- 0x38, 0x61, 0x36, 0x8e, 0x22, 0x12, 0xba, 0x36, 0x66, 0xdc, 0xd5, 0x5a, 0xc2, 0xd5, 0x4e, 0xf1,
- 0x89, 0x1b, 0x02, 0x7e, 0x83, 0xed, 0x24, 0xe6, 0xaf, 0xa7, 0x61, 0x89, 0xd3, 0x72, 0xd7, 0x6e,
- 0xa4, 0xef, 0x32, 0xb4, 0xc9, 0x13, 0xa6, 0x14, 0xe5, 0x43, 0x74, 0x0d, 0x56, 0x54, 0x0c, 0x79,
- 0x34, 0x1c, 0x87, 0x57, 0x5b, 0x66, 0xa3, 0xf1, 0x54, 0x1a, 0x61, 0x17, 0xa0, 0x97, 0x30, 0x1a,
- 0xe9, 0x68, 0x9d, 0x91, 0xd1, 0xca, 0x41, 0x2a, 0x5a, 0xf3, 0x36, 0x9d, 0xad, 0xb0, 0xe9, 0x82,
- 0x97, 0xd8, 0xc4, 0xb1, 0xe5, 0xaa, 0x44, 0xbc, 0x77, 0x2c, 0xf0, 0x92, 0xdb, 0x8e, 0xb4, 0x06,
- 0xfa, 0x00, 0xd6, 0xbd, 0x47, 0x21, 0x8d, 0x89, 0xad, 0x0c, 0x29, 0xa2, 0x26, 0xa4, 0xcc, 0x1e,
- 0xd0, 0x61, 0xe8, 0x8a, 0xd8, 0xef, 0x58, 0x7d, 0x89, 0xb3, 0x2b, 0x50, 0xb8, 0x05, 0x76, 0x28,
- 0xbb, 0xc3, 0xe7, 0xcd, 0xb7, 0x60, 0x79, 0x6c, 0x95, 0xe6, 0xb1, 0xf7, 0x4d, 0x4b, 0xa7, 0xd3,
- 0x07, 0xd8, 0xf3, 0x77, 0x49, 0xe8, 0x92, 0xf8, 0x19, 0x73, 0x02, 0xba, 0x0e, 0xab, 0x9e, 0xeb,
- 0x13, 0x9b, 0x79, 0x01, 0xa1, 0x43, 0x66, 0x27, 0xc4, 0xa1, 0xa1, 0x9b, 0x68, 0xfb, 0xf2, 0xb9,
- 0x07, 0x72, 0x6a, 0x57, 0xce, 0x98, 0x3f, 0x4d, 0x73, 0x73, 0x76, 0x15, 0xe3, 0x5b, 0x49, 0x48,
- 0x08, 0x67, 0xb8, 0x4f, 0xb0, 0x4b, 0x62, 0xa5, 0xc6, 0x82, 0x04, 0x7e, 0x4f, 0xc0, 0xf8, 0x0e,
- 0x29, 0xa4, 0x3d, 0xea, 0x8e, 0xc4, 0x8a, 0x16, 0x2c, 0x90, 0xa0, 0x9b, 0xd4, 0x1d, 0x89, 0x24,
- 0x99, 0xd8, 0xc2, 0xc9, 0x9c, 0xfd, 0x61, 0x78, 0x20, 0x56, 0xd3, 0xb1, 0x7a, 0x5e, 0xf2, 0x11,
- 0x4e, 0xd8, 0x36, 0x07, 0x99, 0xbf, 0x6f, 0xe9, 0x28, 0xe5, 0xcb, 0xb0, 0x88, 0x43, 0xbc, 0xc3,
- 0x7f, 0x83, 0x39, 0x38, 0x85, 0x72, 0x82, 0xdc, 0xed, 0x54, 0x05, 0x1c, 0x92, 0x73, 0xea, 0x2c,
- 0x13, 0x33, 0xe3, 0x24, 0x91, 0x5f, 0xb8, 0x4a, 0x12, 0x5f, 0xea, 0x24, 0x7d, 0xdb, 0xd9, 0xdd,
- 0xc7, 0xb1, 0x9b, 0xfc, 0x3f, 0x09, 0x49, 0x8c, 0xd9, 0x89, 0x5c, 0x1a, 0xcc, 0x0d, 0x38, 0x5f,
- 0xc7, 0x5d, 0xc9, 0xff, 0x42, 0x1f, 0x3e, 0x1a, 0xc3, 0x22, 0x7b, 0x43, 0xcf, 0x77, 0x4f, 0x44,
- 0xfc, 0x87, 0x45, 0xe5, 0x52, 0xe6, 0xca, 0x7f, 0xae, 0xc0, 0xe9, 0x58, 0x80, 0x98, 0x9d, 0x70,
- 0x84, 0xb4, 0x5e, 0x58, 0xb4, 0x96, 0xd4, 0x84, 0x20, 0xe4, 0x75, 0xc3, 0xcf, 0xa7, 0xb5, 0x07,
- 0x68, 0x6e, 0x27, 0x96, 0x56, 0xcf, 0x42, 0x77, 0x2c, 0xbe, 0x2d, 0xc4, 0x77, 0x12, 0x25, 0x97,
- 0x7b, 0xa7, 0x43, 0xa3, 0x91, 0x4d, 0x1c, 0x79, 0x8e, 0x8b, 0xad, 0xee, 0x58, 0x3d, 0x0e, 0xbc,
- 0xed, 0x88, 0x63, 0xbc, 0x79, 0x8e, 0xcd, 0x70, 0xfb, 0x4a, 0x72, 0x9b, 0xcb, 0x72, 0xfb, 0x4a,
- 0x70, 0xd3, 0x38, 0x87, 0xde, 0x40, 0xe2, 0xcc, 0x8f, 0x71, 0x1e, 0x7a, 0x03, 0x8e, 0x33, 0xf6,
- 0xaa, 0xbc, 0x31, 0xd4, 0xae, 0x7e, 0x0d, 0x67, 0xf3, 0xb3, 0xcd, 0x8f, 0xc9, 0x67, 0x32, 0x96,
- 0x79, 0xbe, 0xe8, 0x4e, 0x85, 0xb3, 0xf6, 0xb0, 0xb8, 0xec, 0xc6, 0xf7, 0x8a, 0x67, 0x5b, 0xd7,
- 0xb9, 0xa2, 0x41, 0xf2, 0x97, 0x93, 0xcf, 0x8a, 0xcb, 0x3e, 0xc6, 0x25, 0xe5, 0x68, 0xc1, 0x17,
- 0x8a, 0x21, 0x50, 0xbc, 0xc9, 0xfc, 0x26, 0xcd, 0xaf, 0x0a, 0x83, 0xdf, 0x23, 0x1a, 0xe7, 0x35,
- 0x25, 0x57, 0x98, 0x63, 0xd1, 0x9a, 0x57, 0x62, 0x79, 0xa1, 0xab, 0xce, 0x43, 0x59, 0x27, 0xa8,
- 0xaf, 0x5c, 0x49, 0xdb, 0x56, 0x25, 0xad, 0x2e, 0xd5, 0x0f, 0xc8, 0x48, 0xf8, 0xec, 0x8c, 0x2c,
- 0xd5, 0x3f, 0x24, 0x23, 0x73, 0xa7, 0x10, 0x71, 0x72, 0x69, 0x2a, 0x76, 0x11, 0xcc, 0x70, 0x67,
- 0x57, 0x29, 0x5f, 0x8c, 0xd1, 0x39, 0x00, 0x2f, 0xb1, 0x5d, 0xb1, 0xe7, 0x72, 0x51, 0x1d, 0xab,
- 0xeb, 0x29, 0x27, 0x70, 0xcd, 0x5f, 0xb4, 0xc6, 0x0c, 0x6f, 0xfa, 0x74, 0xef, 0x04, 0xbd, 0x32,
- 0xab, 0x45, 0x3b, 0xa7, 0x45, 0xb6, 0x66, 0x9f, 0xc9, 0xd7, 0xec, 0x99, 0x20, 0xca, 0x2e, 0xa7,
- 0x2e, 0x35, 0x3f, 0xa0, 0x27, 0x57, 0xcf, 0x95, 0x53, 0xf3, 0x98, 0xbb, 0x92, 0xff, 0x2e, 0x9c,
- 0xe5, 0x06, 0x97, 0x50, 0x51, 0x2d, 0x34, 0xaf, 0xa8, 0xfe, 0x3a, 0x0d, 0xeb, 0xd5, 0xc4, 0x4d,
- 0xaa, 0xaa, 0xf7, 0xc0, 0x48, 0xab, 0x16, 0x7e, 0x34, 0x26, 0x0c, 0x07, 0x51, 0x7a, 0x38, 0xca,
- 0x33, 0xf4, 0x8c, 0x2a, 0x61, 0x1e, 0xe8, 0x79, 0x7d, 0x42, 0x96, 0x4a, 0x9e, 0x76, 0xa9, 0xe4,
- 0xe1, 0x02, 0x5c, 0xcc, 0xea, 0x04, 0xc8, 0x3b, 0xdc, 0x19, 0x17, 0xb3, 0x3a, 0x01, 0x29, 0xb1,
- 0x10, 0x20, 0xbd, 0xb6, 0xa7, 0xf0, 0x85, 0x80, 0x73, 0x00, 0xea, 0x7a, 0x35, 0x0c, 0x75, 0x09,
- 0xd7, 0x95, 0x97, 0xab, 0x61, 0x58, 0x7b, 0xcb, 0x9c, 0xaf, 0xbd, 0x65, 0xe6, 0x77, 0xb3, 0x53,
- 0xda, 0xcd, 0xcf, 0x00, 0x6e, 0x79, 0xc9, 0x81, 0x34, 0x32, 0xbf, 0xd6, 0xba, 0x5e, 0xac, 0xde,
- 0x0d, 0xf8, 0x90, 0x43, 0xb0, 0xef, 0x2b, 0xd3, 0xf1, 0x21, 0x0f, 0x9f, 0x61, 0x42, 0x5c, 0x65,
- 0x1d, 0x31, 0xe6, 0xb0, 0x41, 0x4c, 0x88, 0x32, 0x80, 0x18, 0x9b, 0xbf, 0x6d, 0x41, 0xf7, 0x1e,
- 0x09, 0x14, 0xe7, 0xf3, 0x00, 0x8f, 0x68, 0x4c, 0x87, 0xcc, 0x0b, 0x89, 0xbc, 0x85, 0xcf, 0x5a,
- 0x19, 0xc8, 0x77, 0x97, 0x23, 0x52, 0x03, 0xf1, 0x07, 0xca, 0x98, 0x62, 0xcc, 0x61, 0xfb, 0x04,
- 0x47, 0xca, 0x7e, 0x62, 0x8c, 0x56, 0x61, 0x36, 0x61, 0xd8, 0x39, 0x10, 0xc6, 0x9a, 0xb1, 0xe4,
- 0x87, 0xf9, 0xa7, 0x16, 0x80, 0x45, 0x02, 0xca, 0x84, 0xaf, 0xf1, 0xdb, 0xed, 0x1e, 0x76, 0x0e,
- 0x78, 0xbd, 0xc0, 0x46, 0x11, 0x51, 0x96, 0xe8, 0x29, 0xd8, 0x83, 0x51, 0x24, 0x76, 0x48, 0xa3,
- 0xa8, 0xfc, 0xd5, 0xb5, 0xba, 0x0a, 0x22, 0x2b, 0x03, 0x1d, 0xca, 0x5d, 0x8b, 0x0f, 0x33, 0x39,
- 0x4d, 0x2e, 0x5b, 0xe7, 0xb4, 0xb3, 0xd0, 0x2d, 0xba, 0x82, 0x48, 0x05, 0xc2, 0x0f, 0x2e, 0xc2,
- 0x62, 0x40, 0x5d, 0x6f, 0xe0, 0x11, 0x57, 0x38, 0x9a, 0x52, 0x65, 0x41, 0x03, 0xb9, 0x73, 0xa1,
- 0x75, 0xe8, 0x92, 0x27, 0x8c, 0x84, 0xa9, 0x0f, 0x74, 0xad, 0x31, 0xc0, 0xfc, 0x1c, 0x40, 0x97,
- 0xd1, 0x03, 0x8a, 0xb6, 0x60, 0x96, 0x33, 0xd7, 0x8f, 0x94, 0xeb, 0xe5, 0x47, 0xca, 0xb1, 0x19,
- 0x2c, 0x89, 0x9a, 0x4d, 0x40, 0xd3, 0xf9, 0x04, 0xf4, 0x6d, 0x0b, 0x36, 0xd4, 0xe5, 0xd0, 0x23,
- 0xf1, 0x3d, 0x7a, 0xc8, 0x2f, 0x0a, 0x0f, 0xa8, 0x64, 0x71, 0x22, 0x79, 0xf1, 0x1d, 0xe8, 0xbb,
- 0x24, 0x61, 0x5e, 0x28, 0xca, 0x43, 0x5b, 0x9b, 0x3c, 0xc4, 0x01, 0x51, 0xc6, 0x5d, 0xcb, 0xcc,
- 0xdf, 0x94, 0xd3, 0x3b, 0x38, 0x20, 0xe8, 0x2a, 0xac, 0x1c, 0x10, 0x12, 0xd9, 0x3e, 0x75, 0xb0,
- 0x6f, 0xeb, 0x88, 0x53, 0xb7, 0x9f, 0x65, 0x3e, 0xf5, 0x11, 0x9f, 0xb9, 0x25, 0xa3, 0xce, 0x4c,
- 0xe0, 0x85, 0x23, 0x34, 0x51, 0x59, 0x67, 0x1d, 0xba, 0x51, 0x4c, 0x1d, 0x92, 0x70, 0x8f, 0x6c,
- 0x89, 0x43, 0x68, 0x0c, 0x40, 0xd7, 0x61, 0x25, 0xfd, 0xf8, 0x98, 0xc4, 0x0e, 0x09, 0x19, 0x7e,
- 0x24, 0xdf, 0x22, 0xa7, 0xad, 0xaa, 0x29, 0xf3, 0x57, 0x2d, 0x30, 0x4b, 0x52, 0xef, 0xc4, 0x34,
- 0x38, 0x41, 0x0b, 0x5e, 0x83, 0x55, 0x61, 0x87, 0x58, 0xb0, 0x1c, 0x1b, 0x42, 0x16, 0x29, 0xa7,
- 0xf9, 0x9c, 0x94, 0xa6, 0x2d, 0x31, 0x84, 0x8b, 0x47, 0xae, 0xe9, 0x5f, 0x64, 0x8b, 0xbf, 0x2f,
- 0xc0, 0xc2, 0x27, 0x43, 0x12, 0x8f, 0x32, 0x8f, 0x98, 0x09, 0x51, 0x5a, 0xe8, 0x57, 0xf8, 0x0c,
- 0x84, 0xe7, 0xd1, 0x41, 0x4c, 0x03, 0x3b, 0x7d, 0xa8, 0x9f, 0x16, 0x28, 0x3d, 0x0e, 0xbc, 0x23,
- 0x1f, 0xeb, 0xd1, 0xfb, 0x30, 0x37, 0xf0, 0x7c, 0x46, 0xe4, 0xd3, 0x78, 0x6f, 0xeb, 0xa5, 0xb2,
- 0xbf, 0x67, 0x65, 0x6e, 0xde, 0x11, 0xc8, 0x96, 0x22, 0x42, 0x7b, 0xb0, 0xe2, 0x85, 0x91, 0x28,
- 0xac, 0x62, 0x0f, 0xfb, 0xde, 0xd3, 0xf1, 0x33, 0x5c, 0x6f, 0xeb, 0xf5, 0x09, 0xbc, 0xee, 0x72,
- 0xca, 0xdd, 0x2c, 0xa1, 0x85, 0xbc, 0x12, 0x0c, 0x11, 0x58, 0xa5, 0x43, 0x56, 0x16, 0x32, 0x2b,
- 0x84, 0x6c, 0x4d, 0x10, 0x72, 0x5f, 0x90, 0xe6, 0xa5, 0xac, 0xd0, 0x32, 0xd0, 0xd8, 0x81, 0x39,
- 0xa9, 0x1c, 0xcf, 0x80, 0x03, 0x8f, 0xf8, 0xba, 0xb9, 0x20, 0x3f, 0x78, 0x90, 0xd3, 0x88, 0xc4,
- 0x38, 0xd4, 0xc9, 0x4c, 0x7f, 0x72, 0xfc, 0x43, 0xec, 0x0f, 0x75, 0xbc, 0xc9, 0x0f, 0xe3, 0x8f,
- 0xb3, 0x80, 0xca, 0x1a, 0xea, 0xb7, 0xc5, 0x98, 0x24, 0x3c, 0x41, 0x64, 0xb3, 0xe7, 0x52, 0x06,
- 0x2e, 0x32, 0xe8, 0x0f, 0xa0, 0xeb, 0x24, 0x87, 0xb6, 0x30, 0x89, 0x90, 0xd9, 0xdb, 0x7a, 0xf7,
- 0xd8, 0x26, 0xdd, 0xdc, 0xde, 0x7d, 0x28, 0xa0, 0x56, 0xc7, 0x49, 0x0e, 0xc5, 0x08, 0x7d, 0x0e,
- 0xf0, 0x55, 0x42, 0x43, 0xc5, 0x59, 0x6e, 0xfc, 0x7b, 0xc7, 0xe7, 0xfc, 0xfd, 0xdd, 0xfb, 0x3b,
- 0x92, 0x75, 0x97, 0xb3, 0x93, 0xbc, 0x1d, 0x58, 0x8c, 0x70, 0xfc, 0x78, 0x48, 0x98, 0x62, 0x2f,
- 0x7d, 0xe1, 0x83, 0xe3, 0xb3, 0xff, 0x58, 0xb2, 0x91, 0x12, 0x16, 0xa2, 0xcc, 0x97, 0xf1, 0xed,
- 0x34, 0x74, 0xb4, 0x5e, 0xbc, 0x36, 0x13, 0x1e, 0x2e, 0x5f, 0x28, 0x6c, 0x2f, 0x1c, 0x50, 0x65,
- 0xd1, 0x53, 0x1c, 0x2e, 0x1f, 0x29, 0x44, 0x6e, 0xbf, 0x0c, 0xcb, 0x31, 0x71, 0x68, 0xec, 0xf2,
- 0x1b, 0xac, 0x17, 0x78, 0xdc, 0xed, 0xe5, 0x5e, 0x2e, 0x49, 0xf8, 0x2d, 0x0d, 0x46, 0xaf, 0xc0,
- 0x92, 0xd8, 0xf6, 0x0c, 0x66, 0x5b, 0xf3, 0x24, 0x7e, 0x06, 0xf1, 0x32, 0x2c, 0x3f, 0x1e, 0xf2,
- 0xbc, 0xe1, 0xec, 0xe3, 0x18, 0x3b, 0x8c, 0xa6, 0x6f, 0x05, 0x4b, 0x02, 0xbe, 0x9d, 0x82, 0xd1,
- 0x9b, 0xb0, 0x26, 0x51, 0x49, 0xe2, 0xe0, 0x28, 0xa5, 0x20, 0xb1, 0x2a, 0x25, 0x57, 0xc5, 0xec,
- 0x6d, 0x31, 0xb9, 0xad, 0xe7, 0x90, 0x01, 0x1d, 0x87, 0x06, 0x01, 0x09, 0x59, 0x22, 0x0e, 0xb7,
- 0xae, 0x95, 0x7e, 0xa3, 0x1b, 0x70, 0x0e, 0xfb, 0x3e, 0xfd, 0xda, 0x16, 0x94, 0xae, 0x5d, 0xd2,
- 0x4e, 0x16, 0x96, 0x86, 0x40, 0xfa, 0x44, 0xe0, 0x58, 0x79, 0x45, 0x8d, 0x0b, 0xd0, 0x4d, 0xf7,
- 0x91, 0xdf, 0x07, 0x32, 0x0e, 0x29, 0xc6, 0xc6, 0x29, 0x58, 0xc8, 0xee, 0x84, 0xf1, 0xb7, 0x36,
- 0xac, 0x54, 0x04, 0x15, 0xfa, 0x02, 0x80, 0x7b, 0xab, 0x0c, 0x2d, 0xe5, 0xae, 0xff, 0x73, 0xfc,
- 0xe0, 0xe4, 0xfe, 0x2a, 0xc1, 0x16, 0xf7, 0x7e, 0x39, 0x44, 0x3f, 0x82, 0x9e, 0xf0, 0x58, 0xc5,
- 0x5d, 0xba, 0xec, 0xfb, 0xdf, 0x81, 0x3b, 0xd7, 0x55, 0xb1, 0x17, 0x31, 0x20, 0xc7, 0xc6, 0x5f,
- 0x5a, 0xd0, 0x4d, 0x05, 0xf3, 0xdb, 0x8d, 0xdc, 0x28, 0xb1, 0xd7, 0x89, 0xbe, 0xdd, 0x08, 0xd8,
- 0x1d, 0x01, 0xfa, 0x8f, 0x74, 0x25, 0xe3, 0x6d, 0x80, 0xb1, 0xfe, 0x95, 0x2a, 0xb4, 0x2a, 0x55,
- 0x30, 0x2f, 0xc3, 0x22, 0xb7, 0xac, 0x47, 0xdc, 0x5d, 0x16, 0x7b, 0x91, 0x68, 0x93, 0x4a, 0x9c,
- 0x44, 0x95, 0x87, 0xfa, 0x73, 0xeb, 0x0f, 0x06, 0x2c, 0x64, 0x9f, 0xc7, 0xd0, 0x97, 0xd0, 0xcb,
- 0xb4, 0x83, 0xd1, 0x8b, 0xe5, 0x4d, 0x2b, 0xb7, 0x97, 0x8d, 0x97, 0x26, 0x60, 0xa9, 0x0a, 0x6a,
- 0x0a, 0x85, 0x70, 0xba, 0xd4, 0x53, 0x45, 0x57, 0xca, 0xd4, 0x75, 0x1d, 0x5b, 0xe3, 0xd5, 0x46,
- 0xb8, 0xa9, 0x3c, 0x06, 0x2b, 0x15, 0x4d, 0x52, 0xf4, 0xda, 0x04, 0x2e, 0xb9, 0x46, 0xad, 0x71,
- 0xb5, 0x21, 0x76, 0x2a, 0xf5, 0x31, 0xa0, 0x72, 0x07, 0x15, 0xbd, 0x3a, 0x91, 0xcd, 0xb8, 0x43,
- 0x6b, 0xbc, 0xd6, 0x0c, 0xb9, 0x56, 0x51, 0xd9, 0x5b, 0x9d, 0xa8, 0x68, 0xae, 0x7b, 0x3b, 0x51,
- 0xd1, 0x42, 0xc3, 0x76, 0x0a, 0x1d, 0xc0, 0x72, 0xb1, 0xef, 0x8a, 0x2e, 0xd7, 0xfd, 0x27, 0x50,
- 0x6a, 0xeb, 0x1a, 0x57, 0x9a, 0xa0, 0xa6, 0xc2, 0x08, 0x9c, 0xca, 0xf7, 0x39, 0xd1, 0x2b, 0x65,
- 0xfa, 0xca, 0x4e, 0xaf, 0x71, 0x69, 0x32, 0x62, 0x56, 0xa7, 0x62, 0xef, 0xb3, 0x4a, 0xa7, 0x9a,
- 0xc6, 0x6a, 0x95, 0x4e, 0x75, 0xad, 0x54, 0x73, 0x0a, 0xfd, 0x58, 0x37, 0xd4, 0x0a, 0x3d, 0x41,
- 0xb4, 0x59, 0xc7, 0xa6, 0xba, 0x29, 0x69, 0x5c, 0x6b, 0x8c, 0xaf, 0x65, 0x5f, 0x6f, 0xf1, 0x58,
- 0xcf, 0xb4, 0x06, 0xab, 0x62, 0xbd, 0xdc, 0x6c, 0xac, 0x8a, 0xf5, 0xaa, 0xfe, 0xe2, 0x14, 0xda,
- 0x83, 0xc5, 0x5c, 0xb3, 0x10, 0xbd, 0x5c, 0x47, 0x99, 0x7f, 0xdd, 0x33, 0x5e, 0x99, 0x88, 0x97,
- 0xca, 0xb0, 0x75, 0xf6, 0x52, 0xe9, 0xaa, 0x76, 0x71, 0xf9, 0x7c, 0xf5, 0xf2, 0x24, 0xb4, 0x5c,
- 0x28, 0x97, 0x5a, 0x8a, 0x95, 0xa1, 0x5c, 0xd7, 0xb2, 0xac, 0x0c, 0xe5, 0xfa, 0x2e, 0xe5, 0x14,
- 0xfa, 0xa1, 0x2e, 0x70, 0x85, 0x23, 0x5c, 0xac, 0xa3, 0xce, 0xee, 0xfe, 0x8b, 0x47, 0x23, 0xa5,
- 0xac, 0xbf, 0x86, 0xd5, 0xaa, 0x57, 0x28, 0x74, 0xb5, 0xaa, 0x6c, 0xae, 0x7d, 0xea, 0x32, 0x36,
- 0x9b, 0xa2, 0xa7, 0x82, 0x3f, 0x85, 0x8e, 0x6e, 0xb9, 0xa1, 0x17, 0xca, 0xd4, 0x85, 0x26, 0xa5,
- 0x61, 0x1e, 0x85, 0x92, 0x71, 0xe0, 0x40, 0xc7, 0xea, 0xb8, 0x17, 0x56, 0x1f, 0xab, 0xa5, 0xae,
- 0x5d, 0x7d, 0xac, 0x96, 0x5b, 0x6b, 0x42, 0x5c, 0xea, 0x0c, 0xd9, 0xd6, 0x51, 0xbd, 0x33, 0x54,
- 0x74, 0xc6, 0xea, 0x9d, 0xa1, 0xb2, 0x1b, 0x35, 0x85, 0x7e, 0x02, 0x6b, 0xd5, 0x1d, 0x23, 0x54,
- 0x1b, 0xf1, 0x35, 0x9d, 0x2b, 0xe3, 0x7a, 0x73, 0x82, 0x54, 0xfc, 0x53, 0x9d, 0x9f, 0x0a, 0x1d,
- 0xa3, 0xfa, 0xfc, 0x54, 0xdd, 0xb7, 0x32, 0xae, 0x35, 0xc6, 0x2f, 0x87, 0x5e, 0xb6, 0xa5, 0x52,
- 0x6f, 0xed, 0x8a, 0x2e, 0x54, 0xbd, 0xb5, 0x2b, 0xbb, 0x34, 0x22, 0x3e, 0xaa, 0xda, 0x25, 0x55,
- 0xf1, 0x71, 0x44, 0x3f, 0xc7, 0xd8, 0x6c, 0x8a, 0x9e, 0x3b, 0xbe, 0xcb, 0xfd, 0x10, 0x34, 0x71,
- 0xfd, 0xb9, 0xcc, 0x7c, 0xb5, 0x21, 0x76, 0xfd, 0xee, 0xea, 0x4c, 0x3d, 0x51, 0x81, 0x42, 0xc6,
- 0xbe, 0xd6, 0x18, 0x3f, 0x95, 0x1d, 0xe9, 0x9f, 0x31, 0x32, 0xbd, 0x0c, 0x74, 0x65, 0x02, 0x9f,
- 0x4c, 0x2f, 0xc6, 0x78, 0xb5, 0x11, 0x6e, 0x55, 0xf4, 0x66, 0xbb, 0x0b, 0x47, 0xf9, 0x53, 0xa9,
- 0x25, 0x72, 0x94, 0x3f, 0x55, 0x34, 0x2c, 0x2a, 0xa2, 0x57, 0x37, 0x15, 0x26, 0x47, 0x6f, 0xa1,
- 0xb9, 0x31, 0x39, 0x7a, 0x4b, 0xfd, 0x8a, 0x29, 0xf4, 0xb3, 0x71, 0x93, 0xbe, 0xfc, 0x08, 0x88,
- 0xb6, 0x6a, 0x53, 0x51, 0xed, 0xdb, 0xa7, 0xf1, 0xc6, 0xb1, 0x68, 0x32, 0xc6, 0xff, 0x65, 0x4b,
- 0x77, 0xfc, 0x2a, 0x5f, 0xe1, 0xd0, 0x9b, 0x0d, 0x18, 0x97, 0x1e, 0x12, 0x8d, 0xb7, 0x8e, 0x49,
- 0x95, 0x59, 0xd0, 0x47, 0x30, 0x2b, 0xaa, 0x4f, 0x74, 0xfe, 0xe8, 0xb2, 0xd4, 0xb8, 0x50, 0x3d,
- 0x9f, 0x16, 0x57, 0x9c, 0xdb, 0xde, 0x9c, 0xf8, 0x1d, 0xf7, 0x8d, 0x7f, 0x06, 0x00, 0x00, 0xff,
- 0xff, 0x96, 0x31, 0x6f, 0x58, 0xa5, 0x2b, 0x00, 0x00,
-}
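The generated UnimplementedVolumeServerServer above returns codes.Unimplemented for every RPC, so a concrete server can embed it and override only the methods it supports; embedding is what keeps older implementations compiling as new RPCs such as VolumeStatus are added to the service. A minimal sketch, assuming the standard seaweedfs import path for the generated package (the empty response and server wiring are illustrative, not taken from this change):

```go
package main

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

// myVolumeServer embeds the generated stub, so any RPC it does not
// override answers with codes.Unimplemented instead of failing to compile
// when volume_server.proto grows new methods.
type myVolumeServer struct {
	volume_server_pb.UnimplementedVolumeServerServer
}

// VolumeStatus overrides one of the newly registered unary RPCs.
func (s *myVolumeServer) VolumeStatus(ctx context.Context, req *volume_server_pb.VolumeStatusRequest) (*volume_server_pb.VolumeStatusResponse, error) {
	return &volume_server_pb.VolumeStatusResponse{}, nil
}

func main() {
	s := grpc.NewServer()
	volume_server_pb.RegisterVolumeServerServer(s, &myVolumeServer{})
	_ = s // s.Serve(listener) would follow in real wiring
}
```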
diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go
new file mode 100644
index 000000000..519a9a201
--- /dev/null
+++ b/weed/replication/repl_util/replication_util.go
@@ -0,0 +1,42 @@
+package repl_util
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/replication/source"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error {
+
+ for _, chunk := range chunkViews {
+
+ fileUrls, err := filerSource.LookupFileId(chunk.FileId)
+ if err != nil {
+ return err
+ }
+
+ var writeErr error
+ var shouldRetry bool
+
+ for _, fileUrl := range fileUrls {
+ shouldRetry, err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
+ writeErr = writeFunc(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read from %s: %v", fileUrl, err)
+ } else if writeErr != nil {
+ glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr)
+ } else {
+ break
+ }
+ }
+ if shouldRetry && err != nil {
+ return err
+ }
+ if writeErr != nil {
+ return writeErr
+ }
+ }
+ return nil
+}
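The new helper centralizes the per-chunk lookup/read/retry loop that the sinks below previously each implemented inline: for every chunk it looks up all replica URLs, streams from the first one that works, falls through to the next URL on a read or write failure, and surfaces the error once the chunk cannot be copied. A hedged usage sketch; the chunk views and filer source are assumed to be prepared the way the sinks below prepare them:

```go
package example

import (
	"bytes"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/replication/repl_util"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
)

// copyToBuffer shows the intended call pattern: the caller supplies only a
// writeFunc, while URL lookup, streaming reads, and retry-on-read-error
// stay inside CopyFromChunkViews.
func copyToBuffer(chunkViews []*filer.ChunkView, filerSource *source.FilerSource) ([]byte, error) {
	var buf bytes.Buffer
	writeFunc := func(data []byte) error {
		_, err := buf.Write(data)
		return err
	}
	if err := repl_util.CopyFromChunkViews(chunkViews, filerSource, writeFunc); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```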
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index 7353cdc91..d7e609c68 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -3,8 +3,10 @@ package replication
import (
"context"
"fmt"
- "path/filepath"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "google.golang.org/grpc"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -18,10 +20,10 @@ type Replicator struct {
source *source.FilerSource
}
-func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator {
+func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator {
source := &source.FilerSource{}
- source.Initialize(sourceConfig)
+ source.Initialize(sourceConfig, configPrefix)
dataSink.SetSourceFiler(source)
@@ -32,37 +34,64 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin
}
func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error {
+ if message.IsFromOtherCluster && r.sink.GetName() == "filer" {
+ return nil
+ }
if !strings.HasPrefix(key, r.source.Dir) {
glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
return nil
}
- newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]))
+ var dateKey string
+ if r.sink.IsIncremental() {
+ var mTime int64
+ if message.NewEntry != nil {
+ mTime = message.NewEntry.Attributes.Mtime
+ } else if message.OldEntry != nil {
+ mTime = message.OldEntry.Attributes.Mtime
+ }
+ dateKey = time.Unix(mTime, 0).Format("2006-01-02")
+ }
+ newKey := util.Join(r.sink.GetSinkToDirectory(), dateKey, key[len(r.source.Dir):])
glog.V(3).Infof("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
glog.V(4).Infof("deleting %v", key)
- return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
+ return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
}
if message.OldEntry == nil && message.NewEntry != nil {
glog.V(4).Infof("creating %v", key)
- return r.sink.CreateEntry(ctx, key, message.NewEntry)
+ return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
}
if message.OldEntry == nil && message.NewEntry == nil {
glog.V(0).Infof("weird message %+v", message)
return nil
}
- foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
+ foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures)
if foundExisting {
glog.V(4).Infof("updated %v", key)
return err
}
- err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false)
+ err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false, message.Signatures)
if err != nil {
return fmt.Errorf("delete old entry %v: %v", key, err)
}
glog.V(4).Infof("creating missing %v", key)
- return r.sink.CreateEntry(ctx, key, message.NewEntry)
+ return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
+}
+
+func ReadFilerSignature(grpcDialOption grpc.DialOption, filer string) (filerSignature int32, readErr error) {
+ if readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}); err != nil {
+ return fmt.Errorf("GetFilerConfiguration %s: %v", filer, err)
+ } else {
+ filerSignature = resp.Signature
+ }
+ return nil
+ }); readErr != nil {
+ return 0, readErr
+ }
+ return filerSignature, nil
}
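Two behavioral changes in Replicate are worth noting: events tagged IsFromOtherCluster are no longer re-replicated to a filer sink, and incremental sinks now bucket entries under a per-day directory derived from the entry's mtime instead of mirroring the source tree. A small sketch of just the date-keyed path computation, using the same util.Join and time formatting as the code above; the argument values are illustrative:

```go
package example

import (
	"time"

	"github.com/chrislusf/seaweedfs/weed/util"
)

// incrementalKey mirrors the dateKey logic in Replicate: an incremental
// sink buckets each entry under its mtime date, e.g.
//   ("/backup", "/src/f", mtime on 2021-03-15) -> "/backup/2021-03-15/src/f"
func incrementalKey(sinkDir, relativeKey string, mtime int64, isIncremental bool) string {
	var dateKey string
	if isIncremental {
		dateKey = time.Unix(mtime, 0).Format("2006-01-02")
	}
	return util.Join(sinkDir, dateKey, relativeKey)
}
```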
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 6381908a1..d13a1049b 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -4,11 +4,12 @@ import (
"bytes"
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/replication/repl_util"
"net/url"
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -17,10 +18,11 @@ import (
)
type AzureSink struct {
- containerURL azblob.ContainerURL
- container string
- dir string
- filerSource *source.FilerSource
+ containerURL azblob.ContainerURL
+ container string
+ dir string
+ filerSource *source.FilerSource
+ isIncremental bool
}
func init() {
@@ -35,12 +37,17 @@ func (g *AzureSink) GetSinkToDirectory() string {
return g.dir
}
-func (g *AzureSink) Initialize(configuration util.Configuration) error {
+func (g *AzureSink) IsIncremental() bool {
+ return g.isIncremental
+}
+
+func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error {
+ g.isIncremental = configuration.GetBool(prefix + "is_incremental")
return g.initialize(
- configuration.GetString("account_name"),
- configuration.GetString("account_key"),
- configuration.GetString("container"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"account_name"),
+ configuration.GetString(prefix+"account_key"),
+ configuration.GetString(prefix+"container"),
+ configuration.GetString(prefix+"directory"),
)
}
@@ -70,7 +77,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
return nil
}
-func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
key = cleanKey(key)
@@ -78,7 +85,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
key = key + "/"
}
- if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
+ if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(),
azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
}
@@ -87,7 +94,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
}
-func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
key = cleanKey(key)
@@ -95,44 +102,32 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb
return nil
}
- totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ totalSize := filer.FileSize(entry)
+ chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
// Create a URL that references a to-be-created blob in your
// Azure Storage account's container.
appendBlobURL := g.containerURL.NewAppendBlobURL(key)
- _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+ _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
if err != nil {
return err
}
- for _, chunk := range chunkViews {
-
- fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
- if err != nil {
- return err
- }
-
- var writeErr error
- _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
- _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
- })
-
- if readErr != nil {
- return readErr
- }
- if writeErr != nil {
- return writeErr
- }
+ writeFunc := func(data []byte) error {
+ _, writeErr := appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
+ return writeErr
+ }
+ if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
+ return err
}
return nil
}
-func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
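Initialize now receives a configuration prefix, so several sinks can read their settings from one configuration source without key collisions. A sketch of the lookup convention against the util.Configuration interface used above; the example prefix "sink.azure." is an assumption, and only the key names shown in this diff are confirmed:

```go
package example

import (
	"github.com/chrislusf/seaweedfs/weed/util"
)

// readAzureSinkConfig illustrates the prefix convention: a caller passes a
// prefix such as "sink.azure." and every key is resolved under it, exactly
// as AzureSink.Initialize does above.
func readAzureSinkConfig(configuration util.Configuration, prefix string) (accountName, container string, isIncremental bool) {
	accountName = configuration.GetString(prefix + "account_name")
	container = configuration.GetString(prefix + "container")
	isIncremental = configuration.GetBool(prefix + "is_incremental")
	return
}
```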
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 35c2230fa..90a0bb2e8 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -2,9 +2,10 @@ package B2Sink
import (
"context"
+ "github.com/chrislusf/seaweedfs/weed/replication/repl_util"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -13,10 +14,11 @@ import (
)
type B2Sink struct {
- client *b2.Client
- bucket string
- dir string
- filerSource *source.FilerSource
+ client *b2.Client
+ bucket string
+ dir string
+ filerSource *source.FilerSource
+ isIncremental bool
}
func init() {
@@ -31,12 +33,17 @@ func (g *B2Sink) GetSinkToDirectory() string {
return g.dir
}
-func (g *B2Sink) Initialize(configuration util.Configuration) error {
+func (g *B2Sink) IsIncremental() bool {
+ return g.isIncremental
+}
+
+func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error {
+ g.isIncremental = configuration.GetBool(prefix + "is_incremental")
return g.initialize(
- configuration.GetString("b2_account_id"),
- configuration.GetString("b2_master_application_key"),
- configuration.GetString("bucket"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"b2_account_id"),
+ configuration.GetString(prefix+"b2_master_application_key"),
+ configuration.GetString(prefix+"bucket"),
+ configuration.GetString(prefix+"directory"),
)
}
@@ -45,8 +52,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) {
}
func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
- ctx := context.Background()
- client, err := b2.NewClient(ctx, accountId, accountKey)
+ client, err := b2.NewClient(context.Background(), accountId, accountKey)
if err != nil {
return err
}
@@ -58,7 +64,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
return nil
}
-func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
key = cleanKey(key)
@@ -66,18 +72,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet
key = key + "/"
}
- bucket, err := g.client.Bucket(ctx, g.bucket)
+ bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
- return targetObject.Delete(ctx)
+ return targetObject.Delete(context.Background())
}
-func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
key = cleanKey(key)
@@ -85,46 +91,33 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En
return nil
}
- totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ totalSize := filer.FileSize(entry)
+ chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
- bucket, err := g.client.Bucket(ctx, g.bucket)
+ bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
- writer := targetObject.NewWriter(ctx)
-
- for _, chunk := range chunkViews {
-
- fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
- if err != nil {
- return err
- }
-
- var writeErr error
- _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
- _, err := writer.Write(data)
- if err != nil {
- writeErr = err
- }
- })
-
- if readErr != nil {
- return readErr
- }
- if writeErr != nil {
- return writeErr
- }
+ writer := targetObject.NewWriter(context.Background())
+ writeFunc := func(data []byte) error {
+ _, writeErr := writer.Write(data)
+ return writeErr
}
- return writer.Close()
+ defer writer.Close()
+
+ if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
+ return err
+ }
+
+ return nil
}
-func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
key = cleanKey(key)
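The B2 path adapts a streaming object writer to the same writeFunc callback. One subtlety: Close on the blazer writer is what commits the upload, so a variant that surfaces the close error looks like the sketch below. This is a conservative variation, not the committed code; the kurin/blazer API is inferred from the b2.Client usage above:

```go
package example

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/replication/repl_util"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
	"github.com/kurin/blazer/b2"
)

// writeObject streams chunk data into a B2 object. Close finalizes the
// upload, so its error is returned rather than discarded.
func writeObject(ctx context.Context, bucket *b2.Bucket, key string, chunkViews []*filer.ChunkView, filerSource *source.FilerSource) error {
	writer := bucket.Object(key).NewWriter(ctx)
	writeFunc := func(data []byte) error {
		_, err := writer.Write(data)
		return err
	}
	if err := repl_util.CopyFromChunkViews(chunkViews, filerSource, writeFunc); err != nil {
		writer.Close() // best effort; the copy error takes precedence
		return err
	}
	return writer.Close()
}
```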
diff --git a/weed/replication/sink/filersink/README.txt b/weed/replication/sink/filersink/README.txt
new file mode 100644
index 000000000..4ba0fc752
--- /dev/null
+++ b/weed/replication/sink/filersink/README.txt
@@ -0,0 +1,12 @@
+How replication works
+======
+
+All metadata changes within the current cluster are notified to a message queue.
+
+If a metadata change originates from another cluster, it is not notified to the message queue.
+
+This makes active<=>active replication possible.
+
+
+All metadata changes are also published as metadata change events,
+so all mounts listening for metadata changes will get updated.
\ No newline at end of file
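In code, the active<=>active safeguard this README describes has two halves visible elsewhere in this diff: the filer sink tags everything it writes, and the replicator drops tagged events bound for a filer sink. A condensed, hedged sketch of both halves using the filer_pb types from this change:

```go
package example

import (
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// tagReplicatedEntry is the sink half: entries written by replication are
// marked IsFromOtherCluster, so the destination cluster's own metadata
// event for this write carries the flag.
func tagReplicatedEntry(dir string, entry *filer_pb.Entry) *filer_pb.CreateEntryRequest {
	return &filer_pb.CreateEntryRequest{
		Directory:          dir,
		Entry:              entry,
		IsFromOtherCluster: true,
	}
}

// shouldSkip is the replicator half: flagged events are not forwarded to a
// filer sink again, which is what breaks the active<=>active cycle.
func shouldSkip(message *filer_pb.EventNotification, sinkName string) bool {
	return message.IsFromOtherCluster && sinkName == "filer"
}
```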
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index 97e9671a3..a7392d856 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -3,41 +3,46 @@ package filersink
import (
"context"
"fmt"
- "google.golang.org/grpc"
- "strings"
+ "github.com/chrislusf/seaweedfs/weed/util"
"sync"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "github.com/chrislusf/seaweedfs/weed/util"
)
-func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) {
if len(sourceChunks) == 0 {
return
}
+
+ replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))
+
var wg sync.WaitGroup
- for _, sourceChunk := range sourceChunks {
+ for chunkIndex, sourceChunk := range sourceChunks {
wg.Add(1)
- go func(chunk *filer_pb.FileChunk) {
+ go func(chunk *filer_pb.FileChunk, index int) {
defer wg.Done()
- replicatedChunk, e := fs.replicateOneChunk(ctx, chunk)
+ replicatedChunk, e := fs.replicateOneChunk(chunk, path)
if e != nil {
err = e
+ return
}
- replicatedChunks = append(replicatedChunks, replicatedChunk)
- }(sourceChunk)
+ replicatedChunks[index] = replicatedChunk
+ }(sourceChunk, chunkIndex)
}
wg.Wait()
return
}
-func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path string) (*filer_pb.FileChunk, error) {
- fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
+ fileId, err := fs.fetchAndWrite(sourceChunk, path)
if err != nil {
return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
}
@@ -49,21 +54,23 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
Mtime: sourceChunk.Mtime,
ETag: sourceChunk.ETag,
SourceFileId: sourceChunk.GetFileIdString(),
+ CipherKey: sourceChunk.CipherKey,
+ IsCompressed: sourceChunk.IsCompressed,
}, nil
}
-func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) (fileId string, err error) {
- filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
+ filename, header, resp, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
if err != nil {
return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
}
- defer readCloser.Close()
+ defer util.CloseResponse(resp)
var host string
var auth security.EncodedJwt
- if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -71,13 +78,18 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
+ DiskType: fs.diskType,
+ Path: path,
}
- resp, err := client.AssignVolume(ctx, request)
+ resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
@@ -87,13 +99,16 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+ if fs.writeChunkByFiler {
+ fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId)
+ }
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
- uploadResult, err := operation.Upload(fileUrl, filename, readCloser,
- "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
+ // fetch data as is, regardless whether it is encrypted or not
+ uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
if err != nil {
- glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
+ glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
@@ -104,19 +119,16 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
return
}
-func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSink{})
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, fs.grpcAddress, fs.grpcDialOption)
}
-
-func volumeId(fileId string) string {
- lastCommaIndex := strings.LastIndex(fileId, ",")
- if lastCommaIndex > 0 {
- return fileId[:lastCommaIndex]
- }
- return fileId
+func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string {
+ return location.Url
}
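replicateChunks now preallocates the result slice and writes each replicated chunk at its source index, so output order matches input order no matter when each goroutine finishes; the earlier append under concurrent goroutines could reorder results. The pattern in isolation, with a hypothetical work function standing in for replicateOneChunk and the error recorded under a mutex (a small hardening relative to the shared err variable in the code above):

```go
package example

import "sync"

// mapOrdered applies work concurrently while keeping results in input
// order, the same indexed-slice pattern replicateChunks uses.
func mapOrdered(items []string, work func(string) (string, error)) ([]string, error) {
	results := make([]string, len(items))
	var (
		firstErr error
		mu       sync.Mutex
		wg       sync.WaitGroup
	)
	for i, item := range items {
		wg.Add(1)
		go func(item string, index int) {
			defer wg.Done()
			out, err := work(item)
			if err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err // keep the first failure, discard later ones
				}
				mu.Unlock()
				return
			}
			results[index] = out
		}(item, i)
	}
	wg.Wait()
	return results, firstErr
}
```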
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index f99c7fdf6..d7c5fccc3 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -3,11 +3,14 @@ package filersink
import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/security"
- "github.com/spf13/viper"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/security"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -16,14 +19,18 @@ import (
)
type FilerSink struct {
- filerSource *source.FilerSource
- grpcAddress string
- dir string
- replication string
- collection string
- ttlSec int32
- dataCenter string
- grpcDialOption grpc.DialOption
+ filerSource *source.FilerSource
+ grpcAddress string
+ dir string
+ replication string
+ collection string
+ ttlSec int32
+ diskType string
+ dataCenter string
+ grpcDialOption grpc.DialOption
+ address string
+ writeChunkByFiler bool
+ isIncremental bool
}
func init() {
@@ -38,58 +45,63 @@ func (fs *FilerSink) GetSinkToDirectory() string {
return fs.dir
}
-func (fs *FilerSink) Initialize(configuration util.Configuration) error {
- return fs.initialize(
- configuration.GetString("grpcAddress"),
- configuration.GetString("directory"),
- configuration.GetString("replication"),
- configuration.GetString("collection"),
- configuration.GetInt("ttlSec"),
- )
+func (fs *FilerSink) IsIncremental() bool {
+ return fs.isIncremental
+}
+
+func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
+ fs.isIncremental = configuration.GetBool(prefix + "is_incremental")
+ return fs.DoInitialize(
+ "",
+ configuration.GetString(prefix+"grpcAddress"),
+ configuration.GetString(prefix+"directory"),
+ configuration.GetString(prefix+"replication"),
+ configuration.GetString(prefix+"collection"),
+ configuration.GetInt(prefix+"ttlSec"),
+ configuration.GetString(prefix+"disk"),
+ security.LoadClientTLS(util.GetViper(), "grpc.client"),
+ false)
}
func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
fs.filerSource = s
}
-func (fs *FilerSink) initialize(grpcAddress string, dir string,
- replication string, collection string, ttlSec int) (err error) {
+func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
+ replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
+ fs.address = address
+ if fs.address == "" {
+ fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
+ }
fs.grpcAddress = grpcAddress
fs.dir = dir
fs.replication = replication
fs.collection = collection
fs.ttlSec = int32(ttlSec)
- fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
+ fs.diskType = diskType
+ fs.grpcDialOption = grpcDialOption
+ fs.writeChunkByFiler = writeChunkByFiler
return nil
}
-func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
- return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- dir, name := filer2.FullPath(key).DirAndName()
-
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir,
- Name: name,
- IsDeleteData: deleteIncludeChunks,
- }
+func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
- glog.V(1).Infof("delete entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(0).Infof("delete entry %s: %v", key, err)
- return fmt.Errorf("delete entry %s: %v", key, err)
- }
+ dir, name := util.FullPath(key).DirAndName()
- return nil
- })
+ glog.V(4).Infof("delete entry: %v", key)
+ err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
+ if err != nil {
+ glog.V(0).Infof("delete entry %s: %v", key, err)
+ return fmt.Errorf("delete entry %s: %v", key, err)
+ }
+ return nil
}
-func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
- return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- dir, name := filer2.FullPath(key).DirAndName()
+ dir, name := util.FullPath(key).DirAndName()
// look up existing entry
lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
@@ -97,21 +109,21 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
Name: name,
}
glog.V(1).Infof("lookup: %v", lookupRequest)
- if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil {
- if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
- glog.V(0).Infof("already replicated %s", key)
+ if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
+ if filer.ETag(resp.Entry) == filer.ETag(entry) {
+ glog.V(3).Infof("already replicated %s", key)
return nil
}
}
- replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks)
+ replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
if err != nil {
- glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
- return fmt.Errorf("replicate entry chunks %s: %v", key, err)
+ // only warning here since the source chunk may have been deleted already
+ glog.Warningf("replicate entry chunks %s: %v", key, err)
}
- glog.V(0).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+ glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
request := &filer_pb.CreateEntryRequest{
Directory: dir,
@@ -120,11 +132,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
IsDirectory: entry.IsDirectory,
Attributes: entry.Attributes,
Chunks: replicatedChunks,
+ Content: entry.Content,
},
+ IsFromOtherCluster: true,
+ Signatures: signatures,
}
- glog.V(1).Infof("create: %v", request)
- if _, err := client.CreateEntry(ctx, request); err != nil {
+ glog.V(3).Infof("create: %v", request)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("create entry %s: %v", key, err)
return fmt.Errorf("create entry %s: %v", key, err)
}
@@ -133,13 +148,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
})
}
-func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
- dir, name := filer2.FullPath(key).DirAndName()
+ dir, name := util.FullPath(key).DirAndName()
// read existing entry
var existingEntry *filer_pb.Entry
- err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
@@ -147,7 +162,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
glog.V(4).Infof("lookup entry: %v", request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
+ resp, err := filer_pb.LookupEntry(client, request)
if err != nil {
glog.V(0).Infof("lookup %s: %v", key, err)
return err
@@ -162,28 +177,31 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
return false, fmt.Errorf("lookup %s: %v", key, err)
}
- glog.V(0).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
+ glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
// skip if already changed
// this usually happens when the messages are not ordered
- glog.V(0).Infof("late updates %s", key)
- } else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) {
+ glog.V(2).Infof("late updates %s", key)
+ } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
// skip if no change
// this usually happens when retrying the replication
- glog.V(0).Infof("already replicated %s", key)
+ glog.V(3).Infof("already replicated %s", key)
} else {
// find out what changed
- deletedChunks, newChunks := compareChunks(oldEntry, newEntry)
+ deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
+ if err != nil {
+			return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err)
+ }
// delete the chunks that are deleted from the source
if deleteIncludeChunks {
// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
- existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks)
+ existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
}
// replicate the chunks that are new in the source
- replicatedChunks, err := fs.replicateChunks(ctx, newChunks)
+ replicatedChunks, err := fs.replicateChunks(newChunks, key)
if err != nil {
			return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
}
@@ -191,14 +209,16 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
// save updated meta data
- return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
- Directory: newParentPath,
- Entry: existingEntry,
+ Directory: newParentPath,
+ Entry: existingEntry,
+ IsFromOtherCluster: true,
+ Signatures: signatures,
}
- if _, err := client.UpdateEntry(ctx, request); err != nil {
+ if _, err := client.UpdateEntry(context.Background(), request); err != nil {
return fmt.Errorf("update existingEntry %s: %v", key, err)
}
@@ -206,8 +226,21 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
})
}
-func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) {
- deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks)
- newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks)
+func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+ aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
+ if aErr != nil {
+ return nil, nil, aErr
+ }
+ bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
+ if bErr != nil {
+ return nil, nil, bErr
+ }
+
+ deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
+ deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
+
+ newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
+ newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
+
return
}
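
compareChunks now resolves chunk manifests on both sides before diffing, so manifest-referenced chunks are compared too. The diff itself is a set difference keyed by file id; a minimal self-contained sketch (assuming filer.DoMinusChunks keys on file id, which may differ from the actual implementation):

package main

import "fmt"

type fileChunk struct{ FileId string }

// minusChunks returns the chunks in as that are absent from bs, keyed by FileId.
func minusChunks(as, bs []*fileChunk) (delta []*fileChunk) {
	seen := make(map[string]bool, len(bs))
	for _, b := range bs {
		seen[b.FileId] = true
	}
	for _, a := range as {
		if !seen[a.FileId] {
			delta = append(delta, a)
		}
	}
	return
}

func main() {
	oldChunks := []*fileChunk{{"3,01"}, {"3,02"}}
	newChunks := []*fileChunk{{"3,02"}, {"3,03"}}
	fmt.Println(len(minusChunks(oldChunks, newChunks))) // 1: "3,01" was deleted
	fmt.Println(len(minusChunks(newChunks, oldChunks))) // 1: "3,03" is new
}
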
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index abd7c49b9..5cf5b7317 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -3,23 +3,26 @@ package gcssink
import (
"context"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/replication/repl_util"
"os"
"cloud.google.com/go/storage"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "google.golang.org/api/option"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/api/option"
)
type GcsSink struct {
- client *storage.Client
- bucket string
- dir string
- filerSource *source.FilerSource
+ client *storage.Client
+ bucket string
+ dir string
+ filerSource *source.FilerSource
+ isIncremental bool
}
func init() {
@@ -34,11 +37,16 @@ func (g *GcsSink) GetSinkToDirectory() string {
return g.dir
}
-func (g *GcsSink) Initialize(configuration util.Configuration) error {
+func (g *GcsSink) IsIncremental() bool {
+ return g.isIncremental
+}
+
+func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error {
+ g.isIncremental = configuration.GetBool(prefix + "is_incremental")
return g.initialize(
- configuration.GetString("google_application_credentials"),
- configuration.GetString("bucket"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"google_application_credentials"),
+ configuration.GetString(prefix+"bucket"),
+ configuration.GetString(prefix+"directory"),
)
}
@@ -50,7 +58,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
g.bucket = bucketName
g.dir = dir
- ctx := context.Background()
// Creates a client.
if google_application_credentials == "" {
var found bool
@@ -59,7 +66,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
}
}
- client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials))
+ client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
@@ -69,13 +76,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
return nil
}
-func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
if isDirectory {
key = key + "/"
}
- if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil {
+ if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil {
return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err)
}
@@ -83,35 +90,24 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele
}
-func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
if entry.IsDirectory {
return nil
}
- totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
-
- wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
+ totalSize := filer.FileSize(entry)
+ chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
- for _, chunk := range chunkViews {
-
- fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
- if err != nil {
- return err
- }
-
- _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
- wc.Write(data)
- })
-
- if err != nil {
- return err
- }
+ wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
+ defer wc.Close()
+ writeFunc := func(data []byte) error {
+ _, writeErr := wc.Write(data)
+ return writeErr
}
- if err := wc.Close(); err != nil {
+	if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil {
 		return err
 	}
+
+	// the GCS writer only commits the object on Close, so check that error too
+	if err := wc.Close(); err != nil {
+		return err
+	}
@@ -119,7 +115,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E
}
-func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
// TODO improve efficiency
return false, nil
}
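
Both the GCS and local sinks now route the chunk copy loop through repl_util.CopyFromChunkViews with a writeFunc callback. The helper itself is outside this diff; a self-contained sketch of the shape it presumably has, with a plain lookup function standing in for *source.FilerSource and http.Get in place of the streaming read:

package replutil

import (
	"fmt"
	"io"
	"net/http"
)

type chunkView struct{ FileId string }

// fetch reads one chunk replica over HTTP; the real helper streams instead.
func fetch(fileUrl string) ([]byte, error) {
	resp, err := http.Get(fileUrl)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

// copyFromChunkViews hands every chunk's bytes to writeFunc,
// trying each replica URL in order until one read succeeds.
func copyFromChunkViews(chunkViews []*chunkView,
	lookupFileId func(fileId string) ([]string, error),
	writeFunc func(data []byte) error) error {

	for _, chunk := range chunkViews {
		fileUrls, err := lookupFileId(chunk.FileId)
		if err != nil {
			return err
		}
		var lastErr error
		for _, fileUrl := range fileUrls {
			data, readErr := fetch(fileUrl)
			if readErr != nil {
				lastErr = readErr
				continue // fall back to the next replica
			}
			if writeErr := writeFunc(data); writeErr != nil {
				return writeErr
			}
			lastErr = nil
			break
		}
		if lastErr != nil {
			return fmt.Errorf("read chunk %s: %v", chunk.FileId, lastErr)
		}
	}
	return nil
}
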
diff --git a/weed/replication/sink/localsink/local_incremental_sink.go b/weed/replication/sink/localsink/local_incremental_sink.go
new file mode 100644
index 000000000..a1d49e28a
--- /dev/null
+++ b/weed/replication/sink/localsink/local_incremental_sink.go
@@ -0,0 +1,17 @@
+package localsink
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/replication/sink"
+)
+
+type LocalIncSink struct {
+ LocalSink
+}
+
+func (localincsink *LocalIncSink) GetName() string {
+ return "local_incremental"
+}
+
+func init() {
+ sink.Sinks = append(sink.Sinks, &LocalIncSink{})
+}
diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go
new file mode 100644
index 000000000..2b9b3e69a
--- /dev/null
+++ b/weed/replication/sink/localsink/local_sink.go
@@ -0,0 +1,105 @@
+package localsink
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/replication/repl_util"
+ "github.com/chrislusf/seaweedfs/weed/replication/sink"
+ "github.com/chrislusf/seaweedfs/weed/replication/source"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+type LocalSink struct {
+ Dir string
+ filerSource *source.FilerSource
+}
+
+func init() {
+ sink.Sinks = append(sink.Sinks, &LocalSink{})
+}
+
+func (localsink *LocalSink) SetSourceFiler(s *source.FilerSource) {
+ localsink.filerSource = s
+}
+
+func (localsink *LocalSink) GetName() string {
+ return "local"
+}
+
+func (localsink *LocalSink) isMultiPartEntry(key string) bool {
+ return strings.HasSuffix(key, ".part") && strings.Contains(key, "/.uploads/")
+}
+
+func (localsink *LocalSink) initialize(dir string) error {
+ localsink.Dir = dir
+ return nil
+}
+
+func (localsink *LocalSink) Initialize(configuration util.Configuration, prefix string) error {
+ dir := configuration.GetString(prefix + "directory")
+ glog.V(4).Infof("sink.local.directory: %v", dir)
+ return localsink.initialize(dir)
+}
+
+func (localsink *LocalSink) GetSinkToDirectory() string {
+ return localsink.Dir
+}
+
+func (localsink *LocalSink) IsIncremental() bool {
+ return true
+}
+
+func (localsink *LocalSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
+ if localsink.isMultiPartEntry(key) {
+ return nil
+ }
+ glog.V(4).Infof("Delete Entry key: %s", key)
+ if err := os.Remove(key); err != nil {
+ glog.V(0).Infof("remove entry key %s: %s", key, err)
+ }
+ return nil
+}
+
+func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
+ if entry.IsDirectory || localsink.isMultiPartEntry(key) {
+ return nil
+ }
+ glog.V(4).Infof("Create Entry key: %s", key)
+
+ totalSize := filer.FileSize(entry)
+ chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+
+ dir := filepath.Dir(key)
+
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+		glog.V(4).Infof("Create Directory key: %s", dir)
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ }
+
+	// open once and append; ioutil.WriteFile would truncate the file on every chunk
+	dstFile, err := os.OpenFile(key, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+	writeFunc := func(data []byte) error {
+		_, writeErr := dstFile.Write(data)
+		return writeErr
+	}
+
+ if err := repl_util.CopyFromChunkViews(chunkViews, localsink.filerSource, writeFunc); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (localsink *LocalSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
+ if localsink.isMultiPartEntry(key) {
+ return true, nil
+ }
+ glog.V(4).Infof("Update Entry key: %s", key)
+	// returning false lets the replicator fall back to delete-and-create
+ return false, nil
+}
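
The .part filter above keeps S3 multipart-upload temporaries out of the sink. The key shapes it matches, shown with hypothetical keys:

package main

import (
	"fmt"
	"strings"
)

// same predicate as LocalSink.isMultiPartEntry above
func isMultiPartEntry(key string) bool {
	return strings.HasSuffix(key, ".part") && strings.Contains(key, "/.uploads/")
}

func main() {
	fmt.Println(isMultiPartEntry("/bkt/.uploads/upload1/0001.part")) // true: skipped
	fmt.Println(isMultiPartEntry("/bkt/dir/object.part"))            // false: replicated
}
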
diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go
index dd54f0005..4ffd09462 100644
--- a/weed/replication/sink/replication_sink.go
+++ b/weed/replication/sink/replication_sink.go
@@ -1,7 +1,6 @@
package sink
import (
- "context"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -9,12 +8,13 @@ import (
type ReplicationSink interface {
GetName() string
- Initialize(configuration util.Configuration) error
- DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
- CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error
- UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
+ Initialize(configuration util.Configuration, prefix string) error
+ DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error
+ CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error
+ UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error)
GetSinkToDirectory() string
SetSourceFiler(s *source.FilerSource)
+ IsIncremental() bool
}
var (
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 4cff341d0..9a36573e3 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -11,7 +11,8 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -20,11 +21,13 @@ import (
)
type S3Sink struct {
- conn s3iface.S3API
- region string
- bucket string
- dir string
- filerSource *source.FilerSource
+ conn s3iface.S3API
+ region string
+ bucket string
+ dir string
+ endpoint string
+ filerSource *source.FilerSource
+ isIncremental bool
}
func init() {
@@ -39,16 +42,24 @@ func (s3sink *S3Sink) GetSinkToDirectory() string {
return s3sink.dir
}
-func (s3sink *S3Sink) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region"))
- glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket"))
- glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory"))
+func (s3sink *S3Sink) IsIncremental() bool {
+ return s3sink.isIncremental
+}
+
+func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
+ glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
+ glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
+ glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
+ glog.V(0).Infof("sink.s3.is_incremental: %v", configuration.GetString(prefix+"is_incremental"))
+ s3sink.isIncremental = configuration.GetBool(prefix + "is_incremental")
return s3sink.initialize(
- configuration.GetString("aws_access_key_id"),
- configuration.GetString("aws_secret_access_key"),
- configuration.GetString("region"),
- configuration.GetString("bucket"),
- configuration.GetString("directory"),
+ configuration.GetString(prefix+"aws_access_key_id"),
+ configuration.GetString(prefix+"aws_secret_access_key"),
+ configuration.GetString(prefix+"region"),
+ configuration.GetString(prefix+"bucket"),
+ configuration.GetString(prefix+"directory"),
+ configuration.GetString(prefix+"endpoint"),
)
}
@@ -56,13 +67,16 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) {
s3sink.filerSource = s
}
-func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error {
+func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error {
s3sink.region = region
s3sink.bucket = bucket
s3sink.dir = dir
+ s3sink.endpoint = endpoint
config := &aws.Config{
- Region: aws.String(s3sink.region),
+ Region: aws.String(s3sink.region),
+ Endpoint: aws.String(s3sink.endpoint),
+ S3ForcePathStyle: aws.Bool(true),
}
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
@@ -77,7 +91,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc
return nil
}
-func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
key = cleanKey(key)
@@ -89,8 +103,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory,
}
-func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
-
+func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
key = cleanKey(key)
if entry.IsDirectory {
@@ -99,38 +112,40 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_
uploadId, err := s3sink.createMultipartUpload(key, entry)
if err != nil {
- return err
+ return fmt.Errorf("createMultipartUpload: %v", err)
}
- totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ totalSize := filer.FileSize(entry)
+ chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+
+ parts := make([]*s3.CompletedPart, len(chunkViews))
- var parts []*s3.CompletedPart
var wg sync.WaitGroup
for chunkIndex, chunk := range chunkViews {
partId := chunkIndex + 1
wg.Add(1)
- go func(chunk *filer2.ChunkView) {
+ go func(chunk *filer.ChunkView, index int) {
defer wg.Done()
- if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil {
+ if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
err = uploadErr
+ glog.Errorf("uploadPart: %v", uploadErr)
} else {
- parts = append(parts, part)
+ parts[index] = part
}
- }(chunk)
+ }(chunk, chunkIndex)
}
wg.Wait()
if err != nil {
s3sink.abortMultipartUpload(key, uploadId)
- return err
+ return fmt.Errorf("uploadPart: %v", err)
}
- return s3sink.completeMultipartUpload(ctx, key, uploadId, parts)
+ return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts)
}
-func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index 0a190b27d..3dde52616 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -9,7 +9,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -24,7 +24,7 @@ func (s3sink *S3Sink) deleteObject(key string) error {
result, err := s3sink.conn.DeleteObject(input)
if err == nil {
- glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
+ glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
} else {
glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
}
@@ -43,7 +43,7 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) (
result, err := s3sink.conn.CreateMultipartUpload(input)
if err == nil {
- glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
+ glog.V(2).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
} else {
glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
return "", err
@@ -94,19 +94,20 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
result, err := s3sink.conn.CompleteMultipartUpload(input)
if err == nil {
- glog.V(0).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
+ glog.V(2).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
} else {
glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
+ return fmt.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
}
- return err
+ return nil
}
// To upload a part
-func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) {
var readSeeker io.ReadSeeker
- readSeeker, err := s3sink.buildReadSeeker(ctx, chunk)
+ readSeeker, err := s3sink.buildReadSeeker(chunk)
if err != nil {
glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
@@ -122,7 +123,7 @@ func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, part
result, err := s3sink.conn.UploadPart(input)
if err == nil {
- glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
+ glog.V(2).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
} else {
glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
}
@@ -156,12 +157,19 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
return err
}
-func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) {
- fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId)
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) {
+ fileUrls, err := s3sink.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return nil, err
}
buf := make([]byte, chunk.Size)
- util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true)
+ for _, fileUrl := range fileUrls {
+ _, err = util.ReadUrl(fileUrl, chunk.CipherKey, chunk.IsGzipped, false, chunk.Offset, int(chunk.Size), buf)
+ if err != nil {
+ glog.V(1).Infof("read from %s: %v", fileUrl, err)
+ } else {
+ break
+ }
+ }
return bytes.NewReader(buf), nil
}
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index d7b5ebc4d..e2e3575dc 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -3,13 +3,15 @@ package source
import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/security"
- "github.com/spf13/viper"
- "google.golang.org/grpc"
"io"
"net/http"
"strings"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -23,32 +25,41 @@ type FilerSource struct {
grpcAddress string
grpcDialOption grpc.DialOption
Dir string
+ address string
+ proxyByFiler bool
}
-func (fs *FilerSource) Initialize(configuration util.Configuration) error {
- return fs.initialize(
- configuration.GetString("grpcAddress"),
- configuration.GetString("directory"),
+func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error {
+ return fs.DoInitialize(
+ "",
+ configuration.GetString(prefix+"grpcAddress"),
+ configuration.GetString(prefix+"directory"),
+ false,
)
}
-func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
+func (fs *FilerSource) DoInitialize(address, grpcAddress string, dir string, readChunkFromFiler bool) (err error) {
+ fs.address = address
+ if fs.address == "" {
+ fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
+ }
fs.grpcAddress = grpcAddress
fs.Dir = dir
- fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
+ fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+ fs.proxyByFiler = readChunkFromFiler
return nil
}
-func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) {
+func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error) {
vid2Locations := make(map[string]*filer_pb.Locations)
vid := volumeId(part)
- err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read lookup volume id locations: %v", vid)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
@@ -62,42 +73,65 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s
if err != nil {
glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err)
- return "", fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
+ return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
}
locations := vid2Locations[vid]
if locations == nil || len(locations.Locations) == 0 {
glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err)
- return "", fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
+ return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
}
- fileUrl = fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, part)
+ if !fs.proxyByFiler {
+ for _, loc := range locations.Locations {
+ fileUrls = append(fileUrls, fmt.Sprintf("http://%s/%s?readDeleted=true", loc.Url, part))
+ }
+ } else {
+ fileUrls = append(fileUrls, fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, part))
+ }
return
}
-func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
+func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) {
- fileUrl, err := fs.LookupFileId(ctx, part)
+ if fs.proxyByFiler {
+ return util.DownloadFile("http://" + fs.address + "/?proxyChunkId=" + fileId)
+ }
+
+ fileUrls, err := fs.LookupFileId(fileId)
if err != nil {
return "", nil, nil, err
}
- filename, header, readCloser, err = util.DownloadFile(fileUrl)
+ for _, fileUrl := range fileUrls {
+ filename, header, resp, err = util.DownloadFile(fileUrl)
+ if err != nil {
+ glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
+ } else {
+ break
+ }
+ }
- return filename, header, readCloser, err
+ return filename, header, resp, err
}
-func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSource{})
+
+func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
- return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, fs.grpcAddress, fs.grpcDialOption)
}
+func (fs *FilerSource) AdjustedUrl(location *filer_pb.Location) string {
+ return location.Url
+}
+
func volumeId(fileId string) string {
lastCommaIndex := strings.LastIndex(fileId, ",")
if lastCommaIndex > 0 {
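
volumeId, cut off above at the hunk boundary, simply keeps everything before the last comma of a fileId such as "3,01637037d6":

package main

import (
	"fmt"
	"strings"
)

func volumeId(fileId string) string {
	lastCommaIndex := strings.LastIndex(fileId, ",")
	if lastCommaIndex > 0 {
		return fileId[:lastCommaIndex]
	}
	return fileId
}

func main() {
	fmt.Println(volumeId("3,01637037d6")) // "3", the volume holding the needle
}
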
diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go
index bed26c79c..642834c72 100644
--- a/weed/replication/sub/notification_aws_sqs.go
+++ b/weed/replication/sub/notification_aws_sqs.go
@@ -27,14 +27,14 @@ func (k *AwsSqsInput) GetName() string {
return "aws_sqs"
}
-func (k *AwsSqsInput) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region"))
- glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name"))
+func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+ glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
- configuration.GetString("aws_access_key_id"),
- configuration.GetString("aws_secret_access_key"),
- configuration.GetString("region"),
- configuration.GetString("sqs_queue_name"),
+ configuration.GetString(prefix+"aws_access_key_id"),
+ configuration.GetString(prefix+"aws_secret_access_key"),
+ configuration.GetString(prefix+"region"),
+ configuration.GetString(prefix+"sqs_queue_name"),
)
}
@@ -68,7 +68,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que
return nil
}
-func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
// receive message
result, err := k.svc.ReceiveMessage(&sqs.ReceiveMessageInput{
@@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
}
// process the message
- key = *result.Messages[0].Attributes["key"]
+ keyValue := result.Messages[0].MessageAttributes["key"]
+ key = *keyValue.StringValue
text := *result.Messages[0].Body
message = &filer_pb.EventNotification{}
err = proto.UnmarshalText(text, message)
diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
index eddba9ff8..b16eec2e1 100644
--- a/weed/replication/sub/notification_gocdk_pub_sub.go
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -2,13 +2,20 @@ package sub
import (
"context"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
+ "github.com/streadway/amqp"
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
+ "gocloud.dev/pubsub/rabbitpubsub"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
// _ "gocloud.dev/pubsub/azuresb"
_ "gocloud.dev/pubsub/gcppubsub"
_ "gocloud.dev/pubsub/natspubsub"
@@ -19,32 +26,139 @@ func init() {
NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{})
}
+func getPath(rawUrl string) string {
+ parsedUrl, _ := url.Parse(rawUrl)
+ return path.Join(parsedUrl.Host, parsedUrl.Path)
+}
+
+func QueueDeclareAndBind(conn *amqp.Connection, exchangeUrl string, queueUrl string) error {
+ exchangeName := getPath(exchangeUrl)
+ queueName := getPath(queueUrl)
+ exchangeNameDLX := "DLX." + exchangeName
+ queueNameDLX := "DLX." + queueName
+ ch, err := conn.Channel()
+ if err != nil {
+ glog.Error(err)
+ return err
+ }
+ defer ch.Close()
+ if err := ch.ExchangeDeclare(
+ exchangeNameDLX, "fanout", false, false, false, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := ch.ExchangeDeclare(
+ exchangeName, "fanout", false, false, false, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if _, err := ch.QueueDeclare(
+ queueName, false, false, false, false,
+ amqp.Table{"x-dead-letter-exchange": exchangeNameDLX}); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := ch.QueueBind(queueName, "", exchangeName, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if _, err := ch.QueueDeclare(
+ queueNameDLX, false, false, false, false,
+ amqp.Table{"x-dead-letter-exchange": exchangeName, "x-message-ttl": 600000}); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := ch.QueueBind(queueNameDLX, "", exchangeNameDLX, false, nil); err != nil {
+ glog.Error(err)
+ return err
+ }
+ return nil
+}
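
The declared topology forms a delayed-retry loop: a message nacked with requeue=false dead-letters from the main queue into the DLX exchange, waits out the 600000 ms TTL on the DLX queue, and is then dead-lettered back into the main exchange for redelivery. A hypothetical caller, with invented URL names:

	conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
	if err != nil {
		glog.Fatal(err)
	}
	defer conn.Close()
	// the rabbit URLs reduce to plain exchange/queue names via getPath above
	if err := QueueDeclareAndBind(conn, "rabbit://events", "rabbit://events.sub"); err != nil {
		glog.Fatal(err)
	}
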
+
type GoCDKPubSubInput struct {
- sub *pubsub.Subscription
+ sub *pubsub.Subscription
+ subURL string
}
func (k *GoCDKPubSubInput) GetName() string {
return "gocdk_pub_sub"
}
-func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error {
- subURL := config.GetString("sub_url")
- glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
- sub, err := pubsub.OpenSubscription(context.Background(), subURL)
+func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
+ topicUrl := configuration.GetString(prefix + "topic_url")
+ k.subURL = configuration.GetString(prefix + "sub_url")
+ glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", k.subURL)
+ sub, err := pubsub.OpenSubscription(context.Background(), k.subURL)
if err != nil {
return err
}
+ var conn *amqp.Connection
+ if sub.As(&conn) {
+ ch, err := conn.Channel()
+ if err != nil {
+ return err
+ }
+ defer ch.Close()
+ _, err = ch.QueueInspect(getPath(k.subURL))
+ if err != nil {
+ if strings.HasPrefix(err.Error(), "Exception (404) Reason") {
+ if err := QueueDeclareAndBind(conn, topicUrl, k.subURL); err != nil {
+ return err
+ }
+ } else {
+ return err
+ }
+ }
+ }
k.sub = sub
return nil
}
-func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
- msg, err := k.sub.Receive(context.Background())
+func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
+ ctx := context.Background()
+ msg, err := k.sub.Receive(ctx)
+ if err != nil {
+ var conn *amqp.Connection
+ if k.sub.As(&conn) && conn.IsClosed() {
+ conn.Close()
+ k.sub.Shutdown(ctx)
+ conn, err = amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
+ if err != nil {
+ glog.Error(err)
+ time.Sleep(time.Second)
+ return
+ }
+ k.sub = rabbitpubsub.OpenSubscription(conn, getPath(k.subURL), nil)
+ return
+ }
+		// any other receive error from the cached subscription is permanent, so bail out
+ glog.Fatal(err)
+ }
+ onFailureFn = func() {
+ if msg.Nackable() {
+ isRedelivered := false
+ var delivery amqp.Delivery
+ if msg.As(&delivery) {
+ isRedelivered = delivery.Redelivered
+ glog.Warningf("onFailureFn() metadata: %+v, redelivered: %v", msg.Metadata, delivery.Redelivered)
+ }
+ if isRedelivered {
+ if err := delivery.Nack(false, false); err != nil {
+ glog.Error(err)
+ }
+ } else {
+ msg.Nack()
+ }
+ }
+ }
+ onSuccessFn = func() {
+ msg.Ack()
+ }
key = msg.Metadata["key"]
message = &filer_pb.EventNotification{}
err = proto.Unmarshal(msg.Body, message)
if err != nil {
- return "", nil, err
+ return "", nil, onSuccessFn, onFailureFn, err
}
- return key, message, nil
+ return key, message, onSuccessFn, onFailureFn, nil
}
diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go
index ad6b42a2e..f7c767d4a 100644
--- a/weed/replication/sub/notification_google_pub_sub.go
+++ b/weed/replication/sub/notification_google_pub_sub.go
@@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string {
return "google_pub_sub"
}
-func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id"))
- glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic"))
+func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+ glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetString("google_application_credentials"),
- configuration.GetString("project_id"),
- configuration.GetString("topic"),
+ configuration.GetString(prefix+"google_application_credentials"),
+ configuration.GetString(prefix+"project_id"),
+ configuration.GetString(prefix+"topic"),
)
}
@@ -85,16 +85,22 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
go k.sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
k.messageChan <- m
- m.Ack()
})
return err
}
-func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
m := <-k.messageChan
+ onSuccessFn = func() {
+ m.Ack()
+ }
+ onFailureFn = func() {
+ m.Nack()
+ }
+
// process the message
key = m.Attributes["key"]
message = &filer_pb.EventNotification{}
diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go
index 1a86a8307..622a759ea 100644
--- a/weed/replication/sub/notification_kafka.go
+++ b/weed/replication/sub/notification_kafka.go
@@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string {
return "kafka"
}
-func (k *KafkaInput) Initialize(configuration util.Configuration) error {
- glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts"))
- glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic"))
+func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error {
+ glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+ glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
- configuration.GetStringSlice("hosts"),
- configuration.GetString("topic"),
- configuration.GetString("offsetFile"),
- configuration.GetInt("offsetSaveIntervalSeconds"),
+ configuration.GetStringSlice(prefix+"hosts"),
+ configuration.GetString(prefix+"topic"),
+ configuration.GetString(prefix+"offsetFile"),
+ configuration.GetInt(prefix+"offsetSaveIntervalSeconds"),
)
}
@@ -97,7 +97,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string,
return nil
}
-func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {
+func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
msg := <-k.messageChan
diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go
index 66fbef824..d5a910db9 100644
--- a/weed/replication/sub/notifications.go
+++ b/weed/replication/sub/notifications.go
@@ -9,8 +9,8 @@ type NotificationInput interface {
// GetName gets the name to locate the configuration in sync.toml file
GetName() string
	// Initialize initializes the notification input
- Initialize(configuration util.Configuration) error
- ReceiveMessage() (key string, message *filer_pb.EventNotification, err error)
+ Initialize(configuration util.Configuration, prefix string) error
+ ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error)
}
var (
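
With the widened interface, every input hands back per-message ack/nack callbacks; the replication consumer presumably drives them roughly like this (a sketch, not the actual weed/replication loop):

	for {
		key, m, onSuccessFn, onFailureFn, err := input.ReceiveMessage()
		if err != nil {
			glog.Errorf("receive %s: %v", key, err)
			if onFailureFn != nil {
				onFailureFn()
			}
			continue
		}
		if err := replicator.Replicate(context.Background(), key, m); err != nil {
			glog.Errorf("replicate %s: %v", key, err)
			if onFailureFn != nil {
				onFailureFn()
			}
		} else if onSuccessFn != nil {
			onSuccessFn()
		}
	}
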
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
new file mode 100644
index 000000000..b8af6381a
--- /dev/null
+++ b/weed/s3api/auth_credentials.go
@@ -0,0 +1,275 @@
+package s3api
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+ xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+)
+
+type Action string
+
+type Iam interface {
+ Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc
+}
+
+type IdentityAccessManagement struct {
+ identities []*Identity
+ domain string
+}
+
+type Identity struct {
+ Name string
+ Credentials []*Credential
+ Actions []Action
+}
+
+type Credential struct {
+ AccessKey string
+ SecretKey string
+}
+
+func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManagement {
+ iam := &IdentityAccessManagement{
+ domain: option.DomainName,
+ }
+ if option.Config != "" {
+ if err := iam.loadS3ApiConfigurationFromFile(option.Config); err != nil {
+ glog.Fatalf("fail to load config file %s: %v", option.Config, err)
+ }
+ } else {
+ if err := iam.loadS3ApiConfigurationFromFiler(option); err != nil {
+ glog.Warningf("fail to load config: %v", err)
+ }
+ }
+ return iam
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) error {
+ content, err := filer.ReadContent(option.Filer, filer.IamConfigDirecotry, filer.IamIdentityFile)
+ if err != nil {
+ return fmt.Errorf("read S3 config: %v", err)
+ }
+ return iam.loadS3ApiConfigurationFromBytes(content)
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error {
+ content, readErr := ioutil.ReadFile(fileName)
+ if readErr != nil {
+		glog.Warningf("fail to read %s: %v", fileName, readErr)
+		return fmt.Errorf("fail to read %s: %v", fileName, readErr)
+ }
+ return iam.loadS3ApiConfigurationFromBytes(content)
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromBytes(content []byte) error {
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+ if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil {
+ glog.Warningf("unmarshal error: %v", err)
+ return fmt.Errorf("unmarshal error: %v", err)
+ }
+ if err := iam.loadS3ApiConfiguration(s3ApiConfiguration); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3ApiConfiguration) error {
+ var identities []*Identity
+ for _, ident := range config.Identities {
+ t := &Identity{
+ Name: ident.Name,
+ Credentials: nil,
+ Actions: nil,
+ }
+ for _, action := range ident.Actions {
+ t.Actions = append(t.Actions, Action(action))
+ }
+ for _, cred := range ident.Credentials {
+ t.Credentials = append(t.Credentials, &Credential{
+ AccessKey: cred.AccessKey,
+ SecretKey: cred.SecretKey,
+ })
+ }
+ identities = append(identities, t)
+ }
+
+ // atomically switch
+ iam.identities = identities
+ return nil
+}
+
+func (iam *IdentityAccessManagement) isEnabled() bool {
+
+ return len(iam.identities) > 0
+}
+
+func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {
+
+ for _, ident := range iam.identities {
+ for _, cred := range ident.Credentials {
+ if cred.AccessKey == accessKey {
+ return ident, cred, true
+ }
+ }
+ }
+ return nil, nil, false
+}
+
+func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) {
+
+ for _, ident := range iam.identities {
+ if ident.Name == "anonymous" {
+ return ident, true
+ }
+ }
+ return nil, false
+}
+
+func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
+
+ if !iam.isEnabled() {
+ return f
+ }
+
+ return func(w http.ResponseWriter, r *http.Request) {
+ identity, errCode := iam.authRequest(r, action)
+ if errCode == s3err.ErrNone {
+ if identity != nil && identity.Name != "" {
+ r.Header.Set(xhttp.AmzIdentityId, identity.Name)
+ if identity.isAdmin() {
+ r.Header.Set(xhttp.AmzIsAdmin, "true")
+ }
+ }
+ f(w, r)
+ return
+ }
+ writeErrorResponse(w, errCode, r.URL)
+ }
+}
+
+// check whether the request has valid access keys
+func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) (*Identity, s3err.ErrorCode) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ var found bool
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return identity, s3err.ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return identity, s3err.ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return identity, s3err.ErrNone
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return identity, s3err.ErrNotImplemented
+ case authTypeAnonymous:
+ identity, found = iam.lookupAnonymous()
+ if !found {
+ return identity, s3err.ErrAccessDenied
+ }
+ default:
+ return identity, s3err.ErrNotImplemented
+ }
+
+ if s3Err != s3err.ErrNone {
+ return identity, s3Err
+ }
+
+ glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
+
+ bucket, _ := getBucketAndObject(r)
+
+ if !identity.canDo(action, bucket) {
+ return identity, s3err.ErrAccessDenied
+ }
+
+ return identity, s3err.ErrNone
+
+}
+
+func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err.ErrorCode) {
+ var identity *Identity
+ var s3Err s3err.ErrorCode
+ var found bool
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return identity, s3err.ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return identity, s3err.ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return identity, s3err.ErrNone
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return identity, s3err.ErrNotImplemented
+ case authTypeAnonymous:
+ identity, found = iam.lookupAnonymous()
+ if !found {
+ return identity, s3err.ErrAccessDenied
+ }
+ default:
+ return identity, s3err.ErrNotImplemented
+ }
+
+ glog.V(3).Infof("auth error: %v", s3Err)
+ if s3Err != s3err.ErrNone {
+ return identity, s3Err
+ }
+ return identity, s3err.ErrNone
+}
+
+func (identity *Identity) canDo(action Action, bucket string) bool {
+ if identity.isAdmin() {
+ return true
+ }
+ for _, a := range identity.Actions {
+ if a == action {
+ return true
+ }
+ }
+ if bucket == "" {
+ return false
+ }
+ limitedByBucket := string(action) + ":" + bucket
+ adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket
+ for _, a := range identity.Actions {
+ if string(a) == limitedByBucket {
+ return true
+ }
+ if string(a) == adminLimitedByBucket {
+ return true
+ }
+ }
+ return false
+}
+
+func (identity *Identity) isAdmin() bool {
+ for _, a := range identity.Actions {
+ if a == "Admin" {
+ return true
+ }
+ }
+ return false
+}
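
canDo combines global actions with bucket-scoped ones of the form "Action:bucket". A quick test-style sketch in the same package, with invented identity and bucket names:

func TestCanDoSketch(t *testing.T) {
	ident := &Identity{Name: "uploader", Actions: []Action{"Read", "Write:images"}}
	if !ident.canDo("Write", "images") {
		t.Error("expected bucket-scoped Write:images to allow writes to images")
	}
	if ident.canDo("Write", "videos") {
		t.Error("Write:images must not allow writes to videos")
	}
	if !ident.canDo("Read", "videos") {
		t.Error("global Read should apply to any bucket")
	}
}
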
diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go
new file mode 100644
index 000000000..ea4b69550
--- /dev/null
+++ b/weed/s3api/auth_credentials_subscribe.go
@@ -0,0 +1,70 @@
+package s3api
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "io"
+ "time"
+)
+
+func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) error {
+
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+
+ message := resp.EventNotification
+ if message.NewEntry == nil {
+ return nil
+ }
+
+ dir := resp.Directory
+
+ if message.NewParentPath != "" {
+ dir = message.NewParentPath
+ }
+ if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile {
+ if err := s3a.iam.loadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil {
+ return err
+ }
+ glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile)
+ }
+
+ return nil
+ }
+
+ for {
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+ ClientName: clientName,
+ PathPrefix: prefix,
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ glog.Fatalf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.Errorf("subscribing filer meta change: %v", err)
+ }
+ time.Sleep(time.Second)
+ }
+}
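
A hypothetical startup wiring (the actual call site is outside this diff) would tail metadata changes under the IAM directory from the current timestamp, so credential edits propagate without a restart:

	// hypothetical: clientName "s3", watch from now on
	go s3a.subscribeMetaEvents("s3", filer.IamConfigDirecotry, time.Now().UnixNano())
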
diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go
new file mode 100644
index 000000000..0383ddbcd
--- /dev/null
+++ b/weed/s3api/auth_credentials_test.go
@@ -0,0 +1,69 @@
+package s3api
+
+import (
+ . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+ "testing"
+
+ "github.com/golang/protobuf/jsonpb"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+func TestIdentityListFileFormat(t *testing.T) {
+
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+
+ identity1 := &iam_pb.Identity{
+ Name: "some_name",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_ADMIN,
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+ identity2 := &iam_pb.Identity{
+ Name: "some_read_only_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key1",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ },
+ }
+ identity3 := &iam_pb.Identity{
+ Name: "some_normal_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key2",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3)
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+	text, err := m.MarshalToString(s3ApiConfiguration)
+	if err != nil {
+		t.Fatalf("marshal: %v", err)
+	}
+
+	t.Log(text)
+
+}
diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go
new file mode 100644
index 000000000..5694a96ac
--- /dev/null
+++ b/weed/s3api/auth_signature_v2.go
@@ -0,0 +1,427 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/base64"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Whitelist resource list that will be used in query string for signature-V2 calculation.
+// The list should be alphabetically sorted
+var resourceList = []string{
+ "acl",
+ "delete",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// Verify if request has valid AWS Signature Version '2'.
+func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, s3err.ErrorCode) {
+ if isRequestSignatureV2(r) {
+ return iam.doesSignV2Match(r)
+ }
+ return iam.doesPresignV2SignatureMatch(r)
+}
+
+func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.Header) s3err.ErrorCode {
+ accessKey := formValues.Get("AWSAccessKeyId")
+ _, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return s3err.ErrInvalidAccessKeyID
+ }
+ policy := formValues.Get("Policy")
+ signature := formValues.Get("Signature")
+ if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) {
+ return s3err.ErrSignatureDoesNotMatch
+ }
+ return s3err.ErrNone
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// doesSignV2Match - Verify authorization header with calculated header in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html
+// returns true if it matches, false otherwise; if error is not nil, the result is always false
+
+func validateV2AuthHeader(v2Auth string) (accessKey string, errCode s3err.ErrorCode) {
+ if v2Auth == "" {
+ return "", s3err.ErrAuthHeaderEmpty
+ }
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v2Auth, signV2Algorithm) {
+ return "", s3err.ErrSignatureVersionNotSupported
+ }
+
+ // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string).
+ // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
+ authFields := strings.Split(v2Auth, " ")
+ if len(authFields) != 2 {
+ return "", s3err.ErrMissingFields
+ }
+
+	// Then split on ":" to separate the `AWSAccessKeyId` and `Signature` strings.
+ keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
+ if len(keySignFields) != 2 {
+ return "", s3err.ErrMissingFields
+ }
+
+ return keySignFields[0], s3err.ErrNone
+}
+
+func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) {
+ v2Auth := r.Header.Get("Authorization")
+
+ accessKey, apiError := validateV2AuthHeader(v2Auth)
+ if apiError != s3err.ErrNone {
+ return nil, apiError
+ }
+
+ // Access credentials.
+ // Validate if access key id same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // r.RequestURI will have raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ unescapedQueries, err := unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, s3err.ErrInvalidRequest
+ }
+
+ prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey)
+ if !strings.HasPrefix(v2Auth, prefix) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ v2Auth = v2Auth[len(prefix):]
+ expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header)
+ if !compareSignatureV2(v2Auth, expectedAuth) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ return ident, s3err.ErrNone
+}
+
+// doesPresignV2SignatureMatch - Verify query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+// returns ErrNone if matches. S3 errors otherwise.
+func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) {
+
+ // r.RequestURI will have raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ var (
+ filteredQueries []string
+ gotSignature string
+ expires string
+ accessKey string
+ err error
+ )
+
+ var unescapedQueries []string
+ unescapedQueries, err = unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+
+ // Extract the necessary values from presigned query, construct a list of new filtered queries.
+ for _, query := range unescapedQueries {
+ keyval := strings.SplitN(query, "=", 2)
+ if len(keyval) != 2 {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+ switch keyval[0] {
+ case "AWSAccessKeyId":
+ accessKey = keyval[1]
+ case "Signature":
+ gotSignature = keyval[1]
+ case "Expires":
+ expires = keyval[1]
+ default:
+ filteredQueries = append(filteredQueries, query)
+ }
+ }
+
+ // Invalid values returns error.
+ if accessKey == "" || gotSignature == "" || expires == "" {
+ return nil, s3err.ErrInvalidQueryParams
+ }
+
+ // Validate if access key id same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // Make sure the request has not expired.
+ expiresInt, err := strconv.ParseInt(expires, 10, 64)
+ if err != nil {
+ return nil, s3err.ErrMalformedExpires
+ }
+
+ // Check if the presigned URL has expired.
+ if expiresInt < time.Now().UTC().Unix() {
+ return nil, s3err.ErrExpiredPresignRequest
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, s3err.ErrInvalidRequest
+ }
+
+ expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
+ if !compareSignatureV2(gotSignature, expectedSignature) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+
+ return ident, s3err.ErrNone
+}
+
+// Unescape encodedQuery string into a list of unescaped query params; returns
+// an error if any value fails to unescape.
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) {
+ for _, query := range strings.Split(encodedQuery, "&") {
+ var unescapedQuery string
+ unescapedQuery, err = url.QueryUnescape(query)
+ if err != nil {
+ return nil, err
+ }
+ unescapedQueries = append(unescapedQueries, unescapedQuery)
+ }
+ return unescapedQueries, nil
+}
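+
+// Illustrative example (hypothetical query string): each "&"-separated
+// element is unescaped individually,
+//
+//	unescapeQueries("a=1&b=hello%20world") // ["a=1", "b=hello world"]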
+
+// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
+func getResource(path string, host string, domain string) (string, error) {
+ if domain == "" {
+ return path, nil
+ }
+	// If virtual-host-style is enabled, construct the "resource" properly.
+ if strings.Contains(host, ":") {
+ // In bucket.mydomain.com:9000, strip out :9000
+ var err error
+ if host, _, err = net.SplitHostPort(host); err != nil {
+ return "", err
+ }
+ }
+ if !strings.HasSuffix(host, "."+domain) {
+ return path, nil
+ }
+ bucket := strings.TrimSuffix(host, "."+domain)
+ return "/" + pathJoin(bucket, path), nil
+}
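+
+// Illustrative example (hypothetical names): with domain "mydomain.com", a
+// request to host "bucket.mydomain.com:9000" for path "/photo.jpg" resolves
+// to the resource "/bucket/photo.jpg"; with an empty domain the path is
+// returned unchanged.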
+
+// pathJoin - like path.Join() but retains trailing "/" of the last element
+func pathJoin(elem ...string) string {
+ trailingSlash := ""
+ if len(elem) > 0 {
+ if strings.HasSuffix(elem[len(elem)-1], "/") {
+ trailingSlash = "/"
+ }
+ }
+ return path.Join(elem...) + trailingSlash
+}
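+
+// Illustrative example: unlike path.Join, the trailing "/" of the last
+// element is retained,
+//
+//	pathJoin("bucket", "dir/")  // "bucket/dir/"
+//	path.Join("bucket", "dir/") // "bucket/dir"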
+
+// Return the signature v2 of a given request.
+func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "")
+ signature := calculateSignatureV2(stringToSign, cred.SecretKey)
+ return signature
+}
+
+// Return the string to sign under two different conditions:
+// - if the expires string is set, the string to sign uses it in place of the Date header.
+// - if the expires string is empty, the string to sign uses the Date header.
+func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string {
+ canonicalHeaders := canonicalizedAmzHeadersV2(headers)
+ if len(canonicalHeaders) > 0 {
+ canonicalHeaders += "\n"
+ }
+
+ date := expires // Date is set to expires date for presign operations.
+ if date == "" {
+ // If expires date is empty then request header Date is used.
+ date = headers.Get("Date")
+ }
+
+ // From the Amazon docs:
+ //
+ // StringToSign = HTTP-Verb + "\n" +
+ // Content-Md5 + "\n" +
+ // Content-Type + "\n" +
+ // Date/Expires + "\n" +
+ // CanonicalizedProtocolHeaders +
+ // CanonicalizedResource;
+ stringToSign := strings.Join([]string{
+ method,
+ headers.Get("Content-MD5"),
+ headers.Get("Content-Type"),
+ date,
+ canonicalHeaders,
+ }, "\n")
+
+ return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery)
+}
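+
+// Illustrative example (hypothetical values): a GET of /mybucket/photo.jpg
+// with only a Date header and no x-amz-* headers yields the string to sign
+//
+//	"GET\n" +                              // HTTP-Verb
+//	"\n" +                                 // empty Content-MD5
+//	"\n" +                                 // empty Content-Type
+//	"Tue, 27 Mar 2007 19:36:42 +0000\n" +  // Date
+//	"/mybucket/photo.jpg"                  // CanonicalizedResource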
+
+// Return canonical resource string.
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string {
+ queries := strings.Split(encodedQuery, "&")
+ keyval := make(map[string]string)
+ for _, query := range queries {
+ key := query
+ val := ""
+ index := strings.Index(query, "=")
+ if index != -1 {
+ key = query[:index]
+ val = query[index+1:]
+ }
+ keyval[key] = val
+ }
+
+ var canonicalQueries []string
+ for _, key := range resourceList {
+ val, ok := keyval[key]
+ if !ok {
+ continue
+ }
+ if val == "" {
+ canonicalQueries = append(canonicalQueries, key)
+ continue
+ }
+ canonicalQueries = append(canonicalQueries, key+"="+val)
+ }
+
+	// The queries are already sorted because resourceList is sorted; if
+	// canonicalQueries is empty, strings.Join returns an empty string.
+ canonicalQuery := strings.Join(canonicalQueries, "&")
+ if canonicalQuery != "" {
+ return encodedResource + "?" + canonicalQuery
+ }
+ return encodedResource
+}
+
+// Return canonical headers.
+func canonicalizedAmzHeadersV2(headers http.Header) string {
+ var keys []string
+ keyval := make(map[string]string)
+ for key := range headers {
+ lkey := strings.ToLower(key)
+ if !strings.HasPrefix(lkey, "x-amz-") {
+ continue
+ }
+ keys = append(keys, lkey)
+ keyval[lkey] = strings.Join(headers[key], ",")
+ }
+ sort.Strings(keys)
+ var canonicalHeaders []string
+ for _, key := range keys {
+ canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key])
+ }
+ return strings.Join(canonicalHeaders, "\n")
+}
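+
+// Illustrative example (hypothetical headers): given X-Amz-Acl: public-read
+// and X-Amz-Meta-A with values "v1" and "v2", the canonical form is
+//
+//	x-amz-acl:public-read
+//	x-amz-meta-a:v1,v2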
+
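+// calculateSignatureV2 returns Base64(HMAC-SHA1(secret, stringToSign)).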
+func calculateSignatureV2(stringToSign string, secret string) string {
+ hm := hmac.New(sha1.New, []byte(secret))
+ hm.Write([]byte(stringToSign))
+ return base64.StdEncoding.EncodeToString(hm.Sum(nil))
+}
+
+// compareSignatureV2 returns true if and only if both signatures
+// are equal. The signatures are expected to be base64 encoded strings
+// according to the AWS S3 signature V2 spec.
+func compareSignatureV2(sig1, sig2 string) bool {
+	// Decoding the signature strings to their binary byte-sequence representation
+	// is required because the Base64 encoding of a value is not unique:
+	// for example, "aGVsbG8=" and "aGVsbG8=\r" decode to the same byte slice.
+ signature1, err := base64.StdEncoding.DecodeString(sig1)
+ if err != nil {
+ return false
+ }
+ signature2, err := base64.StdEncoding.DecodeString(sig2)
+ if err != nil {
+ return false
+ }
+ return subtle.ConstantTimeCompare(signature1, signature2) == 1
+}
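+
+// Illustrative sketch: Base64 text is not unique for a given byte sequence
+// because the decoder ignores "\r" and "\n", hence the compare on decoded
+// bytes,
+//
+//	compareSignatureV2("aGVsbG8=", "aGVsbG8=\r") // true: both decode to "hello"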
+
+// Return signature-v2 for the presigned request.
+func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires)
+ return calculateSignatureV2(stringToSign, cred.SecretKey)
+}
diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go
new file mode 100644
index 000000000..0df26e6fc
--- /dev/null
+++ b/weed/s3api/auth_signature_v4.go
@@ -0,0 +1,770 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from the minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/hex"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) {
+ sha256sum := getContentSha256Cksum(r)
+ switch {
+ case isRequestSignatureV4(r):
+ return iam.doesSignatureMatch(sha256sum, r)
+ case isRequestPresignedSignatureV4(r):
+ return iam.doesPresignedSignatureMatch(sha256sum, r)
+ }
+ return nil, s3err.ErrAccessDenied
+}
+
+// Streaming AWS Signature Version '4' constants.
+const (
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD"
+
+ // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
+ // client did not calculate sha256 of the payload.
+ unsignedPayload = "UNSIGNED-PAYLOAD"
+)
+
+// Returns SHA256 for calculating canonical-request.
+func getContentSha256Cksum(r *http.Request) string {
+ var (
+ defaultSha256Cksum string
+ v []string
+ ok bool
+ )
+
+ // For a presigned request we look at the query param for sha256.
+ if isRequestPresignedSignatureV4(r) {
+		// If X-Amz-Content-Sha256 is not set in a presigned request, the
+		// checksum defaults to 'UNSIGNED-PAYLOAD'.
+ defaultSha256Cksum = unsignedPayload
+ v, ok = r.URL.Query()["X-Amz-Content-Sha256"]
+ if !ok {
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+ } else {
+		// If X-Amz-Content-Sha256 is not set in a signed request, the
+		// checksum defaults to sha256([]byte("")).
+ defaultSha256Cksum = emptySHA256
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+
+ // We found 'X-Amz-Content-Sha256' return the captured value.
+ if ok {
+ return v[0]
+ }
+
+ // We couldn't find 'X-Amz-Content-Sha256'.
+ return defaultSha256Cksum
+}
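+
+// For example, a signed request without X-Amz-Content-Sha256 falls back to
+// sha256("") (emptySHA256), while a presigned request without it falls back
+// to "UNSIGNED-PAYLOAD".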
+
+// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, err := parseSignV4(v4Auth)
+ if err != s3err.ErrNone {
+ return nil, err
+ }
+
+	// Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+ // Extract date, if not present throw error.
+ var date string
+ if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" {
+ if date = r.Header.Get("Date"); date == "" {
+ return nil, s3err.ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ t, e := time.Parse(iso8601Format, date)
+ if e != nil {
+ return nil, s3err.ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+	// Get hashed payload: for non-s3 scopes, recompute the hash from the body when the default empty-payload checksum was supplied.
+ if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil {
+ buf, _ := ioutil.ReadAll(r.Body)
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
+ b, _ := ioutil.ReadAll(bytes.NewBuffer(buf))
+ if len(b) != 0 {
+ bodyHash := sha256.Sum256(b)
+ hashedPayload = hex.EncodeToString(bodyHash[:])
+ }
+ }
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey,
+ signV4Values.Credential.scope.date,
+ signV4Values.Credential.scope.region,
+ signV4Values.Credential.scope.service)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify if signature match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+
+ // Return error none.
+ return identity, s3err.ErrNone
+}
+
+// credentialHeader data type represents structured form of Credential
+// string from authorization header.
+type credentialHeader struct {
+ accessKey string
+ scope struct {
+ date time.Time
+ region string
+ service string
+ request string
+ }
+}
+
+// signValues data type represents structured form of AWS Signature V4 header.
+type signValues struct {
+ Credential credentialHeader
+ SignedHeaders []string
+ Signature string
+}
+
+// Return scope string.
+func (c credentialHeader) getScope() string {
+ return strings.Join([]string{
+ c.scope.date.Format(yyyymmdd),
+ c.scope.region,
+ c.scope.service,
+ c.scope.request,
+ }, "/")
+}
+
+// Authorization: algorithm Credential=accessKeyID/credScope, \
+// SignedHeaders=signedHeaders, Signature=signature
+//
+func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) {
+	// Remove all spaces: some clients send spaces around the comma-separated
+	// parameters and some do not, so we proactively strip them to make
+	// parsing easier.
+ v4Auth = strings.Replace(v4Auth, " ", "", -1)
+ if v4Auth == "" {
+ return sv, s3err.ErrAuthHeaderEmpty
+ }
+
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v4Auth, signV4Algorithm) {
+ return sv, s3err.ErrSignatureVersionNotSupported
+ }
+
+ // Strip off the Algorithm prefix.
+ v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+ authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+ if len(authFields) != 3 {
+ return sv, s3err.ErrMissingFields
+ }
+
+ // Initialize signature version '4' structured header.
+ signV4Values := signValues{}
+
+ var err s3err.ErrorCode
+	// Save credential values.
+ signV4Values.Credential, err = parseCredentialHeader(authFields[0])
+ if err != s3err.ErrNone {
+ return sv, err
+ }
+
+ // Save signed headers.
+ signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
+ if err != s3err.ErrNone {
+ return sv, err
+ }
+
+ // Save signature.
+ signV4Values.Signature, err = parseSignature(authFields[2])
+ if err != s3err.ErrNone {
+ return sv, err
+ }
+
+ // Return the structure here.
+ return signV4Values, s3err.ErrNone
+}
+
+// parse credentialHeader string into its structured form.
+func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.ErrorCode) {
+ creds := strings.Split(strings.TrimSpace(credElement), "=")
+ if len(creds) != 2 {
+ return ch, s3err.ErrMissingFields
+ }
+ if creds[0] != "Credential" {
+ return ch, s3err.ErrMissingCredTag
+ }
+ credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
+ if len(credElements) != 5 {
+ return ch, s3err.ErrCredMalformed
+ }
+ // Save access key id.
+ cred := credentialHeader{
+ accessKey: credElements[0],
+ }
+ var e error
+ cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
+ if e != nil {
+ return ch, s3err.ErrMalformedCredentialDate
+ }
+
+ cred.scope.region = credElements[2]
+ cred.scope.service = credElements[3] // "s3"
+ cred.scope.request = credElements[4] // "aws4_request"
+ return cred, s3err.ErrNone
+}
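+
+// Illustrative example (hypothetical values): the element
+//
+//	Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request
+//
+// parses to accessKey "AKIAIOSFODNN7EXAMPLE", scope date 2013-05-24, region
+// "us-east-1", service "s3" and request "aws4_request".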
+
+// Parse slice of signed headers from signed headers tag.
+func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) {
+ signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
+ if len(signedHdrFields) != 2 {
+ return nil, s3err.ErrMissingFields
+ }
+ if signedHdrFields[0] != "SignedHeaders" {
+ return nil, s3err.ErrMissingSignHeadersTag
+ }
+ if signedHdrFields[1] == "" {
+ return nil, s3err.ErrMissingFields
+ }
+ signedHeaders := strings.Split(signedHdrFields[1], ";")
+ return signedHeaders, s3err.ErrNone
+}
+
+// Parse signature from signature tag.
+func parseSignature(signElement string) (string, s3err.ErrorCode) {
+ signFields := strings.Split(strings.TrimSpace(signElement), "=")
+ if len(signFields) != 2 {
+ return "", s3err.ErrMissingFields
+ }
+ if signFields[0] != "Signature" {
+ return "", s3err.ErrMissingSignTag
+ }
+ if signFields[1] == "" {
+ return "", s3err.ErrMissingFields
+ }
+ signature := signFields[1]
+ return signature, s3err.ErrNone
+}
+
+// doesPolicySignatureV4Match - Verify query headers with post policy
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+// returns ErrNone if the signature matches.
+func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.Header) s3err.ErrorCode {
+
+ // Parse credential tag.
+ credHeader, err := parseCredentialHeader("Credential=" + formValues.Get("X-Amz-Credential"))
+ if err != s3err.ErrNone {
+ return s3err.ErrMissingFields
+ }
+
+ _, cred, found := iam.lookupByAccessKey(credHeader.accessKey)
+ if !found {
+ return s3err.ErrInvalidAccessKeyID
+ }
+
+ // Get signing key.
+ signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service)
+
+ // Get signature.
+ newSignature := getSignature(signingKey, formValues.Get("Policy"))
+
+ // Verify signature.
+ if !compareSignatureV4(newSignature, formValues.Get("X-Amz-Signature")) {
+ return s3err.ErrSignatureDoesNotMatch
+ }
+
+ // Success.
+ return s3err.ErrNone
+}
+
+// check query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) {
+
+ // Copy request
+ req := *r
+
+ // Parse request query string.
+ pSignValues, err := parsePreSignV4(req.URL.Query())
+ if err != s3err.ErrNone {
+ return nil, err
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey)
+ if !found {
+ return nil, s3err.ErrInvalidAccessKeyID
+ }
+
+	// Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r)
+ if errCode != s3err.ErrNone {
+ return nil, errCode
+ }
+ // Construct new query.
+ query := make(url.Values)
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ query.Set("X-Amz-Content-Sha256", hashedPayload)
+ }
+
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+
+ now := time.Now().UTC()
+
+	// If the host which signed the request is slightly ahead in time (by less
+	// than 15 minutes) the request should still be allowed.
+ if pSignValues.Date.After(now.Add(15 * time.Minute)) {
+ return nil, s3err.ErrRequestNotReadyYet
+ }
+
+ if now.Sub(pSignValues.Date) > pSignValues.Expires {
+ return nil, s3err.ErrExpiredPresignRequest
+ }
+
+ // Save the date and expires.
+ t := pSignValues.Date
+ expireSeconds := int(pSignValues.Expires / time.Second)
+
+ // Construct the query.
+ query.Set("X-Amz-Date", t.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
+ query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders))
+ query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region))
+
+ // Save other headers available in the request parameters.
+ for k, v := range req.URL.Query() {
+
+ // Handle the metadata in presigned put query string
+ if strings.Contains(strings.ToLower(k), "x-amz-meta-") {
+ query.Set(k, v[0])
+ }
+
+ if strings.HasPrefix(strings.ToLower(k), "x-amz") {
+ continue
+ }
+ query[k] = v
+ }
+
+ // Get the encoded query.
+ encodedQuery := query.Encode()
+
+ // Verify if date query is same.
+ if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if expires query is same.
+ if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if signed headers query is same.
+ if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if credential query is same.
+ if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ // Verify if sha256 payload query is same.
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") {
+ return nil, s3err.ErrContentSHA256Mismatch
+ }
+ }
+
+	// Finally, verify that the signature matches.
+
+ // Get canonical request.
+ presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
+
+ // Get hmac presigned signing key.
+ presignedSigningKey := getSigningKey(cred.SecretKey,
+ pSignValues.Credential.scope.date,
+ pSignValues.Credential.scope.region,
+ pSignValues.Credential.scope.service)
+
+ // Get new signature.
+ newSignature := getSignature(presignedSigningKey, presignedStringToSign)
+
+ // Verify signature.
+ if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) {
+ return nil, s3err.ErrSignatureDoesNotMatch
+ }
+ return identity, s3err.ErrNone
+}
+
+func contains(list []string, elem string) bool {
+ for _, t := range list {
+ if t == elem {
+ return true
+ }
+ }
+ return false
+}
+
+// preSignValues data type represents the structured form of an AWS Signature V4 query string.
+type preSignValues struct {
+ signValues
+ Date time.Time
+ Expires time.Duration
+}
+
+// Parses signature version '4' query string of the following form.
+//
+// querystring = X-Amz-Algorithm=algorithm
+// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
+// querystring += &X-Amz-Date=date
+// querystring += &X-Amz-Expires=timeout interval
+// querystring += &X-Amz-SignedHeaders=signed_headers
+// querystring += &X-Amz-Signature=signature
+//
+// verifies if any of the necessary query params are missing in the presigned request.
+func doesV4PresignParamsExist(query url.Values) s3err.ErrorCode {
+ v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"}
+ for _, v4PresignQueryParam := range v4PresignQueryParams {
+ if _, ok := query[v4PresignQueryParam]; !ok {
+ return s3err.ErrInvalidQueryParams
+ }
+ }
+ return s3err.ErrNone
+}
+
+// Parses all the presigned signature values into separate elements.
+func parsePreSignV4(query url.Values) (psv preSignValues, aec s3err.ErrorCode) {
+ var err s3err.ErrorCode
+ // verify whether the required query params exist.
+ err = doesV4PresignParamsExist(query)
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ // Verify if the query algorithm is supported or not.
+ if query.Get("X-Amz-Algorithm") != signV4Algorithm {
+ return psv, s3err.ErrInvalidQuerySignatureAlgo
+ }
+
+ // Initialize signature version '4' structured header.
+ preSignV4Values := preSignValues{}
+
+ // Save credential.
+ preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ var e error
+ // Save date in native time.Time.
+ preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
+ if e != nil {
+ return psv, s3err.ErrMalformedPresignedDate
+ }
+
+ // Save expires in native time.Duration.
+ preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
+ if e != nil {
+ return psv, s3err.ErrMalformedExpires
+ }
+
+ if preSignV4Values.Expires < 0 {
+ return psv, s3err.ErrNegativeExpires
+ }
+
+ // Check if Expiry time is less than 7 days (value in seconds).
+ if preSignV4Values.Expires.Seconds() > 604800 {
+ return psv, s3err.ErrMaximumExpires
+ }
+
+ // Save signed headers.
+ preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+ // Save signature.
+ preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
+ if err != s3err.ErrNone {
+ return psv, err
+ }
+
+	// Return the structured form of the signature query string.
+ return preSignV4Values, s3err.ErrNone
+}
+
+// extractSignedHeaders extracts the headers named in the SignedHeaders list of the Authorization header from the request
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, s3err.ErrorCode) {
+ reqHeaders := r.Header
+ // find whether "host" is part of list of signed headers.
+ // if not return ErrUnsignedHeaders. "host" is mandatory.
+ if !contains(signedHeaders, "host") {
+ return nil, s3err.ErrUnsignedHeaders
+ }
+ extractedSignedHeaders := make(http.Header)
+ for _, header := range signedHeaders {
+		// `host` will not be found in the request headers; it is available via
+		// r.Host. The list of signed headers must nevertheless contain "host".
+ val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+ if ok {
+ for _, enc := range val {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ continue
+ }
+ switch header {
+ case "expect":
+ // Golang http server strips off 'Expect' header, if the
+ // client sent this as part of signed headers we need to
+ // handle otherwise we would see a signature mismatch.
+ // `aws-cli` sets this as part of signed headers.
+ //
+ // According to
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
+ // Expect header is always of form:
+ //
+ // Expect = "Expect" ":" 1#expectation
+ // expectation = "100-continue" | expectation-extension
+ //
+			// So it is safe to assume that '100-continue' is what would
+			// be sent; for the time being, keep this workaround.
+ // Adding a *TODO* to remove this later when Golang server
+ // doesn't filter out the 'Expect' header.
+ extractedSignedHeaders.Set(header, "100-continue")
+ case "host":
+ // Go http server removes "host" from Request.Header
+ extractedSignedHeaders.Set(header, r.Host)
+ case "transfer-encoding":
+ for _, enc := range r.TransferEncoding {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ case "content-length":
+ // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation.
+ // But some clients deviate from this rule. Hence we consider Content-Length for signature
+ // calculation to be compatible with such clients.
+ extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
+ default:
+ return nil, s3err.ErrUnsignedHeaders
+ }
+ }
+ return extractedSignedHeaders, s3err.ErrNone
+}
+
+// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names
+func getSignedHeaders(signedHeaders http.Header) string {
+ var headers []string
+ for k := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ }
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getScope generates the credential scope string from a date and an AWS region, for the s3 service.
+func getScope(t time.Time, region string) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
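+
+// For example, getScope(t, "us-east-1") with t on 2013-05-24 yields
+// "20130524/us-east-1/s3/aws4_request".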
+
+// getCanonicalRequest generates a canonical request of style
+//
+// canonicalRequest =
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+//
+func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
+ rawQuery := strings.Replace(queryStr, "+", "%20", -1)
+ encodedPath := encodePath(urlPath)
+ canonicalRequest := strings.Join([]string{
+ method,
+ encodedPath,
+ rawQuery,
+ getCanonicalHeaders(extractedSignedHeaders),
+ getSignedHeaders(extractedSignedHeaders),
+ payload,
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSign generates the string to sign from the canonical request, the request time, and the credential scope.
+func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
+ stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
+ return stringToSign
+}
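+
+// Illustrative layout (hypothetical values):
+//
+//	AWS4-HMAC-SHA256\n
+//	20130524T000000Z\n
+//	20130524/us-east-1/s3/aws4_request\n
+//	hex(sha256(canonicalRequest))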
+
+// sumHMAC calculates the HMAC-SHA256 of data keyed with key.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getSigningKey derives the HMAC signing key used to calculate the final signature.
+func getSigningKey(secretKey string, t time.Time, region string, service string) []byte {
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
+ regionBytes := sumHMAC(date, []byte(region))
+ serviceBytes := sumHMAC(regionBytes, []byte(service))
+ signingKey := sumHMAC(serviceBytes, []byte("aws4_request"))
+ return signingKey
+}
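+
+// Illustrative sketch of the derivation chain (hypothetical inputs):
+//
+//	kDate    = HMAC-SHA256("AWS4" + secretKey, "20130524")
+//	kRegion  = HMAC-SHA256(kDate, "us-east-1")
+//	kService = HMAC-SHA256(kRegion, "s3")
+//	kSigning = HMAC-SHA256(kService, "aws4_request")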
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getCanonicalHeaders generates the canonical headers string: lowercased, sorted header names with their values.
+func getCanonicalHeaders(signedHeaders http.Header) string {
+ var headers []string
+ vals := make(http.Header)
+ for k, vv := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(signV4TrimAll(v))
+ }
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following the TrimAll()
+// step in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+ // Compress adjacent spaces (a space is determined by
+ // unicode.IsSpace() internally here) to one space and return
+ return strings.Join(strings.Fields(input), " ")
+}
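+
+// For example, signV4TrimAll("  a   b  ") returns "a b".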
+
+// If an object name matches this reserved pattern, there is no need to encode it.
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// encodePath percent-encodes a string from its UTF-8 byte representation.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions
+// do not support all UTF-8 characters; non-English characters cannot be
+// parsed due to the way url.Encode() is written.
+//
+// This function is a direct replacement for the url.Encode() technique and
+// supports pretty much every UTF-8 character.
+func encodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (alphanumeric)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+			runeLen := utf8.RuneLen(s) // avoid shadowing the builtin len
+			if runeLen < 0 {
+				// if utf8 cannot convert, return the string unchanged
+				return pathName
+			}
+			u := make([]byte, runeLen)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
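+
+// Illustrative examples (hypothetical paths):
+//
+//	encodePath("a/b c") // "a/b%20c"
+//	encodePath("a/ö")   // "a/%C3%B6"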
+
+// compareSignatureV4 returns true if and only if both signatures
+// are equal. The signatures are expected to be HEX encoded strings
+// according to the AWS S3 signature V4 spec.
+func compareSignatureV4(sig1, sig2 string) bool {
+	// The constant-time compare over []byte(str) works because the hex
+	// encoding of a byte sequence is unique. See also compareSignatureV2.
+ return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1
+}
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
new file mode 100644
index 000000000..b47cd5f2d
--- /dev/null
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -0,0 +1,421 @@
+package s3api
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+ "unicode/utf8"
+)
+
+// TestIsRequestPresignedSignatureV4 - Test validates the logic for presigned signature version v4 detection.
+func TestIsRequestPresignedSignatureV4(t *testing.T) {
+ testCases := []struct {
+ inputQueryKey string
+ inputQueryValue string
+ expectedResult bool
+ }{
+		// Test case - 1.
+		// Test case with no query key set.
+		{"", "", false},
+		// Test case - 2.
+		// Test case with query key "X-Amz-Credential" set.
+		{"X-Amz-Credential", "", true},
+ // Test case - 3.
+ {"X-Amz-Content-Sha256", "", false},
+ }
+
+ for i, testCase := range testCases {
+ // creating an input HTTP request.
+ // Only the query parameters are relevant for this particular test.
+ inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+ if err != nil {
+ t.Fatalf("Error initializing input HTTP request: %v", err)
+ }
+ q := inputReq.URL.Query()
+ q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
+ inputReq.URL.RawQuery = q.Encode()
+
+ actualResult := isRequestPresignedSignatureV4(inputReq)
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Tests the request-authentication function and the S3 errors it replies with.
+func TestIsReqAuthenticated(t *testing.T) {
+ option := S3ApiServerOption{}
+ iam := NewIdentityAccessManagement(&option)
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ // List of test cases for validating http request authentication.
+ testCases := []struct {
+ req *http.Request
+ s3Error s3err.ErrorCode
+ }{
+ // When request is unsigned, access denied is returned.
+ {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied},
+ // When request is properly signed, error is none.
+ {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone},
+ }
+
+ // Validates all testcases.
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
+ ioutil.ReadAll(testCase.req.Body)
+ t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
+ }
+ }
+}
+
+func TestCheckAdminRequestAuthType(t *testing.T) {
+ option := S3ApiServerOption{}
+ iam := NewIdentityAccessManagement(&option)
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ testCases := []struct {
+ Request *http.Request
+ ErrCode s3err.ErrorCode
+ }{
+ {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied},
+ {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+ {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+ }
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {
+ t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
+ }
+ }
+}
+
+// Provides a fully populated http request instance, fails otherwise.
+func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req, err := newTestRequest(method, urlStr, contentLength, body)
+ if err != nil {
+ t.Fatalf("Unable to initialize new http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is signed with AWS Signature V4, fails if not able to do so.
+func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is presigned with AWS Signature V4, fails if not able to do so.
+func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// Returns new HTTP request object.
+func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
+ if method == "" {
+ method = "POST"
+ }
+
+ // Save for subsequent use
+ var hashedPayload string
+ var md5Base64 string
+ switch {
+ case body == nil:
+ hashedPayload = getSHA256Hash([]byte{})
+ default:
+ payloadBytes, err := ioutil.ReadAll(body)
+ if err != nil {
+ return nil, err
+ }
+ hashedPayload = getSHA256Hash(payloadBytes)
+ md5Base64 = getMD5HashBase64(payloadBytes)
+ }
+ // Seek back to beginning.
+ if body != nil {
+ body.Seek(0, 0)
+ } else {
+ body = bytes.NewReader([]byte(""))
+ }
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+ if md5Base64 != "" {
+ req.Header.Set("Content-Md5", md5Base64)
+ }
+ req.Header.Set("x-amz-content-sha256", hashedPayload)
+
+ // Add Content-Length
+ req.ContentLength = contentLength
+
+ return req, nil
+}
+
+// getSHA256Hash returns SHA-256 hash in hex encoding of given data.
+func getSHA256Hash(data []byte) string {
+ return hex.EncodeToString(getSHA256Sum(data))
+}
+
+// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
+func getMD5HashBase64(data []byte) string {
+ return base64.StdEncoding.EncodeToString(getMD5Sum(data))
+}
+
+// getSHA256Sum returns the SHA-256 sum of the given data.
+func getSHA256Sum(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Sum returns MD5 sum of given data.
+func getMD5Sum(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Hash returns MD5 hash in hex encoding of given data.
+func getMD5Hash(data []byte) string {
+ return hex.EncodeToString(getMD5Sum(data))
+}
+
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// Sign given request using Signature V4.
+func signRequestV4(req *http.Request, accessKey, secretKey string) error {
+ // Get hashed payload.
+ hashedPayload := req.Header.Get("x-amz-content-sha256")
+ if hashedPayload == "" {
+ return fmt.Errorf("Invalid hashed payload")
+ }
+
+ currTime := time.Now()
+
+ // Set x-amz-date.
+ req.Header.Set("x-amz-date", currTime.Format(iso8601Format))
+
+ // Get header map.
+ headerMap := make(map[string][]string)
+ for k, vv := range req.Header {
+ // If request header key is not in ignored headers, then add it.
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok {
+ headerMap[strings.ToLower(k)] = vv
+ }
+ }
+
+ // Get header keys.
+ headers := []string{"host"}
+ for k := range headerMap {
+ headers = append(headers, k)
+ }
+ sort.Strings(headers)
+
+ region := "us-east-1"
+
+ // Get canonical headers.
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range headerMap[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ canonicalHeaders := buf.String()
+
+ // Get signed headers.
+ signedHeaders := strings.Join(headers, ";")
+
+ // Get canonical query string.
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+
+ // Get canonical URI.
+ canonicalURI := EncodePath(req.URL.Path)
+
+ // Get canonical request.
+ // canonicalRequest =
+	//  <HTTPMethod>\n
+	//  <CanonicalURI>\n
+	//  <CanonicalQueryString>\n
+	//