Browse Source

less logs

pull/7526/head
chrislu 1 week ago
parent
commit
8af6692ace
  1. 19
      other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java
  2. 14
      test/java/spark/src/test/resources/log4j.properties
  3. 3
      test/java/spark/src/test/resources/test-local-only.properties

19
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java

@@ -14,8 +14,10 @@ import java.io.IOException;
/**
* SeaweedFS Hadoop InputStream.
*
* NOTE: Does NOT implement ByteBufferReadable to match RawLocalFileSystem behavior.
* This ensures BufferedFSInputStream is used, which properly handles position tracking
* NOTE: Does NOT implement ByteBufferReadable to match RawLocalFileSystem
* behavior.
* This ensures BufferedFSInputStream is used, which properly handles position
* tracking
* for positioned reads (critical for Parquet and other formats).
*/
public class SeaweedHadoopInputStream extends FSInputStream {
@@ -76,8 +78,10 @@ public class SeaweedHadoopInputStream extends FSInputStream {
}
/**
* Returns the length of the file that this stream refers to. Note that the length returned is the length
* as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
* Returns the length of the file that this stream refers to. Note that the
* length returned is the length
* as of the time the Stream was opened. Specifically, if there have been
* subsequent appends to the file,
* they won't be reflected in the returned length.
*
* @return length of the file.
@@ -96,13 +100,13 @@ public class SeaweedHadoopInputStream extends FSInputStream {
public synchronized long getPos() throws IOException {
return seaweedInputStream.getPos();
}
public String getPath() {
return path;
}
/**
* Seeks a different copy of the data. Returns true if
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*
* @throws IOException throws {@link IOException} if there is an error
@@ -136,7 +140,8 @@ public class SeaweedHadoopInputStream extends FSInputStream {
}
/**
* gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
* gets whether mark and reset are supported by {@code ADLFileInputStream}.
* Always returns false.
*
* @return always {@code false}
*/

14
test/java/spark/src/test/resources/log4j.properties

@@ -10,20 +10,8 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}:
# Set log levels for specific packages
log4j.logger.org.apache.spark=WARN
log4j.logger.org.apache.hadoop=WARN
log4j.logger.org.apache.parquet=WARN
log4j.logger.seaweed=INFO
# Enable DEBUG for SeaweedFS HDFS integration to see position tracking
log4j.logger.seaweed.hdfs=DEBUG
# Enable DEBUG for SeaweedFS client to see file size calculations
log4j.logger.seaweedfs.client.SeaweedRead=DEBUG
log4j.logger.seaweedfs.client.SeaweedOutputStream=DEBUG
log4j.logger.seaweedfs.client.SeaweedInputStream=DEBUG
# Enable Parquet DEBUG logging to see offset calculations
log4j.logger.org.apache.parquet=DEBUG
log4j.logger.org.apache.parquet.hadoop.ParquetFileWriter=DEBUG
log4j.logger.org.apache.parquet.hadoop.ParquetFileReader=DEBUG
log4j.logger.org.apache.parquet.hadoop.InternalParquetRecordWriter=DEBUG
log4j.logger.org.apache.parquet.hadoop.util.H2SeekableInputStream=DEBUG
# Suppress unnecessary warnings
log4j.logger.org.apache.spark.util.Utils=ERROR

3
test/java/spark/src/test/resources/test-local-only.properties

@@ -1,3 +0,0 @@
# Test with LOCAL_ONLY mode - bypasses SeaweedFS entirely
fs.seaweedfs.debug.mode=LOCAL_ONLY
fs.seaweedfs.debug.dir=/workspace/target/debug-local
Loading…
Cancel
Save