
refactor: remove emojis from logging and workflow messages

Removed all emoji characters from:
1. SeaweedOutputStream.java
   - write() logs
   - close() logs
   - getPos() logs
   - flushWrittenBytesToServiceInternal() logs
   - writeCurrentBufferToService() logs

2. SeaweedWrite.java
   - Chunk write logs
   - Metadata write logs
   - Mismatch warnings

3. SeaweedHadoopOutputStream.java
   - Constructor logs

4. spark-integration-tests.yml workflow
   - Replaced checkmarks with 'OK'
   - Replaced X marks with 'FAILED'
   - Replaced error marks with 'ERROR'
   - Replaced warning marks with 'WARNING:'

All functionality remains the same, just cleaner ASCII-only output.
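
For reference, the resulting convention is plain ASCII status tokens in otherwise unchanged SLF4J calls. A minimal sketch of the style (class name and messages below are illustrative, not taken from the diff):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative sketch of the ASCII-only logging style after this commit.
    public class AsciiLogStyle {
        private static final Logger LOG = LoggerFactory.getLogger(AsciiLogStyle.class);

        void report(long bytes) {
            // Before: LOG.info("✅ upload finished: {} bytes", bytes);
            // After: a plain token that is grep-friendly and renders in any CI log.
            LOG.info("OK upload finished: {} bytes", bytes);
            LOG.warn("WARNING: falling back to direct volume access");
        }
    }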
pull/7526/head
chrislu committed 1 week ago
commit ac9fbeefac
Changed files:
1. .github/workflows/spark-integration-tests.yml (42 changed lines)
2. other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java (18 changed lines)
3. other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (6 changed lines)
4. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java (2 changed lines)

.github/workflows/spark-integration-tests.yml (42 changed lines)

@@ -57,26 +57,26 @@ jobs:
cd ../docker
ls -la weed filer.toml entrypoint.sh
file weed
echo " SeaweedFS binary built"
echo "OK SeaweedFS binary built"
- name: Build SeaweedFS Java dependencies
run: |
echo "Building Java client..."
cd other/java/client
mvn clean install -U -DskipTests -Dgpg.skip=true -Dcentral.publishing.skip=true
echo " Java client built"
echo "OK Java client built"
cd ../../..
echo "Building HDFS2 client..."
cd other/java/hdfs2
mvn clean install -U -DskipTests -Dgpg.skip=true -Dcentral.publishing.skip=true
echo " HDFS2 client built"
echo "OK HDFS2 client built"
cd ../../..
echo "Building HDFS3 client..."
cd other/java/hdfs3
mvn clean install -U -DskipTests -Dgpg.skip=true -Dcentral.publishing.skip=true
echo " HDFS3 client built"
echo "OK HDFS3 client built"
echo ""
echo "All Java dependencies installed to ~/.m2/repository"
@@ -94,11 +94,11 @@ jobs:
echo "Waiting for services..."
for i in {1..30}; do
if curl -f http://localhost:8888/ > /dev/null 2>&1; then
echo " SeaweedFS filer is ready!"
echo "OK SeaweedFS filer is ready!"
break
fi
if [ $i -eq 30 ]; then
echo " Services failed to start"
echo "FAILED Services failed to start"
docker compose ps -a
docker compose logs
exit 1
@@ -108,7 +108,7 @@ jobs:
done
curl -f http://localhost:9333/cluster/status || exit 1
echo " All services healthy"
echo "OK All services healthy"
- name: Prepare Maven repository for Docker
working-directory: test/java/spark
@@ -116,7 +116,7 @@ jobs:
echo "Copying Maven artifacts for Docker container..."
mkdir -p .m2/repository/com
cp -r ~/.m2/repository/com/seaweedfs .m2/repository/com/
echo " Maven artifacts ready"
echo "OK Maven artifacts ready"
- name: Run Spark integration tests
working-directory: test/java/spark
@@ -140,7 +140,7 @@ jobs:
echo "Waiting for filer to be ready..."
for i in {1..10}; do
if curl -f http://localhost:8888/ > /dev/null 2>&1; then
echo " Filer is ready"
echo "OK Filer is ready"
break
fi
sleep 2
@@ -175,7 +175,7 @@ jobs:
curl -o test.parquet "http://localhost:8888/test-spark/employees/$PARQUET_FILE"
if [ ! -f test.parquet ] || [ ! -s test.parquet ]; then
echo "⚠️ Failed to download via HTTP, trying direct volume access..."
echo "WARNING: Failed to download via HTTP, trying direct volume access..."
# Find the actual file ID from filer
docker compose exec -T seaweedfs-filer weed filer.cat -dir=/test-spark/employees/ -name="$PARQUET_FILE" > test.parquet
fi
@@ -212,18 +212,18 @@ jobs:
echo ""
echo "=== File appears to be: ==="
if head -c 4 test.parquet | grep -q "PAR1"; then
echo " Valid Parquet header"
echo "OK Valid Parquet header"
else
echo " INVALID Parquet header"
echo "FAILED INVALID Parquet header"
fi
if tail -c 4 test.parquet | grep -q "PAR1"; then
echo " Valid Parquet trailer"
echo "OK Valid Parquet trailer"
else
echo " INVALID Parquet trailer"
echo "FAILED INVALID Parquet trailer"
fi
else
echo " No Parquet files found via HTTP API"
echo "ERROR No Parquet files found via HTTP API"
echo ""
echo "Trying alternative: list files via docker exec..."
docker compose exec -T seaweedfs-filer sh -c 'curl -s http://localhost:8888/test-spark/employees/' || echo "Docker exec failed"
@@ -270,7 +270,7 @@ SHELL_EOF
- name: Check test results
if: steps.test-run.outcome == 'failure'
run: |
echo " Tests failed with exit code: ${{ steps.test-run.outputs.exit_code }}"
echo "ERROR Tests failed with exit code: ${{ steps.test-run.outputs.exit_code }}"
echo "But file analysis was completed above."
exit 1
@@ -291,7 +291,7 @@ SHELL_EOF
echo "Downloading Apache Spark 3.5.0..."
wget -q https://archive.apache.org/dist/spark/spark-3.5.0/spark-3.5.0-bin-hadoop3.tgz
tar xzf spark-3.5.0-bin-hadoop3.tgz
echo " Spark downloaded"
echo "OK Spark downloaded"
- name: Start SeaweedFS services for example
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
@@ -305,11 +305,11 @@ SHELL_EOF
echo "Waiting for services..."
for i in {1..30}; do
if curl -f http://localhost:8888/ > /dev/null 2>&1; then
echo " SeaweedFS filer is ready!"
echo "OK SeaweedFS filer is ready!"
break
fi
if [ $i -eq 30 ]; then
echo " Services failed to start"
echo "FAILED Services failed to start"
docker compose ps -a
docker compose logs
exit 1
@@ -319,7 +319,7 @@ SHELL_EOF
done
curl -f http://localhost:9333/cluster/status || exit 1
echo " All services healthy"
echo "OK All services healthy"
- name: Build project for example
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
@@ -342,7 +342,7 @@ SHELL_EOF
--conf spark.hadoop.fs.seaweed.replication="" \
target/seaweedfs-spark-integration-tests-1.0-SNAPSHOT.jar \
seaweedfs://localhost:8888/ci-spark-output
echo " Example completed"
echo "OK Example completed"
- name: Verify example output
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
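
The Parquet validation in this workflow relies on the format's 4-byte magic "PAR1" appearing at both the start and the end of a well-formed file. A hedged Java sketch of the same check the head/tail/grep pipeline performs (class and method names are hypothetical):

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    // Sketch: verify the "PAR1" magic at both ends of a candidate Parquet file,
    // mirroring the workflow's `head -c 4` / `tail -c 4` grep checks.
    public class ParquetMagicCheck {
        private static final byte[] MAGIC = "PAR1".getBytes(StandardCharsets.US_ASCII);

        static boolean hasValidMagic(String path) throws IOException {
            try (RandomAccessFile f = new RandomAccessFile(path, "r")) {
                if (f.length() < 8) return false;   // too short for header + trailer
                byte[] head = new byte[4];
                byte[] tail = new byte[4];
                f.readFully(head);                  // first 4 bytes
                f.seek(f.length() - 4);
                f.readFully(tail);                  // last 4 bytes
                return Arrays.equals(head, MAGIC) && Arrays.equals(tail, MAGIC);
            }
        }

        public static void main(String[] args) throws IOException {
            System.out.println(hasValidMagic(args[0])
                    ? "OK Valid Parquet header and trailer"
                    : "FAILED Invalid Parquet magic");
        }
    }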

other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java (18 changed lines)

@@ -101,7 +101,7 @@ public class SeaweedOutputStream extends OutputStream {
long currentPos = position + buffer.position();
if (path.contains("parquet")) {
LOG.info(
"[DEBUG-2024] 📍 getPos() called: flushedPosition={} bufferPosition={} returning={} totalBytesWritten={} writeCalls={}",
"[DEBUG-2024] getPos() called: flushedPosition={} bufferPosition={} returning={} totalBytesWritten={} writeCalls={}",
position, buffer.position(), currentPos, totalBytesWritten, writeCallCount);
}
return currentPos;
@@ -133,7 +133,7 @@ public class SeaweedOutputStream extends OutputStream {
private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
try {
LOG.info("[DEBUG-2024] ⚠️ flushWrittenBytesToServiceInternal: path={} offset={} #chunks={}",
LOG.info("[DEBUG-2024] flushWrittenBytesToServiceInternal: path={} offset={} #chunks={}",
path, offset, entry.getChunksCount());
// Set the file size in attributes based on our position
@@ -142,7 +142,7 @@ public class SeaweedOutputStream extends OutputStream {
attrBuilder.setFileSize(offset);
entry.setAttributes(attrBuilder);
LOG.info("[DEBUG-2024] Set entry.attributes.fileSize = {} bytes before writeMeta", offset);
LOG.info("[DEBUG-2024] Set entry.attributes.fileSize = {} bytes before writeMeta", offset);
SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
} catch (Exception ex) {
@@ -175,13 +175,13 @@ public class SeaweedOutputStream extends OutputStream {
if (path.contains("parquet")) {
if (length >= 20) {
LOG.info(
"[DEBUG-2024] ✍️ write({} bytes): totalSoFar={} writeCalls={} position={} bufferPos={}, file={}",
"[DEBUG-2024] write({} bytes): totalSoFar={} writeCalls={} position={} bufferPos={}, file={}",
length, totalBytesWritten, writeCallCount, position, buffer.position(),
path.substring(path.lastIndexOf('/') + 1));
} else if (writeCallCount >= 220) {
// Log all small writes after call 220 (likely footer writes)
LOG.info(
"[DEBUG-2024] ✍️ write({} bytes): totalSoFar={} writeCalls={} position={} bufferPos={} [FOOTER?], file={}",
"[DEBUG-2024] write({} bytes): totalSoFar={} writeCalls={} position={} bufferPos={} [FOOTER?], file={}",
length, totalBytesWritten, writeCallCount, position, buffer.position(),
path.substring(path.lastIndexOf('/') + 1));
}
@@ -241,13 +241,13 @@ public class SeaweedOutputStream extends OutputStream {
int bufferPosBeforeFlush = buffer.position();
LOG.info(
"[DEBUG-2024] 🔒 close START: path={} position={} buffer.position()={} totalBytesWritten={} writeCalls={}",
"[DEBUG-2024] close START: path={} position={} buffer.position()={} totalBytesWritten={} writeCalls={}",
path, position, bufferPosBeforeFlush, totalBytesWritten, writeCallCount);
try {
flushInternal();
threadExecutor.shutdown();
LOG.info(
"[DEBUG-2024] close END: path={} finalPosition={} totalBytesWritten={} writeCalls={} (buffer had {} bytes)",
"[DEBUG-2024] close END: path={} finalPosition={} totalBytesWritten={} writeCalls={} (buffer had {} bytes)",
path, position, totalBytesWritten, writeCallCount, bufferPosBeforeFlush);
} finally {
lastError = new IOException("Stream is closed!");
@@ -267,13 +267,13 @@ public class SeaweedOutputStream extends OutputStream {
LOG.info("[DEBUG-2024] writeCurrentBufferToService: path={} buffer.position()={} totalPosition={}", path,
bufferPos, position);
if (bufferPos == 0) {
LOG.info(" Skipping write, buffer is empty");
LOG.info(" Skipping write, buffer is empty");
return;
}
int written = submitWriteBufferToService(buffer, position);
position += written;
LOG.info(" Submitted {} bytes for write, new position={}", written, position);
LOG.info(" Submitted {} bytes for write, new position={}", written, position);
buffer = ByteBufferPool.request(bufferSize);
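
The getPos(), write(), and writeCurrentBufferToService() logs above all track one invariant: the reported stream position is the flushed byte count plus whatever still sits in the in-memory buffer. A simplified sketch of that bookkeeping (field names follow the class; everything else is condensed, not the real implementation):

    import java.nio.ByteBuffer;

    // Condensed sketch of SeaweedOutputStream's position bookkeeping.
    // 'position' counts bytes already submitted to the filer; unflushed
    // bytes are visible only through buffer.position().
    class OutputPositionSketch {
        private long position = 0;
        private ByteBuffer buffer = ByteBuffer.allocate(8 * 1024 * 1024);

        long getPos() {
            return position + buffer.position();   // flushed + still buffered
        }

        void writeCurrentBufferToService() {
            if (buffer.position() == 0) {
                return;                            // buffer empty, nothing to submit
            }
            int written = buffer.position();       // stand-in for submitWriteBufferToService()
            position += written;                   // advance the flushed position
            buffer.clear();                        // real code requests a fresh pooled buffer
        }
    }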

other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (6 changed lines)

@@ -125,7 +125,7 @@ public class SeaweedWrite {
String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);
LOG.info("[DEBUG-2024] Wrote chunk to {} at offset {} size {} bytes, etag: {}", targetUrl, offset,
LOG.info("[DEBUG-2024] Wrote chunk to {} at offset {} size {} bytes, etag: {}", targetUrl, offset,
bytesLength, etag);
return FilerProto.FileChunk.newBuilder()
@@ -152,12 +152,12 @@ public class SeaweedWrite {
// Check if there's a size mismatch with attributes
long attrFileSize = entry.getAttributes().getFileSize();
LOG.info(
"[DEBUG-2024] Writing metadata to {} with {} chunks, totalSize from chunks: {} bytes, attr.fileSize: {} bytes{}",
"[DEBUG-2024] Writing metadata to {} with {} chunks, totalSize from chunks: {} bytes, attr.fileSize: {} bytes{}",
parentDirectory + "/" + entry.getName(),
chunks.size(),
totalSize,
attrFileSize,
(totalSize != attrFileSize) ? " ⚠️ MISMATCH!" : "");
(totalSize != attrFileSize) ? " MISMATCH!" : "");
entry.clearChunks();
entry.addAllChunks(chunks);
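
The MISMATCH warning above cross-checks two size computations: the extent covered by the chunk list against the fileSize stored in the entry's attributes. A simplified sketch of that comparison (the Chunk record stands in for FilerProto.FileChunk, and computing the extent as the maximum offset + size is an assumption of this sketch):

    import java.util.List;

    // Sketch of the size cross-check behind the MISMATCH warning.
    class SizeCrossCheck {
        record Chunk(long offset, long size) {}    // stand-in for FilerProto.FileChunk

        static long totalSizeFromChunks(List<Chunk> chunks) {
            long total = 0;
            for (Chunk c : chunks) {
                // Assumed here: the covered extent is the largest offset + size.
                total = Math.max(total, c.offset() + c.size());
            }
            return total;
        }

        static String mismatchSuffix(List<Chunk> chunks, long attrFileSize) {
            long totalSize = totalSizeFromChunks(chunks);
            return (totalSize != attrFileSize) ? " MISMATCH!" : "";
        }
    }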

other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java (2 changed lines)

@@ -20,7 +20,7 @@ public class SeaweedHadoopOutputStream extends SeaweedOutputStream implements Sy
public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
final long position, final int bufferSize, final String replication) {
super(filerClient, path, entry, position, bufferSize, replication);
LOG.info("[DEBUG-2024] 🔧 SeaweedHadoopOutputStream created: path={} position={} bufferSize={} replication={}",
LOG.info("[DEBUG-2024] SeaweedHadoopOutputStream created: path={} position={} bufferSize={} replication={}",
path, position, bufferSize, replication);
}
