mirror of https://github.com/trapexit/mergerfs.git
11 changed files with 442 additions and 292 deletions
- 10 libfuse/include/fuse.h
- 5 libfuse/lib/fuse.cpp
- 2 libfuse/lib/fuse_lowlevel.cpp
- 15 src/fileinfo.hpp
- 124 src/fuse_create.cpp
- 242 src/fuse_open.cpp
- 84 src/fuse_release.cpp
- 12 src/state.hpp
- 103 tests/TEST_io_passthrough_create_open
- 132 tests/TEST_io_passthrough_open_race
- 5 tests/tests.cpp
@@ -0,0 +1,103 @@
|||
#!/usr/bin/env python3

import os
import sys
import tempfile
import time


def _fail(message, *open_fds):
    # Report the failure, release any fds the caller was still holding,
    # and exit non-zero so the test harness records a failure.
    print(message)
    for open_fd in open_fds:
        os.close(open_fd)
    sys.exit(1)


# Phase 1: create + write + read (passthrough.io=rw)
(fd, filepath) = tempfile.mkstemp(dir=sys.argv[1])

test_data = b"passthrough io test data for create\n" * 100

# Write to the freshly created file.
bytes_written = os.write(fd, test_data)
if bytes_written != len(test_data):
    _fail("create write failed: expected {} bytes, wrote {}".format(len(test_data), bytes_written))

# Rewind and read it back.
os.lseek(fd, 0, os.SEEK_SET)
if os.read(fd, len(test_data)) != test_data:
    _fail("create read failed: data mismatch")

os.close(fd)

# Phase 2: reopen the existing file, then read + append.
fd = os.open(filepath, os.O_RDWR)

# The data written during create must still be there.
os.lseek(fd, 0, os.SEEK_SET)
if os.read(fd, len(test_data)) != test_data:
    _fail("open read failed: data mismatch")

# Append more data at the end.
os.lseek(fd, 0, os.SEEK_END)
more_data = b"additional passthrough data for open\n" * 50
bytes_written = os.write(fd, more_data)
if bytes_written != len(more_data):
    _fail("open write failed: expected {} bytes, wrote {}".format(len(more_data), bytes_written))

# Confirm the file now holds both writes.
os.lseek(fd, 0, os.SEEK_SET)
if os.read(fd, len(test_data) + len(more_data)) != test_data + more_data:
    _fail("open final read failed: data mismatch")

# Phase 3: multiple opens of the same file (single backing_id feature).
# When a file is already open with passthrough, subsequent opens
# must reuse the same backing_id rather than creating new ones.
fd2 = os.open(filepath, os.O_RDWR)

# The second fd must observe the same contents.
os.lseek(fd2, 0, os.SEEK_SET)
if os.read(fd2, len(test_data) + len(more_data)) != test_data + more_data:
    _fail("second open read failed: data mismatch", fd, fd2)

# Append through the second fd.
os.lseek(fd2, 0, os.SEEK_END)
extra_data = b"data from second file descriptor\n" * 25
bytes_written = os.write(fd2, extra_data)
if bytes_written != len(extra_data):
    _fail("second open write failed: expected {} bytes, wrote {}".format(len(extra_data), bytes_written), fd, fd2)

# A write through fd2 must be visible through fd (shared backing).
expected_data = test_data + more_data + extra_data
os.lseek(fd, 0, os.SEEK_SET)
if os.read(fd, len(expected_data)) != expected_data:
    _fail("cross-fd read failed: data mismatch (writes from fd2 not visible on fd)", fd, fd2)

# Phase 4: open a third fd while the others are still open.
fd3 = os.open(filepath, os.O_RDONLY)
os.lseek(fd3, 0, os.SEEK_SET)
if os.read(fd3, len(expected_data)) != expected_data:
    _fail("third open read failed: data mismatch", fd, fd2, fd3)

os.close(fd3)
os.close(fd2)
os.close(fd)

# Cleanup
os.unlink(filepath)
|||
@@ -0,0 +1,132 @@
|||
#!/usr/bin/env python3 |
|||
|
|||
import os |
|||
import sys |
|||
import tempfile |
|||
import threading |
|||
import time |
|||
|
|||
NUM_THREADS = 50
TEST_DATA = b"race condition test data\n"


def open_and_operate(filepath, barrier, results, index):
    """
    Wait at barrier, then open file, write, read, and store result.

    Each worker writes TEST_DATA at its own disjoint offset
    (index * len(TEST_DATA)) and reads it back.  The outcome is
    recorded in results[index] as one of:
      ("success", fd)      - fd is deliberately left open so the caller
                             can do cross-fd verification before closing
      ("write_error", msg) / ("read_error", msg) / ("exception", msg)
    """
    fd = None
    try:
        # Wait for all threads to be ready so the opens race each other.
        barrier.wait()

        # All threads try to open simultaneously.
        fd = os.open(filepath, os.O_RDWR)

        # Write thread-specific data at a unique offset.
        offset = index * len(TEST_DATA)
        os.lseek(fd, offset, os.SEEK_SET)
        bytes_written = os.write(fd, TEST_DATA)

        if bytes_written != len(TEST_DATA):
            results[index] = ("write_error", "expected {} bytes, wrote {}".format(len(TEST_DATA), bytes_written))
            os.close(fd)
            return

        # Read back what we wrote.
        os.lseek(fd, offset, os.SEEK_SET)
        read_data = os.read(fd, len(TEST_DATA))

        if read_data != TEST_DATA:
            results[index] = ("read_error", "data mismatch at offset {}".format(offset))
            os.close(fd)
            return

        results[index] = ("success", fd)

    except Exception as e:
        # Bug fix: an exception raised after os.open() succeeded
        # (e.g. a failed lseek/write) previously leaked the open fd.
        if fd is not None:
            os.close(fd)
        results[index] = ("exception", str(e))
|||
|
|||
# Create the test file, sized so every worker writes a disjoint region.
(fd, filepath) = tempfile.mkstemp(dir=sys.argv[1])

# Pre-allocate so concurrent writes never race to extend the file.
total_size = NUM_THREADS * len(TEST_DATA)
os.ftruncate(fd, total_size)
os.close(fd)

# Synchronization primitives and per-thread result slots.
barrier = threading.Barrier(NUM_THREADS)
results = [None] * NUM_THREADS
threads = [threading.Thread(target=open_and_operate,
                            args=(filepath, barrier, results, i))
           for i in range(NUM_THREADS)]

for worker in threads:
    worker.start()

for worker in threads:
    worker.join()

# Collect outcomes; successful workers hand back their still-open fd.
failed = False
fds_to_close = []

for i, result in enumerate(results):
    if result is None:
        print("thread {} returned no result".format(i))
        failed = True
    elif result[0] == "success":
        fds_to_close.append(result[1])
    else:
        print("thread {} failed: {} - {}".format(i, result[0], result[1]))
        failed = True

if failed:
    for fd in fds_to_close:
        os.close(fd)
    os.unlink(filepath)
    sys.exit(1)

# Read the whole file through one fd and confirm every region landed.
if fds_to_close:
    verify_fd = fds_to_close[0]
    os.lseek(verify_fd, 0, os.SEEK_SET)
    all_data = os.read(verify_fd, total_size)

    expected_data = TEST_DATA * NUM_THREADS
    if all_data != expected_data:
        print("final verification failed: data mismatch")
        print("expected {} bytes, got {} bytes".format(len(expected_data), len(all_data)))
        for fd in fds_to_close:
            os.close(fd)
        os.unlink(filepath)
        sys.exit(1)

# Cross-fd visibility: a write on one fd must be readable on another.
if len(fds_to_close) >= 2:
    fd_a, fd_b = fds_to_close[0], fds_to_close[1]

    # Write new data through fd_a.
    new_data = b"cross-fd visibility test\n"
    os.lseek(fd_a, 0, os.SEEK_SET)
    os.write(fd_a, new_data)

    # fd_b must observe it (shared backing).
    os.lseek(fd_b, 0, os.SEEK_SET)
    if os.read(fd_b, len(new_data)) != new_data:
        print("cross-fd visibility failed: write from fd_a not visible on fd_b")
        for fd in fds_to_close:
            os.close(fd)
        os.unlink(filepath)
        sys.exit(1)

# Close all file descriptors.
for fd in fds_to_close:
    os.close(fd)

# Cleanup
os.unlink(filepath)
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue