Browse Source

build: patch reed-solomon-erasure to use safe lru

rust-volume-server
Chris Lu 4 days ago
parent
commit
195934fe14
  1. 29
      seaweed-volume/Cargo.lock
  2. 3
      seaweed-volume/Cargo.toml
  3. 1
      seaweed-volume/vendor/reed-solomon-erasure/.cargo-ok
  4. 6
      seaweed-volume/vendor/reed-solomon-erasure/.cargo_vcs_info.json
  5. 3
      seaweed-volume/vendor/reed-solomon-erasure/.gitattributes
  6. 2
      seaweed-volume/vendor/reed-solomon-erasure/.gitignore
  7. 181
      seaweed-volume/vendor/reed-solomon-erasure/CHANGELOG.md
  8. 87
      seaweed-volume/vendor/reed-solomon-erasure/Cargo.toml
  9. 56
      seaweed-volume/vendor/reed-solomon-erasure/Cargo.toml.orig
  10. 24
      seaweed-volume/vendor/reed-solomon-erasure/LICENSE
  11. 166
      seaweed-volume/vendor/reed-solomon-erasure/README.md
  12. 108
      seaweed-volume/vendor/reed-solomon-erasure/benches/reconstruct.rs
  13. 196
      seaweed-volume/vendor/reed-solomon-erasure/build.rs
  14. 26
      seaweed-volume/vendor/reed-solomon-erasure/sage/galois_ext_test.sage
  15. 574
      seaweed-volume/vendor/reed-solomon-erasure/simd_c/reedsolomon.c
  16. 54
      seaweed-volume/vendor/reed-solomon-erasure/simd_c/reedsolomon.h
  17. 927
      seaweed-volume/vendor/reed-solomon-erasure/src/core.rs
  18. 158
      seaweed-volume/vendor/reed-solomon-erasure/src/errors.rs
  19. 412
      seaweed-volume/vendor/reed-solomon-erasure/src/galois_16.rs
  20. 621
      seaweed-volume/vendor/reed-solomon-erasure/src/galois_8.rs
  21. 200
      seaweed-volume/vendor/reed-solomon-erasure/src/lib.rs
  22. 245
      seaweed-volume/vendor/reed-solomon-erasure/src/macros.rs
  23. 425
      seaweed-volume/vendor/reed-solomon-erasure/src/matrix.rs
  24. 489
      seaweed-volume/vendor/reed-solomon-erasure/src/tests/galois_16.rs
  25. 2619
      seaweed-volume/vendor/reed-solomon-erasure/src/tests/mod.rs

29
seaweed-volume/Cargo.lock

@ -17,17 +17,6 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "ahash"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
dependencies = [
"getrandom 0.2.17",
"once_cell",
"version_check",
]
[[package]]
name = "aho-corasick"
version = "1.1.4"
@ -281,7 +270,7 @@ dependencies = [
"http 0.2.12",
"http 1.4.0",
"http-body 1.0.1",
"lru 0.16.3",
"lru",
"percent-encoding",
"regex-lite",
"sha2",
@ -1647,9 +1636,6 @@ name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "hashbrown"
@ -2252,15 +2238,6 @@ version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "lru"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
dependencies = [
"hashbrown 0.12.3",
]
[[package]]
name = "lru"
version = "0.16.3"
@ -3193,11 +3170,9 @@ dependencies = [
[[package]]
name = "reed-solomon-erasure"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7263373d500d4d4f505d43a2a662d475a894aa94503a1ee28e9188b5f3960d4f"
dependencies = [
"libm",
"lru 0.7.8",
"lru",
"parking_lot 0.11.2",
"smallvec",
"spin 0.9.8",

3
seaweed-volume/Cargo.toml

@ -109,3 +109,6 @@ tempfile = "3"
[build-dependencies]
tonic-build = "0.12"
[patch.crates-io]
reed-solomon-erasure = { path = "vendor/reed-solomon-erasure" }

1
seaweed-volume/vendor/reed-solomon-erasure/.cargo-ok

@ -0,0 +1 @@
{"v":1}

6
seaweed-volume/vendor/reed-solomon-erasure/.cargo_vcs_info.json

@ -0,0 +1,6 @@
{
"git": {
"sha1": "a1ca49de5384445b68ade7d72f31f0379c199943"
},
"path_in_vcs": ""
}

3
seaweed-volume/vendor/reed-solomon-erasure/.gitattributes

@ -0,0 +1,3 @@
BackBlaze_JavaReedSolomon/* linguist-vendored
KlausPost_reedsolomon/* linguist-vendored
NicolasT_reedsolomon/* linguist-vendored

2
seaweed-volume/vendor/reed-solomon-erasure/.gitignore

@ -0,0 +1,2 @@
/target/
/Cargo.lock

181
seaweed-volume/vendor/reed-solomon-erasure/CHANGELOG.md

@ -0,0 +1,181 @@
## 6.0.0
- Use LruCache instead of InversionTree for caching data decode matrices
- See [PR #104](https://github.com/rust-rse/reed-solomon-erasure/pull/104)
- Minor code deduplication
- See [PR #102](https://github.com/rust-rse/reed-solomon-erasure/pull/102)
- Dependencies update
- Updated `smallvec` from `0.6.1` to `1.8.0`
## 5.0.3
- Fixed cross build bug for aarch64 with simd-accel
- See [PR #100](https://github.com/rust-rse/reed-solomon-erasure/pull/100)
## 5.0.2
* Add support for `RUST_REED_SOLOMON_ERASURE_ARCH` environment variable and stop using `native` architecture for SIMD code
- See [PR #98](https://github.com/rust-rse/reed-solomon-erasure/pull/98)
## 5.0.1
- The `simd-accel` feature now builds on M1 Macs
- See [PR #92](https://github.com/rust-rse/reed-solomon-erasure/pull/92)
- Minor code cleanup
## 5.0.0
- Merged several PRs
- Not fully reviewed as I am no longer maintaining this crate
## 4.0.2
- Updated build.rs to respect RUSTFLAGS's target-cpu if available
- See [PR #75](https://github.com/darrenldl/reed-solomon-erasure/pull/75)
- Added AVX512 support
- See [PR #69](https://github.com/darrenldl/reed-solomon-erasure/pull/69)
- Disabled SIMD acceleration when MSVC is being used to build the library
- See [PR #67](https://github.com/darrenldl/reed-solomon-erasure/pull/67)
- Dependencies update
- Updated `smallvec` from `0.6` to `1.2`
## 4.0.1
- Updated SIMD C code for Windows compatibility
- Removed include of `unistd.h` in `simd_c/reedsolomon.c`
- Removed GCC `nonnull` attribute in `simd_c/reedsolomon.h`
- See PR [#63](https://github.com/darrenldl/reed-solomon-erasure/pull/63) [#64](https://github.com/darrenldl/reed-solomon-erasure/pull/64) for details
- Replaced use of `libc::uint8_t` in `src/galois_8.rs` with `u8`
## 4.0.0
- Major API restructure: removed `Shard` type in favor of generic functions
- The logic of this crate is now generic over choice of finite field
- The SIMD acceleration feature for GF(2^8) is now activated with the `simd-accel` Cargo feature. Pure-rust behavior is default.
- Ran rustfmt
- Adds a GF(2^16) implementation
## 3.1.2 (not published)
- Doc fix
- Added space before parentheses in code comments and documentation
- Disabled SIMD C code for Android and iOS targets entirely
## 3.1.1
- Fixed `Matrix::augment`
- The error checking code was incorrect
- Since this method is used in internal code only, and the only use case is a correct use case, the error did not lead to any bugs
- Fixed benchmark data
- Previously used MB=10^6 bytes while I should have used MB=2^20 bytes
- Table in README has been updated accordingly
- The `>= 2.1.0` data is obtained by measuring again with the corrected `rse-benchmark` code
- The `2.0.X` and `1.X.X` data are simply adjusted by multiplying `10^6` then dividing by `2^20`
- Dependencies update
- Updated `rand` from `0.4` to `0.5.4`
- Added special handling in `build.rs` for CC options on Android and iOS
- `-march=native` is not available for GCC on Android, see issue #23
## 3.1.0
- Impl'd `std::error::Error` for `reed_solomon_erasure::Error` and `reed_solomon_erasure::SBSError`
- See issue [#17](https://github.com/darrenldl/reed-solomon-erasure/issues/17), suggested by [DrPeterVanNostrand](https://github.com/DrPeterVanNostrand)
- Added fuzzing suite
- No code changes due to this as no bugs were found
- Upgraded InversionTree QuickCheck test
- No code changes due to this as no bugs were found
- Upgraded test suite for main codec methods (e.g. encode, reconstruct)
- A lot of heavy QuickCheck tests were added
- No code changes due to this as no bugs were found
- Upgraded test suite for ShardByShard methods
- A lot of heavy QuickCheck tests were added
- No code changes due to this as no bugs were found
- Minor code refactoring in `reconstruct_internal` method
- This means `reconstruct` and related methods are slightly more optimized
## 3.0.3
- Added QuickCheck tests to the test suite
- InversionTree is heavily tested now
- No code changes as no bugs were found
- Deps update
- Updated rayon from 0.9 to 1.0
## 3.0.2
- Same as 3.0.1, but 3.0.1 had unapplied changes
## 3.0.1 (yanked)
- Updated doc for `with_buffer` variants of verifying methods
- Stated explicitly that the buffer contains the correct parity shards after a successful call
- Added tests for the above statement
## 3.0.0
- Added `with_buffer` variants for verifying methods
- This gives user the option of reducing heap allocation(s)
- Core code clean up, improvements, and review, added more AUDIT comments
- Improved shard utils
- Added code to remove leftover parity shards in `reconstruct_data_shards`
- This means one fewer gotcha of using the methods
- `ShardByShard` code review and overhaul
- `InversionTree` code review and improvements
## 2.4.0
- Added more flexibility for `convert_2D_slices` macro
- Now accepts expressions rather than just identifiers
- The change requires change of syntax
## 2.3.3
- Replaced all slice splitting functions in `misc_utils` with std lib ones or rayon ones
- This means there are fewer heap allocations in general
## 2.3.2
- Made `==`(`eq`) for `ReedSolomon` more reasonable
- Previously `==` would compare
- data shard count
- parity shard count
- total shard count
- internal encoding matrix
- internal `ParallelParam`
- Now it only compares
- data shard count
- parity shard count
## 2.3.1
- Added info on encoding behaviour to doc
## 2.3.0
- Made Reed-Solomon codec creation methods return error instead of panic when shard numbers are not correct
## 2.2.0
- Fixed SBS error checking code
- Documentation fixes and polishing
- Renamed `Error::InvalidShardsIndicator` to `Error::InvalidShardFlags`
- Added more details to documentation on error handling
- Error handling code overhaul and checks for all method variants
- Dead commented out code cleanup and indent fix
## 2.1.0
- Added Nicolas's SIMD C code files, gaining major speedup on supported CPUs
- Added support for "shard by shard" encoding, allowing easier streamed encoding
- Added functions for shard by shard encoding
## 2.0.0
- Complete rewrite of most code following Klaus Post's design
- Added optimisations (parallelism, loop unrolling)
- 4-5x faster than `1.X.X`
## 1.1.1
- Documentation polish
- Added documentation badge to README
- Optimised internal matrix related operations
- This largely means `decode_missing` is faster
## 1.1.0
- Added more helper functions
- Added more tests
## 1.0.1
- Added more tests
- Fixed decode_missing
- Previously may reconstruct the missing shards with incorrect length
## 1.0.0
- Added more tests
- Added integration with Codecov (via kcov)
- Code refactoring
- Added integration with Coveralls (via kcov)
## 0.9.1
- Code restructuring
- Added documentation
## 0.9.0
- Base version

87
seaweed-volume/vendor/reed-solomon-erasure/Cargo.toml

@ -0,0 +1,87 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "reed-solomon-erasure"
version = "6.0.0"
authors = ["Darren Ldl <darrenldldev@gmail.com>"]
build = "build.rs"
exclude = [
"appveyor.yml",
".travis.yml",
]
description = "Rust implementation of Reed-Solomon erasure coding"
homepage = "https://github.com/darrenldl/reed-solomon-erasure"
documentation = "https://docs.rs/reed-solomon-erasure"
readme = "README.md"
keywords = [
"reed-solomon",
"erasure",
]
categories = ["encoding"]
license = "MIT"
repository = "https://github.com/darrenldl/reed-solomon-erasure"
[[bench]]
name = "reconstruct"
[dependencies.libc]
version = "0.2"
optional = true
[dependencies.libm]
version = "0.2.1"
[dependencies.lru]
version = "0.16.3"
[dependencies.parking_lot]
version = "0.11.2"
optional = true
[dependencies.smallvec]
version = "1.2"
[dependencies.spin]
version = "0.9.2"
features = ["spin_mutex"]
default-features = false
[dev-dependencies.quickcheck]
version = "0.9"
[dev-dependencies.rand]
version = "0.7.2"
[build-dependencies.cc]
version = "1.0"
optional = true
[features]
default = ["std"]
simd-accel = [
"cc",
"libc",
]
std = ["parking_lot"]
[badges.appveyor]
repository = "darrenldl/reed-solomon-erasure"
[badges.codecov]
repository = "darrenldl/reed-solomon-erasure"
[badges.coveralls]
repository = "darrenldl/reed-solomon-erasure"
[badges.travis-ci]
repository = "darrenldl/reed-solomon-erasure"

56
seaweed-volume/vendor/reed-solomon-erasure/Cargo.toml.orig

@ -0,0 +1,56 @@
[package]
name= "reed-solomon-erasure"
version = "6.0.0"
authors = ["Darren Ldl <darrenldldev@gmail.com>"]
edition = "2018"
build = "build.rs"
exclude = [
"appveyor.yml",
".travis.yml"
]
description = "Rust implementation of Reed-Solomon erasure coding"
documentation = "https://docs.rs/reed-solomon-erasure"
homepage= "https://github.com/darrenldl/reed-solomon-erasure"
repository= "https://github.com/darrenldl/reed-solomon-erasure"
readme= "README.md"
keywords= ["reed-solomon", "erasure"]
categories= ["encoding"]
license = "MIT"
[features]
default = ["std"] # simd off by default
std = ["parking_lot"]
simd-accel = ["cc", "libc"]
[badges]
travis-ci = { repository = "darrenldl/reed-solomon-erasure" }
appveyor= { repository = "darrenldl/reed-solomon-erasure" }
codecov = { repository = "darrenldl/reed-solomon-erasure" }
coveralls = { repository = "darrenldl/reed-solomon-erasure" }
[dependencies]
libc = { version = "0.2", optional = true }
# `log2()` impl for `no_std`
libm = "0.2.1"
lru = "0.16.3"
# Efficient `Mutex` implementation for `std` environment
parking_lot = { version = "0.11.2", optional = true }
smallvec = "1.2"
# `Mutex` implementation for `no_std` environment with the same high-level API as `parking_lot`
spin = { version = "0.9.2", default-features = false, features = ["spin_mutex"] }
[dev-dependencies]
rand = "0.7.2"
quickcheck = "0.9"
[build-dependencies]
cc = { version = "1.0", optional = true }
[[bench]]
name = "reconstruct"

24
seaweed-volume/vendor/reed-solomon-erasure/LICENSE

@ -0,0 +1,24 @@
MIT License
Copyright (c) 2017 Darren Ldl
Copyright (c) 2015, 2016 Nicolas Trangez
Copyright (c) 2015 Klaus Post
Copyright (c) 2015 Backblaze
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

166
seaweed-volume/vendor/reed-solomon-erasure/README.md

@ -0,0 +1,166 @@
# reed-solomon-erasure
[![Build Status](https://travis-ci.org/darrenldl/reed-solomon-erasure.svg?branch=master)](https://travis-ci.org/darrenldl/reed-solomon-erasure)
[![Build status](https://ci.appveyor.com/api/projects/status/47c0emjoa9bhpjlb/branch/master?svg=true)](https://ci.appveyor.com/project/darrenldl/reed-solomon-erasure/branch/master)
[![codecov](https://codecov.io/gh/darrenldl/reed-solomon-erasure/branch/master/graph/badge.svg)](https://codecov.io/gh/darrenldl/reed-solomon-erasure)
[![Coverage Status](https://coveralls.io/repos/github/darrenldl/reed-solomon-erasure/badge.svg?branch=master)](https://coveralls.io/github/darrenldl/reed-solomon-erasure?branch=master)
[![Crates](https://img.shields.io/crates/v/reed-solomon-erasure.svg)](https://crates.io/crates/reed-solomon-erasure)
[![Documentation](https://docs.rs/reed-solomon-erasure/badge.svg)](https://docs.rs/reed-solomon-erasure)
[![dependency status](https://deps.rs/repo/github/darrenldl/reed-solomon-erasure/status.svg)](https://deps.rs/repo/github/darrenldl/reed-solomon-erasure)
Rust implementation of Reed-Solomon erasure coding
WASM builds are also available, see section **WASM usage** below for details
This is a port of [BackBlaze's Java implementation](https://github.com/Backblaze/JavaReedSolomon), [Klaus Post's Go implementation](https://github.com/klauspost/reedsolomon), and [Nicolas Trangez's Haskell implementation](https://github.com/NicolasT/reedsolomon).
Version `1.X.X` copies BackBlaze's implementation, and is less performant as there were fewer places where parallelism could be added.
Version `>= 2.0.0` copies Klaus Post's implementation. The SIMD C code is copied from Nicolas Trangez's implementation with minor modifications.
See [Notes](#notes) and [License](#license) section for details.
## WASM usage
See [here](wasm/README.md) for details
## Rust usage
Add the following to your `Cargo.toml` for the normal version (pure Rust version)
```toml
[dependencies]
reed-solomon-erasure = "4.0"
```
or the following for the version which tries to utilise SIMD
```toml
[dependencies]
reed-solomon-erasure = { version = "4.0", features = [ "simd-accel" ] }
```
and the following to your crate root
```rust
extern crate reed_solomon_erasure;
```
NOTE: `simd-accel` is tuned for Haswell+ processors on x86-64 and not in any way for other architectures, set
environment variable `RUST_REED_SOLOMON_ERASURE_ARCH` during build to force compilation of C code for specific architecture (`-march` flag in
GCC/Clang). Even on x86-64 you can achieve better performance by setting it to `native`, but it will stop running on
older CPUs, YMMV.
## Example
```rust
#[macro_use(shards)]
extern crate reed_solomon_erasure;
use reed_solomon_erasure::galois_8::ReedSolomon;
// or use the following for Galois 2^16 backend
// use reed_solomon_erasure::galois_16::ReedSolomon;
fn main () {
let r = ReedSolomon::new(3, 2).unwrap(); // 3 data shards, 2 parity shards
let mut master_copy = shards!(
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[0, 0, 0, 0], // last 2 rows are parity shards
[0, 0, 0, 0]
);
// Construct the parity shards
r.encode(&mut master_copy).unwrap();
// Make a copy and transform it into option shards arrangement
// for feeding into reconstruct_shards
let mut shards: Vec<_> = master_copy.iter().cloned().map(Some).collect();
// We can remove up to 2 shards, which may be data or parity shards
shards[0] = None;
shards[4] = None;
// Try to reconstruct missing shards
r.reconstruct(&mut shards).unwrap();
// Convert back to normal shard arrangement
let result: Vec<_> = shards.into_iter().filter_map(|x| x).collect();
assert!(r.verify(&result).unwrap());
assert_eq!(master_copy, result);
}
```
## Benchmark it yourself
You can test performance under different configurations quickly (e.g. data parity shards ratio, parallel parameters)
by cloning this repo: https://github.com/darrenldl/rse-benchmark
`rse-benchmark` contains a copy of this library (usually a fully functional dev version), so you only need to adjust `main.rs`
then do `cargo run --release` to start the benchmark.
## Performance
Version `1.X.X`, `2.0.0` do not utilise SIMD.
Version `2.1.0` onward uses Nicolas's C files for SIMD operations.
Machine: laptop with `Intel(R) Core(TM) i5-3337U CPU @ 1.80GHz (max 2.70GHz) 2 Cores 4 Threads`
Below shows the result of one of the test configurations, other configurations show similar results in terms of ratio.
|Configuration| Klaus Post's | >= 2.1.0 && < 4.0.0 | 2.0.X | 1.X.X |
|---|---|---|---|---|
| 10x2x1M | ~7800MB/s |~4500MB/s | ~1000MB/s | ~240MB/s |
Versions `>= 4.0.0` have not been benchmarked thoroughly yet
## Changelog
[Changelog](CHANGELOG.md)
## Contributions
Contributions are welcome. Note that by submitting contributions, you agree to license your work under the same license used by this project as stated in the LICENSE file.
## Credits
#### Library overhaul and Galois 2^16 backend
Many thanks to the following people for overhaul of the library and introduction of Galois 2^16 backend
- [@drskalman](https://github.com/drskalman)
- Jeff Burdges [@burdges](https://github.com/burdges)
- Robert Habermeier [@rphmeier](https://github.com/rphmeier)
#### WASM builds
Many thanks to Nazar Mokrynskyi [@nazar-pc](https://github.com/nazar-pc) for submitting his package for WASM builds
He is the original author of the files stored in `wasm` folder. The files may have been modified by me later.
#### AVX512 support
Many thanks to [@sakridge](https://github.com/sakridge) for adding support for AVX512 (see [PR #69](https://github.com/darrenldl/reed-solomon-erasure/pull/69))
#### build.rs improvements
Many thanks to [@ryoqun](https://github.com/ryoqun) for improving the usability of the library in the context of cross-compilation (see [PR #75](https://github.com/darrenldl/reed-solomon-erasure/pull/75))
#### no_std support
Many thanks to Nazar Mokrynskyi [@nazar-pc](https://github.com/nazar-pc) for adding `no_std` support (see [PR #90](https://github.com/darrenldl/reed-solomon-erasure/pull/90))
#### Testers
Many thanks to the following people for testing and benchmarking on various platforms
- Laurențiu Nicola [@lnicola](https://github.com/lnicola/) (platforms: Linux, Intel)
- Roger Andersen [@hexjelly](https://github.com/hexjelly) (platforms: Windows, AMD)
## Notes
#### Code quality review
If you'd like to evaluate the quality of this library, you may find audit comments helpful.
Simply search for "AUDIT" to see the dev notes that are aimed at facilitating code reviews.
#### Implementation notes
The `1.X.X` implementation mostly copies [BackBlaze's Java implementation](https://github.com/Backblaze/JavaReedSolomon).
`2.0.0` onward mostly copies [Klaus Post's Go implementation](https://github.com/klauspost/reedsolomon), and copies C files from [Nicolas Trangez's Haskell implementation](https://github.com/NicolasT/reedsolomon).
The test suite for all versions copies [Klaus Post's Go implementation](https://github.com/klauspost/reedsolomon) as basis.
## License
#### Nicolas Trangez's Haskell Reed-Solomon implementation
The C files for SIMD operations are copied (with no/minor modifications) from [Nicolas Trangez's Haskell implementation](https://github.com/NicolasT/reedsolomon), and are under the same MIT License as used by NicolasT's project
#### TL;DR
All files are released under the MIT License

108
seaweed-volume/vendor/reed-solomon-erasure/benches/reconstruct.rs

@ -0,0 +1,108 @@
#![feature(test)]
extern crate test;
use {
rand::{prelude::*, Rng},
reed_solomon_erasure::galois_8::Field,
test::Bencher,
};
type ReedSolomon = reed_solomon_erasure::ReedSolomon<Field>;
const SHARD_SIZE: usize = 1024;
/// Shared driver for all reconstruct benchmarks: fills the data shards
/// with random bytes, encodes parity once, then on each iteration erases
/// up to `num_parity_shards` shards at random and reconstructs them.
fn run_reconstruct_bench(bencher: &mut Bencher, num_data_shards: usize, num_parity_shards: usize) {
    let mut rng = rand::thread_rng();
    let total_shards = num_data_shards + num_parity_shards;
    let mut shards = vec![vec![0u8; SHARD_SIZE]; total_shards];
    // Randomize only the data shards; encode() fills in the parity shards.
    for data_shard in shards.iter_mut().take(num_data_shards) {
        rng.fill(&mut data_shard[..]);
    }
    let codec = ReedSolomon::new(num_data_shards, num_parity_shards).unwrap();
    codec.encode(&mut shards[..]).unwrap();
    let encoded: Vec<_> = shards.into_iter().map(Some).collect();
    bencher.iter(|| {
        let mut working = encoded.clone();
        // Erase random shards (duplicates possible, so at most
        // num_parity_shards distinct losses — always recoverable).
        for _ in 0..num_parity_shards {
            *working.choose_mut(&mut rng).unwrap() = None;
        }
        codec.reconstruct(&mut working[..]).unwrap();
        assert!(working.iter().all(Option::is_some));
    });
}
// One benchmark per (data, parity) shard configuration; each
// `bench_reconstruct_<data>_<parity>` simply delegates to the shared
// driver above with fixed shard counts.
#[bench]
fn bench_reconstruct_2_2(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 2, 2)
}
#[bench]
fn bench_reconstruct_4_2(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 4, 2)
}
#[bench]
fn bench_reconstruct_4_4(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 4, 4)
}
#[bench]
fn bench_reconstruct_8_2(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 8, 2)
}
#[bench]
fn bench_reconstruct_8_4(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 8, 4)
}
#[bench]
fn bench_reconstruct_8_8(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 8, 8)
}
#[bench]
fn bench_reconstruct_16_2(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 16, 2)
}
#[bench]
fn bench_reconstruct_16_4(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 16, 4)
}
#[bench]
fn bench_reconstruct_16_8(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 16, 8)
}
#[bench]
fn bench_reconstruct_16_16(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 16, 16)
}
#[bench]
fn bench_reconstruct_32_2(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 32, 2)
}
#[bench]
fn bench_reconstruct_32_4(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 32, 4)
}
#[bench]
fn bench_reconstruct_32_8(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 32, 8)
}
#[bench]
fn bench_reconstruct_32_16(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 32, 16)
}
#[bench]
fn bench_reconstruct_32_32(bencher: &mut Bencher) {
    run_reconstruct_bench(bencher, 32, 32)
}

196
seaweed-volume/vendor/reed-solomon-erasure/build.rs

@ -0,0 +1,196 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::Path;
#[cfg(feature = "simd-accel")]
extern crate cc;
// GF(2^8) parameters: field order and the generating polynomial
// (0x1D, i.e. x^8 + x^4 + x^3 + x^2 + 1 with the x^8 term implicit).
const FIELD_SIZE: usize = 256;
const GENERATING_POLYNOMIAL: usize = 29;

/// Builds the discrete-log table for GF(2^8): `table[e]` is the power to
/// which the generator must be raised to produce element `e`.
/// `table[0]` is left at 0 because log(0) is undefined in the field.
fn gen_log_table(polynomial: usize) -> [u8; FIELD_SIZE] {
    let mut table = [0u8; FIELD_SIZE];
    let mut element: usize = 1;
    for exponent in 0..FIELD_SIZE - 1 {
        table[element] = exponent as u8;
        // Multiply by the generator (x): shift left, then reduce modulo
        // the polynomial whenever the value no longer fits in 8 bits.
        element <<= 1;
        if element >= FIELD_SIZE {
            element = (element - FIELD_SIZE) ^ polynomial;
        }
    }
    table
}
// The exp (anti-log) table is stored twice back-to-back so a lookup of
// log(a) + log(b) (at most 2 * 254) never needs a modulo reduction.
const EXP_TABLE_SIZE: usize = FIELD_SIZE * 2 - 2;

/// Inverts the log table: `exp[log(v)] == v` for every nonzero field
/// element `v`, with each entry duplicated FIELD_SIZE - 1 slots later.
fn gen_exp_table(log_table: &[u8; FIELD_SIZE]) -> [u8; EXP_TABLE_SIZE] {
    let mut exp = [0u8; EXP_TABLE_SIZE];
    for value in 1..FIELD_SIZE {
        let log = usize::from(log_table[value]);
        exp[log] = value as u8;
        exp[log + FIELD_SIZE - 1] = value as u8;
    }
    exp
}
/// Multiplies two GF(2^8) elements via log/exp lookup:
/// `a * b = exp(log a + log b)`, with zero absorbing everything.
fn multiply(log_table: &[u8; FIELD_SIZE], exp_table: &[u8; EXP_TABLE_SIZE], a: u8, b: u8) -> u8 {
    if a == 0 || b == 0 {
        return 0;
    }
    // The doubled exp table makes the un-reduced sum of logs a valid index.
    let sum_of_logs = usize::from(log_table[a as usize]) + usize::from(log_table[b as usize]);
    exp_table[sum_of_logs]
}
/// Builds the full 256x256 multiplication table:
/// entry `[a][b]` is the GF(2^8) product `a * b`.
fn gen_mul_table(
    log_table: &[u8; FIELD_SIZE],
    exp_table: &[u8; EXP_TABLE_SIZE],
) -> [[u8; FIELD_SIZE]; FIELD_SIZE] {
    let mut table = [[0u8; FIELD_SIZE]; FIELD_SIZE];
    for (a, row) in table.iter_mut().enumerate() {
        for (b, cell) in row.iter_mut().enumerate() {
            *cell = multiply(log_table, exp_table, a as u8, b as u8);
        }
    }
    table
}
/// Builds the pair of nibble ("half") multiplication tables used by the
/// SIMD shuffle-based kernels: `low[a][n]` is `a * n` for a low-nibble
/// value `n`, and `high[a][n]` is `a * (n << 4)` for a high-nibble value.
fn gen_mul_table_half(
    log_table: &[u8; FIELD_SIZE],
    exp_table: &[u8; EXP_TABLE_SIZE],
) -> ([[u8; 16]; FIELD_SIZE], [[u8; 16]; FIELD_SIZE]) {
    let mut low = [[0u8; 16]; FIELD_SIZE];
    let mut high = [[0u8; 16]; FIELD_SIZE];
    for a in 0..FIELD_SIZE {
        for b in 0..FIELD_SIZE {
            // GF(2^8) product with zero absorbing, as in `multiply`.
            let product = if a == 0 || b == 0 {
                0
            } else {
                exp_table[usize::from(log_table[a]) + usize::from(log_table[b])]
            };
            // b consists of only its low nibble -> record in the low table.
            if b & 0x0F == b {
                low[a][b] = product;
            }
            // b consists of only its high nibble -> record in the high table.
            if b & 0xF0 == b {
                high[a][b >> 4] = product;
            }
        }
    }
    (low, high)
}
// Emits a lookup table as Rust source text into the generated `table.rs`.
// `1D =>` writes `pub static NAME: [TYPE; len] = [..];`
// `2D =>` writes `pub static NAME: [[TYPE; cols]; rows] = [[..], ..];`
// Panics (via `unwrap`) if the write to the output file fails, which is
// acceptable inside a build script.
macro_rules! write_table {
    (1D => $file:ident, $table:ident, $name:expr, $type:expr) => {{
        let len = $table.len();
        let mut table_str = String::from(format!("pub static {}: [{}; {}] = [", $name, $type, len));
        for v in $table.iter() {
            let str = format!("{}, ", v);
            table_str.push_str(&str);
        }
        table_str.push_str("];\n");
        $file.write_all(table_str.as_bytes()).unwrap();
    }};
    (2D => $file:ident, $table:ident, $name:expr, $type:expr) => {{
        let rows = $table.len();
        // Assumes every row has the same length as row 0.
        let cols = $table[0].len();
        let mut table_str = String::from(format!(
            "pub static {}: [[{}; {}]; {}] = [",
            $name, $type, cols, rows
        ));
        for a in $table.iter() {
            table_str.push_str("[");
            for b in a.iter() {
                let str = format!("{}, ", b);
                table_str.push_str(&str);
            }
            table_str.push_str("],\n");
        }
        table_str.push_str("];\n");
        $file.write_all(table_str.as_bytes()).unwrap();
    }};
}
/// Generates the GF(2^8) log/exp/mul lookup tables and writes them as
/// Rust source to `$OUT_DIR/table.rs`, which the crate includes at
/// compile time. The nibble half-tables are emitted only when the
/// `simd-accel` feature is enabled, since only the SIMD kernels use them.
fn write_tables() {
let log_table = gen_log_table(GENERATING_POLYNOMIAL);
let exp_table = gen_exp_table(&log_table);
let mul_table = gen_mul_table(&log_table, &exp_table);
// OUT_DIR is always set by Cargo for build scripts.
let out_dir = env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("table.rs");
let mut f = File::create(&dest_path).unwrap();
write_table!(1D => f, log_table, "LOG_TABLE", "u8");
write_table!(1D => f, exp_table, "EXP_TABLE", "u8");
write_table!(2D => f, mul_table, "MUL_TABLE", "u8");
if cfg!(feature = "simd-accel") {
let (mul_table_low, mul_table_high) = gen_mul_table_half(&log_table, &exp_table);
write_table!(2D => f, mul_table_low, "MUL_TABLE_LOW", "u8");
write_table!(2D => f, mul_table_high, "MUL_TABLE_HIGH", "u8");
}
}
#[cfg(all(
    feature = "simd-accel",
    any(target_arch = "x86_64", target_arch = "aarch64"),
    not(target_env = "msvc"),
    not(any(target_os = "android", target_os = "ios"))
))]
/// Compiles the vendored SIMD C kernels (`simd_c/reedsolomon.c`) into a
/// static library linked by Cargo. Only built when the `simd-accel`
/// feature is on and the target is a supported arch/OS/toolchain.
fn compile_simd_c() {
    let mut build = cc::Build::new();
    build.opt_level(3);
    // `RUST_REED_SOLOMON_ERASURE_ARCH` lets the user force a specific
    // `-march` value (e.g. `native`). Otherwise, on x86-64 we target
    // Haswell, which unlocks useful instructions (AVX2) and improves
    // performance dramatically while still running on any modern CPU.
    if let Ok(arch) = env::var("RUST_REED_SOLOMON_ERASURE_ARCH") {
        build.flag(&format!("-march={}", arch));
    } else if env::var("CARGO_CFG_TARGET_ARCH").map_or(false, |arch| arch == "x86_64") {
        build.flag("-march=haswell");
    }
    build
        .flag("-std=c11")
        .file("simd_c/reedsolomon.c")
        .compile("reedsolomon");
}
#[cfg(not(all(
feature = "simd-accel",
any(target_arch = "x86_64", target_arch = "aarch64"),
not(target_env = "msvc"),
not(any(target_os = "android", target_os = "ios"))
)))]
// No-op fallback when SIMD acceleration is disabled or unsupported on
// this target; keeps `main` free of feature/target conditionals.
fn compile_simd_c() {}
// Build-script entry point: optionally compile the SIMD C kernels,
// then always generate the lookup tables into OUT_DIR.
fn main() {
compile_simd_c();
write_tables();
}

26
seaweed-volume/vendor/reed-solomon-erasure/sage/galois_ext_test.sage

@ -0,0 +1,26 @@
# Sanity-check script for the GF(2^8)^2 extension field arithmetic used
# by the galois_16 backend, written for SageMath (Python 2 print syntax).
# The expected results are recorded in the trailing comments.
GF256.<a> = FiniteField(256)
R.<x> = GF256[x]
# Choose the lexicographically-first irreducible degree-2 polynomial over GF(256).
ext_poly = R.irreducible_element(2,algorithm="first_lexicographic" )
ExtField.<b> = GF256.extension(ext_poly)
print ExtField
print len(ExtField)
# NOTE(review): bare expression below looks like the chosen extension
# polynomial echoed into the script — confirm against upstream source.
x^2 + a*x + a^7
e1 = (a^7 + a^6 + a^4 + a)*b + a^3 + a^2 + a + 1
e2 = (a^7 + a^5 + a^2)*b + a^7 + a^4 + a^3 + a
print "e1: ", e1
print "e2: ", e2
print "e1 + e2: ", e1 + e2
#(a^6 + a^5 + a^4 + a^2 + a)*b + a^7 + a^4 + a^2 + 1
print "e1 * e2: ", e1 * e2
#(a^4 + a^2 + a + 1)*b + a^7 + a^5 + a^3 + a
print "e1 / e2: ", e1 / e2
#(a^7 + a^6 + a^5 + a^4 + a^3 + a^2 + 1)*b + a^6 + a^3 + a
print "1/b: ", 1/b
#(a^4 + a^3 + a + 1)*b + a^5 + a^4 + a^2 + a

574
seaweed-volume/vendor/reed-solomon-erasure/simd_c/reedsolomon.c

@ -0,0 +1,574 @@
/* reedsolomon.c - SIMD-optimized Galois-field multiplication routines
*
* Copyright (c) 2015, 2016 Nicolas Trangez
* Copyright (c) 2015 Klaus Post
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE
*/
/* Pull in the build's autoconf-style feature macros, when present. */
#if HAVE_CONFIG_H
# include "config.h"
#endif
#include <stdint.h>
#include <string.h>
/* Compile-time ISA selection.  Each block below enables one SIMD
 * instruction set based purely on compiler-predefined macros and
 * (re)defines VECTOR_SIZE to that ISA's vector width in bytes.
 * The commented-out forms are the old config.h-probing conditions. */
//#if defined(__SSE2__) && __SSE2__ && defined(HAVE_EMMINTRIN_H) && HAVE_EMMINTRIN_H
//#ifdef __SSE2__
#if defined(__SSE2__) && __SSE2__
# define USE_SSE2 1
# undef VECTOR_SIZE
# define VECTOR_SIZE 16
# include <emmintrin.h>
#else
# define USE_SSE2 0
#endif
//#if defined(__SSSE3__) && __SSSE3__ && defined(HAVE_TMMINTRIN_H) && HAVE_TMMINTRIN_H
//#ifdef __SSSE3__
#if defined(__SSSE3__) && __SSSE3__
# define USE_SSSE3 1
# undef VECTOR_SIZE
# define VECTOR_SIZE 16
# include <tmmintrin.h>
#else
# define USE_SSSE3 0
#endif
//#if defined(__AVX2__) && __AVX2__ && defined(HAVE_IMMINTRIN_H) && HAVE_IMMINTRIN_H
//#ifdef __AVX2__
#if defined(__AVX2__) && __AVX2__
# define USE_AVX2 1
# undef VECTOR_SIZE
# define VECTOR_SIZE 32
# include <immintrin.h>
#else
# define USE_AVX2 0
#endif
#if defined(__AVX512F__) && __AVX512F__
# define USE_AVX512 1
# undef VECTOR_SIZE
# define VECTOR_SIZE 64
# include <immintrin.h>
#else
# define USE_AVX512 0
#endif
/*#if ((defined(__ARM_NEON__) && __ARM_NEON__) \
    || (defined(__ARM_NEON) && __ARM_NEON) \
    || (defined(__aarch64__) && __aarch64__)) \
    && defined(HAVE_ARM_NEON_H) && HAVE_ARM_NEON_H*/
#if ((defined(__ARM_NEON__) && __ARM_NEON__) \
    || (defined(__ARM_NEON) && __ARM_NEON) \
    || (defined(__aarch64__) && __aarch64__))
# define USE_ARM_NEON 1
#undef VECTOR_SIZE
# define VECTOR_SIZE 16
# include <arm_neon.h>
#else
# define USE_ARM_NEON 0
#endif
//#if defined(__ALTIVEC__) && __ALTIVEC__ && defined(HAVE_ALTIVEC_H) && HAVE_ALTIVEC_H
#if defined(__ALTIVEC__) && __ALTIVEC__
# define USE_ALTIVEC 1
# undef VECTOR_SIZE
# define VECTOR_SIZE 16
# include <altivec.h>
#else
# define USE_ALTIVEC 0
#endif
#ifndef VECTOR_SIZE
/* 'Generic' code: no SIMD ISA selected; the scalar fallback paths still
 * step 16 bytes at a time through the v128 union. */
# define VECTOR_SIZE 16
#endif
/* Aligned access is hard-wired off, so all loads/stores below go through
 * the unaligned (`loadu_v`/`storeu_v`) paths; ALIGNED_ACCESS marks the
 * then-unreferenced aligned helpers `unused` to silence warnings. */
# define USE_ALIGNED_ACCESS 0
# define ALIGNED_ACCESS __attribute__((unused))
# define UNALIGNED_ACCESS
#include "reedsolomon.h"
/* Optional GCC/Clang function attributes, gated on configure-time probes
 * (HAVE_FUNC_ATTRIBUTE_* come from config.h / the build script). */
#if defined(HAVE_FUNC_ATTRIBUTE_HOT) && HAVE_FUNC_ATTRIBUTE_HOT
# define HOT_FUNCTION __attribute__((hot))
#else
# define HOT_FUNCTION
#endif
#if defined(HAVE_FUNC_ATTRIBUTE_CONST) && HAVE_FUNC_ATTRIBUTE_CONST
# define CONST_FUNCTION __attribute__((const))
#else
# define CONST_FUNCTION
#endif
#if defined(HAVE_FUNC_ATTRIBUTE_ALWAYS_INLINE) && HAVE_FUNC_ATTRIBUTE_ALWAYS_INLINE
# define ALWAYS_INLINE inline __attribute__((always_inline))
#else
# define ALWAYS_INLINE inline
#endif
/* force_align_arg_pointer realigns the stack pointer on function entry
 * (GCC attribute), applied to the two exported entry points. */
#if defined(HAVE_FUNC_ATTRIBUTE_FORCE_ALIGN_ARG_POINTER) && HAVE_FUNC_ATTRIBUTE_FORCE_ALIGN_ARG_POINTER
# define FORCE_ALIGN_ARG_POINTER __attribute__((force_align_arg_pointer))
#else
# define FORCE_ALIGN_ARG_POINTER
#endif
/* Token-pasting helpers. */
#define CONCAT_HELPER(a, b) a ## b
#define CONCAT(a, b) CONCAT_HELPER(a, b)
/* GCC/Clang generic vector types used by the portable fallback paths;
 * aligned(1) removes any alignment requirement. */
typedef uint8_t v16u8v __attribute__((vector_size(16), aligned(1)));
typedef uint64_t v2u64v __attribute__((vector_size(16), aligned(1)));
/* T declares an array member filling VSIZE bits; T1 declares a single
 * ISA-specific member of the same overall size. */
#define T(t, n) t n[VSIZE / 8 / sizeof(t)]
#define T1(t, n) t n
#define VSIZE 128
/* 128-bit vector, type-punned between byte/qword lanes and the native
 * vector type of whichever ISA(s) are compiled in. */
typedef union {
    T(uint8_t, u8);
    T(uint64_t, u64);
#if USE_SSE2
    T1(__m128i, m128i);
#endif
#if USE_ARM_NEON
    T1(uint8x16_t, uint8x16);
    T1(uint8x8x2_t, uint8x8x2);
#endif
#if USE_ALTIVEC
    T1(__vector uint8_t, uint8x16);
    T1(__vector uint64_t, uint64x2);
#endif
    T1(v16u8v, v16u8);
    T1(v2u64v, v2u64);
} v128 __attribute__((aligned(1)));
#undef VSIZE
#define VSIZE 256
/* 256-bit vector (AVX2). */
typedef union {
    T(uint8_t, u8);
#if USE_AVX2
    __m256i m256i;
#endif
} v256 __attribute__((aligned(1)));
#undef VSIZE
#define VSIZE 512
/* 512-bit vector (AVX-512). */
typedef union {
    T(uint8_t, u8);
#if USE_AVX512
    __m512i m512i;
#endif
} v512 __attribute__((aligned(1)));
#undef T
#undef T1
/* `v` is the working vector type matching the VECTOR_SIZE chosen above. */
#if VECTOR_SIZE == 16
typedef v128 v;
#elif VECTOR_SIZE == 32
typedef v256 v;
#elif VECTOR_SIZE == 64
typedef v512 v;
#else
# error Unsupported VECTOR_SIZE
#endif
/* Unaligned 16-byte load into a v128.  SSE2 has a dedicated intrinsic;
 * every other target goes through memcpy, which compilers lower to an
 * unaligned load where that is legal. */
static ALWAYS_INLINE UNALIGNED_ACCESS v128 loadu_v128(const uint8_t *in) {
#if USE_SSE2
    const v128 result = { .m128i = _mm_loadu_si128((const __m128i *)in) };
#else
    v128 result;
    memcpy(&result.u64, in, sizeof(result.u64));
#endif
    return result;
}
/* Unaligned full-width (`v`) load: AVX-512/AVX2 intrinsics when compiled
 * in, otherwise the 128-bit path. */
static ALWAYS_INLINE UNALIGNED_ACCESS v loadu_v(const uint8_t *in) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_loadu_si512((const __m512i *)in) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_loadu_si256((const __m256i *)in) };
#else
    const v128 result = loadu_v128(in);
#endif
    return result;
}
/* Aligned full-width load.  Currently unreferenced because
 * USE_ALIGNED_ACCESS is 0 (hence the `unused` marker via ALIGNED_ACCESS);
 * kept so the aligned fast path can be re-enabled. */
static ALWAYS_INLINE ALIGNED_ACCESS v load_v(const uint8_t *in) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_load_si512((const __m512i *)in) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_load_si256((const __m256i *)in) };
#elif USE_SSE2
    const v128 result = { .m128i = _mm_load_si128((const __m128i *)in) };
#elif USE_ARM_NEON
    const v128 result = { .uint8x16 = vld1q_u8(in) };
#elif USE_ALTIVEC
    const v128 result = { .uint8x16 = vec_ld(0, in) };
#else
    const v128 result = loadu_v128(in);
#endif
    return result;
}
/* Broadcast the byte `c` into every lane of a `v`.  The generic fallback
 * builds a 64-bit repeating pattern by shifting/ORing the byte into all
 * eight positions, then duplicates it into both qword lanes. */
static ALWAYS_INLINE CONST_FUNCTION v set1_epi8_v(const uint8_t c) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_set1_epi8(c) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_set1_epi8(c) };
#elif USE_SSE2
    const v128 result = { .m128i = _mm_set1_epi8(c) };
#elif USE_ARM_NEON
    const v128 result = { .uint8x16 = vdupq_n_u8(c) };
#elif USE_ALTIVEC
    const v128 result = { .uint8x16 = { c, c, c, c, c, c, c, c,
                                        c, c, c, c, c, c, c, c } };
#else
    uint64_t c2 = c,
             tmp = (c2 << (7 * 8)) |
                   (c2 << (6 * 8)) |
                   (c2 << (5 * 8)) |
                   (c2 << (4 * 8)) |
                   (c2 << (3 * 8)) |
                   (c2 << (2 * 8)) |
                   (c2 << (1 * 8)) |
                   (c2 << (0 * 8));
    const v128 result = { .u64 = { tmp, tmp } };
#endif
    return result;
}
/* Logical right shift by 4, used to expose the high nibble of each byte.
 * Note the per-target lane width differs: x86/AltiVec/generic shift each
 * 64-bit lane, while NEON (vshrq_n_u8) shifts each byte.  Both are only
 * correct because callers mask the result with 0x0f afterwards, which
 * discards the bits that cross byte boundaries on the 64-bit variants. */
static ALWAYS_INLINE CONST_FUNCTION v srli_epi64_v(const v in /*, const unsigned int n*/) {
    // TODO: Hard code n to 4 to avoid build issues on M1 Macs (the
    // `USE_ARM_NEON` path below) where apple clang is failing to
    // recognize the constant `n`.
    //
    // See https://github.com/rust-rse/reed-solomon-erasure/pull/92
    //
#define n 4
#if USE_AVX512
    const v512 result = { .m512i = _mm512_srli_epi64(in.m512i, n) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_srli_epi64(in.m256i, n) };
#elif USE_SSE2
    const v128 result = { .m128i = _mm_srli_epi64(in.m128i, n) };
#elif USE_ARM_NEON
    const v128 result = { .uint8x16 = vshrq_n_u8(in.uint8x16, n) };
#elif USE_ALTIVEC
# if RS_HAVE_VEC_VSRD
    /* NOTE(review): result is written through `.uint64x2` while the input
     * is read as `.v2u64` — confirm both members stay layout-compatible. */
    const v128 shift = { .v2u64 = { n, n } },
        result = { .uint64x2 = vec_vsrd(in.v2u64, shift.v2u64) };
# else
    const v128 result = { .v2u64 = in.v2u64 >> n };
# endif
#else
    const v128 result = { .u64 = { in.u64[0] >> n,
                                   in.u64[1] >> n } };
#endif
#undef n
    return result;
}
/* Bitwise AND of two full-width vectors. */
static ALWAYS_INLINE CONST_FUNCTION v and_v(const v a, const v b) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_and_si512(a.m512i, b.m512i) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_and_si256(a.m256i, b.m256i) };
#elif USE_SSE2
    const v128 result = { .m128i = _mm_and_si128(a.m128i, b.m128i) };
#elif USE_ARM_NEON
    const v128 result = { .uint8x16 = vandq_u8(a.uint8x16, b.uint8x16) };
#elif USE_ALTIVEC
    const v128 result = { .uint8x16 = vec_and(a.uint8x16, b.uint8x16) };
#else
    const v128 result = { .v2u64 = a.v2u64 & b.v2u64 };
#endif
    return result;
}
/* Bitwise XOR of two full-width vectors (GF(2) addition); also doubles as
 * the accumulate `modifier` for reedsolomon_gal_mul_xor. */
static ALWAYS_INLINE CONST_FUNCTION v xor_v(const v a, const v b) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_xor_si512(a.m512i, b.m512i) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_xor_si256(a.m256i, b.m256i) };
#elif USE_SSE2
    const v128 result = { .m128i = _mm_xor_si128(a.m128i, b.m128i) };
#elif USE_ARM_NEON
    const v128 result = { .uint8x16 = veorq_u8(a.uint8x16, b.uint8x16) };
#elif USE_ALTIVEC
    const v128 result = { .uint8x16 = vec_xor(a.uint8x16, b.uint8x16) };
#else
    const v128 result = { .v2u64 = a.v2u64 ^ b.v2u64 };
#endif
    return result;
}
/* Byte-table lookup with PSHUFB semantics: each output byte is
 * vec[mask & 0x0f] within its 128-bit lane, or zero when the mask byte's
 * high bit is set (made explicit in the scalar fallback at the bottom). */
static ALWAYS_INLINE CONST_FUNCTION v shuffle_epi8_v(const v vec, const v mask) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_shuffle_epi8(vec.m512i, mask.m512i) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_shuffle_epi8(vec.m256i, mask.m256i) };
#elif USE_SSSE3
    const v128 result = { .m128i = _mm_shuffle_epi8(vec.m128i, mask.m128i) };
#elif USE_ARM_NEON
# if defined(RS_HAVE_VQTBL1Q_U8) && RS_HAVE_VQTBL1Q_U8
    const v128 result = { .uint8x16 = vqtbl1q_u8(vec.uint8x16, mask.uint8x16) };
# else
    /* There's no NEON instruction mapping 1-to-1 to _mm_shuffle_epi8, but
     * this should have the same result...
     */
    const v128 result = { .uint8x16 = vcombine_u8(vtbl2_u8(vec.uint8x8x2,
                                                           vget_low_u8(mask.uint8x16)),
                                                  vtbl2_u8(vec.uint8x8x2,
                                                           vget_high_u8(mask.uint8x16))) };
# endif
#elif USE_ALTIVEC
    const v128 zeros = set1_epi8_v(0),
        result = { .uint8x16 = vec_perm(vec.uint8x16, zeros.uint8x16, mask.uint8x16) };
#elif defined(RS_HAVE_BUILTIN_SHUFFLE) && RS_HAVE_BUILTIN_SHUFFLE
    const v16u8v zeros = { 0, 0, 0, 0, 0, 0, 0, 0
                         , 0, 0, 0, 0, 0, 0, 0, 0 };
    const v128 result = { .v16u8 = __builtin_shuffle(vec.v16u8, zeros, mask.v16u8) };
#else
    /* Scalar reference implementation of the PSHUFB rule. */
    v128 result = { .u64 = { 0, 0 } };
# define DO_BYTE(i) \
    result.u8[i] = mask.u8[i] & 0x80 ? 0 : vec.u8[mask.u8[i] & 0x0F];
    DO_BYTE( 0); DO_BYTE( 1); DO_BYTE( 2); DO_BYTE( 3);
    DO_BYTE( 4); DO_BYTE( 5); DO_BYTE( 6); DO_BYTE( 7);
    DO_BYTE( 8); DO_BYTE( 9); DO_BYTE(10); DO_BYTE(11);
    DO_BYTE(12); DO_BYTE(13); DO_BYTE(14); DO_BYTE(15);
#endif
    return result;
}
/* Unaligned full-width store; non-x86 targets fall back to memcpy. */
static ALWAYS_INLINE UNALIGNED_ACCESS void storeu_v(uint8_t *out, const v vec) {
#if USE_AVX512
    _mm512_storeu_si512((__m512i *)out, vec.m512i);
#elif USE_AVX2
    _mm256_storeu_si256((__m256i *)out, vec.m256i);
#elif USE_SSE2
    _mm_storeu_si128((__m128i *)out, vec.m128i);
#else
    memcpy(out, &vec.u64, sizeof(vec.u64));
#endif
}
/* Aligned full-width store; unreferenced while USE_ALIGNED_ACCESS is 0,
 * mirroring load_v. */
static ALWAYS_INLINE ALIGNED_ACCESS void store_v(uint8_t *out, const v vec) {
#if USE_AVX512
    _mm512_store_si512((__m512i *)out, vec.m512i);
#elif USE_AVX2
    _mm256_store_si256((__m256i *)out, vec.m256i);
#elif USE_SSE2
    _mm_store_si128((__m128i *)out, vec.m128i);
#elif USE_ARM_NEON
    vst1q_u8(out, vec.uint8x16);
#elif USE_ALTIVEC
    vec_st(vec.uint8x16, 0, out);
#else
    storeu_v(out, vec);
#endif
}
/* Broadcast a 128-bit value into every 128-bit lane of a `v`: identity on
 * 128-bit targets, lane-broadcast on AVX2/AVX-512.  Used to replicate the
 * 16-entry lookup tables so shuffle_epi8_v can index them per lane. */
static ALWAYS_INLINE CONST_FUNCTION v replicate_v128_v(const v128 vec) {
#if USE_AVX512
    const v512 result = { .m512i = _mm512_broadcast_i32x4(vec.m128i) };
#elif USE_AVX2
    const v256 result = { .m256i = _mm256_broadcastsi128_si256(vec.m128i) };
#else
    const v128 result = vec;
#endif
    return result;
}
//+build !noasm !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
// Based on http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf
// and http://jerasure.org/jerasure/gf-complete/tree/master
/*
// func galMulSSSE3Xor(low, high, in, out []byte)
TEXT ·galMulSSSE3Xor(SB), 7, $0
MOVQ low+0(FP),SI // SI: &low
MOVQ high+24(FP),DX // DX: &high
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ $15, BX // BX: low mask
MOVQ BX, X8
PXOR X5, X5
MOVQ in+48(FP),SI // R11: &in
MOVQ in_len+56(FP),R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
CMPQ R9 ,$0
JEQ done_xor
loopback_xor:
MOVOU (SI),X0 // in[x]
MOVOU (DX),X4 // out[x]
MOVOU X0, X1 // in[x]
MOVOU X6, X2 // low copy
MOVOU X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X0: high input
PSHUFB X0, X2 // X2: mul low part
PSHUFB X1, X3 // X3: mul high part
PXOR X2, X3 // X3: Result
PXOR X4, X3 // X3: Result xor existing out
MOVOU X3, (DX) // Store
ADDQ $16, SI // in+=16
ADDQ $16, DX // out+=16
SUBQ $1, R9
JNZ loopback_xor
done_xor:
RET
// func galMulSSSE3(low, high, in, out []byte)
TEXT ·galMulSSSE3(SB), 7, $0
MOVQ low+0(FP),SI // SI: &low
MOVQ high+24(FP),DX // DX: &high
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ $15, BX // BX: low mask
MOVQ BX, X8
PXOR X5, X5
MOVQ in+48(FP),SI // R11: &in
MOVQ in_len+56(FP),R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
CMPQ R9 ,$0
JEQ done
loopback:
MOVOU (SI),X0 // in[x]
MOVOU X0, X1 // in[x]
MOVOU X6, X2 // low copy
MOVOU X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X0: high input
PSHUFB X0, X2 // X2: mul low part
PSHUFB X1, X3 // X3: mul high part
PXOR X2, X3 // X3: Result
MOVOU X3, (DX) // Store
ADDQ $16, SI // in+=16
ADDQ $16, DX // out+=16
SUBQ $1, R9
JNZ loopback
done:
RET
*/
/* One vector step of the Galois-field multiply: split each input byte into
 * its low and high nibble, look each nibble up in the corresponding
 * replicated 16-entry table, XOR the two partial products together, and
 * let `modifier` merge the product with the previous output vector
 * (overwrite via `noop`, or accumulate via `xor_v`). */
static ALWAYS_INLINE v reedsolomon_gal_mul_v(
    const v low_mask_unpacked,
    const v low_vector,
    const v high_vector,
    v (*modifier)(const v new, const v old),
    const v in_x,
    const v old) {
    const v lo_nibbles = and_v(in_x, low_mask_unpacked);
    const v shifted = srli_epi64_v(in_x /*, 4*/);
    const v hi_nibbles = and_v(shifted, low_mask_unpacked);
    const v prod_lo = shuffle_epi8_v(low_vector, lo_nibbles);
    const v prod_hi = shuffle_epi8_v(high_vector, hi_nibbles);
    const v product = xor_v(prod_lo, prod_hi);
    return modifier(product, old);
}
/* Shared driver for both entry points: walks `in`/`out` one full vector at
 * a time, applying the nibble-table multiply and merging each result via
 * `modifier`.  Returns the number of bytes processed (len rounded down to
 * a multiple of sizeof(v)); any shorter tail is left untouched for the
 * caller to handle. */
static ALWAYS_INLINE PROTO_RETURN reedsolomon_gal_mul_impl(
    PROTO_ARGS,
    v (*modifier)(const v new, const v old)) {
    const v low_mask_unpacked = set1_epi8_v(0x0f);
    /* Load the two 16-entry lookup tables once and broadcast them to every
     * 128-bit lane (no-op on 128-bit targets). */
    const v128 low_vector128 = loadu_v128(low),
        high_vector128 = loadu_v128(high);
    const v low_vector = replicate_v128_v(low_vector128),
        high_vector = replicate_v128_v(high_vector128);
    size_t done = 0;
#if USE_ALIGNED_ACCESS
# define LOAD(addr) load_v(addr)
# define STORE(addr, vec) store_v(addr, vec)
#else
# define LOAD(addr) loadu_v(addr)
# define STORE(addr, vec) storeu_v(addr, vec)
#endif
#if RS_HAVE_CLANG_LOOP_UNROLL
# pragma clang loop unroll(enable)
#endif
    for(size_t x = 0; x < len / sizeof(v); x++) {
        const v in_x = LOAD(&in[done]),
            old = LOAD(&out[done]),
            result = reedsolomon_gal_mul_v(
                low_mask_unpacked,
                low_vector, high_vector,
                modifier,
                in_x,
                old);
        STORE(&out[done], result);
        done += sizeof(v);
    }
    return done;
}
/* Identity modifier: the freshly computed product replaces the old output
 * (plain multiply, as opposed to the XOR-accumulate variant). */
static ALWAYS_INLINE CONST_FUNCTION v noop(const v new, const v old __attribute__((__unused__))) {
    return new;
}
/* Exported: out[i] = gf_mul(in[i]) for len/sizeof(v) full vectors, using
 * the low/high nibble tables; returns the number of bytes processed.
 * NOTE(review): `HOT` is expected to come from the build system — confirm
 * build.rs defines it when the attribute is wanted. */
#ifdef HOT
HOT_FUNCTION
#endif
FORCE_ALIGN_ARG_POINTER PROTO(reedsolomon_gal_mul) {
    return reedsolomon_gal_mul_impl(low, high, in, out, len, noop);
}
/* Exported: out[i] ^= gf_mul(in[i]) — same as reedsolomon_gal_mul but
 * XOR-accumulating into the existing output; returns bytes processed. */
#ifdef HOT
HOT_FUNCTION
#endif
FORCE_ALIGN_ARG_POINTER PROTO(reedsolomon_gal_mul_xor) {
    return reedsolomon_gal_mul_impl(low, high, in, out, len, xor_v);
}

54
seaweed-volume/vendor/reed-solomon-erasure/simd_c/reedsolomon.h

@ -0,0 +1,54 @@
/* reedsolomon.h - SIMD-optimized Galois-field multiplication routines
*
* Copyright (c) 2015, 2016 Nicolas Trangez
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE
*/
#include <stdint.h>
#if HAVE_CONFIG_H
# include "config.h"
#endif
/* All exported routines share one shape (see reedsolomon.c):
 *   low/high — 16-entry nibble lookup tables for one GF(2^8) constant,
 *   in/out   — source and destination buffers of `len` bytes,
 *   return   — number of bytes actually processed (full vectors only;
 *              any tail is left for the caller).
 */
#define PROTO_RETURN size_t
#define PROTO_ARGS \
    const uint8_t low[16], \
    const uint8_t high[16], \
    const uint8_t *restrict const in, \
    uint8_t *restrict const out, \
    const size_t len
#define PROTO(name) \
    PROTO_RETURN \
    name (PROTO_ARGS)
/* out = gf_mul(in) */
PROTO(reedsolomon_gal_mul);
/* out ^= gf_mul(in) */
PROTO(reedsolomon_gal_mul_xor);
/* CPU capability levels reported by reedsolomon_determine_cpu_support().
 * NOTE(review): no AVX-512 entry exists even though reedsolomon.c has an
 * AVX-512 path — confirm how the build selects that path. */
typedef enum {
    REEDSOLOMON_CPU_GENERIC = 0,
    REEDSOLOMON_CPU_SSE2 = 1,
    REEDSOLOMON_CPU_SSSE3 = 2,
    REEDSOLOMON_CPU_AVX = 3,
    REEDSOLOMON_CPU_AVX2 = 4,
    REEDSOLOMON_CPU_NEON = 5,
    REEDSOLOMON_CPU_ALTIVEC = 6,
} reedsolomon_cpu_support;
reedsolomon_cpu_support reedsolomon_determine_cpu_support(void);

927
seaweed-volume/vendor/reed-solomon-erasure/src/core.rs

@ -0,0 +1,927 @@
extern crate alloc;
use alloc::sync::Arc;
use alloc::vec;
use alloc::vec::Vec;
use core::num::NonZeroUsize;
use smallvec::SmallVec;
use crate::errors::Error;
use crate::errors::SBSError;
use crate::matrix::Matrix;
use lru::LruCache;
#[cfg(feature = "std")]
use parking_lot::Mutex;
#[cfg(not(feature = "std"))]
use spin::Mutex;
use super::Field;
use super::ReconstructShard;
// Capacity of the per-codec LRU cache of inverted decode matrices.
// NOTE(review): 254 presumably tracks the maximum usable shard count of a
// GF(2^8) codec — confirm against how `F::ORDER` bounds total shards.
const DATA_DECODE_MATRIX_CACHE_CAPACITY: usize = 254;
// /// Parameters for parallelism.
// #[derive(PartialEq, Debug, Clone, Copy)]
// pub struct ParallelParam {
// /// Number of bytes to split the slices into for computations
// /// which can be done in parallel.
// ///
// /// Default is 32768.
// pub bytes_per_encode: usize,
// }
// impl ParallelParam {
// /// Create a new `ParallelParam` with the given split arity.
// pub fn new(bytes_per_encode: usize) -> ParallelParam {
// ParallelParam { bytes_per_encode }
// }
// }
// impl Default for ParallelParam {
// fn default() -> Self {
// ParallelParam::new(32768)
// }
// }
/// Bookkeeper for shard by shard encoding.
///
/// This is useful for avoiding incorrect use of
/// `encode_single` and `encode_single_sep`
///
/// # Use cases
///
/// Shard by shard encoding is useful for streamed data encoding
/// where you do not have all the needed data shards immediately,
/// but you want to spread out the encoding workload rather than
/// doing the encoding after everything is ready.
///
/// A concrete example would be network packets encoding,
/// where encoding packet by packet as you receive them may be more efficient
/// than waiting for N packets then encode them all at once.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # use reed_solomon_erasure::*;
/// # fn main () {
/// use reed_solomon_erasure::galois_8::Field;
/// let r: ReedSolomon<Field> = ReedSolomon::new(3, 2).unwrap();
///
/// let mut sbs = ShardByShard::new(&r);
///
/// let mut shards = shards!([0u8, 1, 2, 3, 4],
/// [5, 6, 7, 8, 9],
/// // say we don't have the 3rd data shard yet
/// // and we want to fill it in later
/// [0, 0, 0, 0, 0],
/// [0, 0, 0, 0, 0],
/// [0, 0, 0, 0, 0]);
///
/// // encode 1st and 2nd data shard
/// sbs.encode(&mut shards).unwrap();
/// sbs.encode(&mut shards).unwrap();
///
/// // fill in 3rd data shard
/// shards[2][0] = 10.into();
/// shards[2][1] = 11.into();
/// shards[2][2] = 12.into();
/// shards[2][3] = 13.into();
/// shards[2][4] = 14.into();
///
/// // now do the encoding
/// sbs.encode(&mut shards).unwrap();
///
/// assert!(r.verify(&shards).unwrap());
/// # }
/// ```
#[derive(PartialEq, Debug)]
pub struct ShardByShard<'a, F: 'a + Field> {
    // The codec that performs the actual encoding work.
    codec: &'a ReedSolomon<F>,
    // Index of the next data shard to feed in; equals the codec's data
    // shard count once every data shard has been encoded (parity ready).
    cur_input: usize,
}
impl<'a, F: 'a + Field> ShardByShard<'a, F> {
    /// Creates a new instance of the bookkeeping struct.
    pub fn new(codec: &'a ReedSolomon<F>) -> ShardByShard<'a, F> {
        ShardByShard { codec, cur_input: 0 }
    }

    /// Checks if the parity shards are ready to use.
    pub fn parity_ready(&self) -> bool {
        self.cur_input == self.codec.data_shard_count
    }

    /// Resets the bookkeeping data.
    ///
    /// You should call this when you have added and encoded
    /// all data shards, and have finished using the parity shards.
    ///
    /// Returns `SBSError::LeftoverShards` when there are shards encoded
    /// but parity shards are not ready to use.
    pub fn reset(&mut self) -> Result<(), SBSError> {
        if self.cur_input != 0 && !self.parity_ready() {
            Err(SBSError::LeftoverShards)
        } else {
            self.cur_input = 0;
            Ok(())
        }
    }

    /// Resets the bookkeeping data without checking.
    pub fn reset_force(&mut self) {
        self.cur_input = 0;
    }

    /// Returns the current input shard index.
    pub fn cur_input_index(&self) -> usize {
        self.cur_input
    }

    // Advances the bookkeeping to the next data shard and reports success.
    fn return_ok_and_incre_cur_input(&mut self) -> Result<(), SBSError> {
        self.cur_input += 1;
        Ok(())
    }

    // Validation for `encode`: reject further calls once every data shard
    // has been fed in, then run the codec-level shape checks.
    fn sbs_encode_checks<U: AsRef<[F::Elem]> + AsMut<[F::Elem]>>(
        &mut self,
        slices: &mut [U],
    ) -> Result<(), SBSError> {
        if self.parity_ready() {
            return Err(SBSError::TooManyCalls);
        }
        let run_checks = |codec: &ReedSolomon<F>, data: &mut [U]| {
            check_piece_count!(all => codec, data);
            check_slices!(multi => data);
            Ok(())
        };
        run_checks(self.codec, slices).map_err(SBSError::RSError)
    }

    // Validation for `encode_sep`, mirroring `sbs_encode_checks` for the
    // split data/parity call shape.
    fn sbs_encode_sep_checks<T: AsRef<[F::Elem]>, U: AsRef<[F::Elem]> + AsMut<[F::Elem]>>(
        &mut self,
        data: &[T],
        parity: &mut [U],
    ) -> Result<(), SBSError> {
        if self.parity_ready() {
            return Err(SBSError::TooManyCalls);
        }
        let run_checks = |codec: &ReedSolomon<F>, data: &[T], parity: &mut [U]| {
            check_piece_count!(data => codec, data);
            check_piece_count!(parity => codec, parity);
            check_slices!(multi => data, multi => parity);
            Ok(())
        };
        run_checks(self.codec, data, parity).map_err(SBSError::RSError)
    }

    /// Constructs the parity shards partially using the current input data shard.
    ///
    /// Returns `SBSError::TooManyCalls` when all input data shards
    /// have already been filled in via `encode`
    pub fn encode<T, U>(&mut self, mut shards: T) -> Result<(), SBSError>
    where
        T: AsRef<[U]> + AsMut<[U]>,
        U: AsRef<[F::Elem]> + AsMut<[F::Elem]>,
    {
        let slices = shards.as_mut();
        self.sbs_encode_checks(slices)?;
        // The checks above guarantee `encode_single` cannot fail here.
        self.codec.encode_single(self.cur_input, slices).unwrap();
        self.return_ok_and_incre_cur_input()
    }

    /// Constructs the parity shards partially using the current input data shard.
    ///
    /// Returns `SBSError::TooManyCalls` when all input data shards
    /// have already been filled in via `encode`
    pub fn encode_sep<T: AsRef<[F::Elem]>, U: AsRef<[F::Elem]> + AsMut<[F::Elem]>>(
        &mut self,
        data: &[T],
        parity: &mut [U],
    ) -> Result<(), SBSError> {
        self.sbs_encode_sep_checks(data, parity)?;
        // The checks above guarantee `encode_single_sep` cannot fail here.
        self.codec
            .encode_single_sep(self.cur_input, data[self.cur_input].as_ref(), parity)
            .unwrap();
        self.return_ok_and_incre_cur_input()
    }
}
/// Reed-Solomon erasure code encoder/decoder.
///
/// # Common error handling
///
/// ## For `encode`, `encode_shards`, `verify`, `verify_shards`, `reconstruct`, `reconstruct_data`, `reconstruct_shards`, `reconstruct_data_shards`
///
/// Return `Error::TooFewShards` or `Error::TooManyShards`
/// when the number of provided shards
/// does not match the codec's one.
///
/// Return `Error::EmptyShard` when the first shard provided is
/// of zero length.
///
/// Return `Error::IncorrectShardSize` when the provided shards
/// are of different lengths.
///
/// ## For `reconstruct`, `reconstruct_data`, `reconstruct_shards`, `reconstruct_data_shards`
///
/// Return `Error::TooFewShardsPresent` when there are not
/// enough shards for reconstruction.
///
/// Return `Error::InvalidShardFlags` when the number of flags does not match
/// the total number of shards.
///
/// # Variants of encoding methods
///
/// ## `sep`
///
/// Methods ending in `_sep` takes an immutable reference to data shards,
/// and a mutable reference to parity shards.
///
/// They are useful as they do not need to borrow the data shards mutably,
/// and other work that only needs read-only access to data shards can be done
/// in parallel/concurrently during the encoding.
///
/// Following is a table of all the `sep` variants
///
/// | not `sep` | `sep` |
/// | --- | --- |
/// | `encode_single` | `encode_single_sep` |
/// | `encode` | `encode_sep` |
///
/// The `sep` variants do similar checks on the provided data shards and
/// parity shards.
///
/// Return `Error::TooFewDataShards`, `Error::TooManyDataShards`,
/// `Error::TooFewParityShards`, or `Error::TooManyParityShards` when applicable.
///
/// ## `single`
///
/// Methods containing `single` facilitate shard by shard encoding, where
/// the parity shards are partially constructed using one data shard at a time.
/// See `ShardByShard` struct for more details on how shard by shard encoding
/// can be useful.
///
/// They are prone to **misuse**, and it is recommended to use the `ShardByShard`
/// bookkeeping struct instead for shard by shard encoding.
///
/// The ones that are also `sep` are **ESPECIALLY** prone to **misuse**.
/// Only use them when you actually need the flexibility.
///
/// Following is a table of all the shard by shard variants
///
/// | all shards at once | shard by shard |
/// | --- | --- |
/// | `encode` | `encode_single` |
/// | `encode_sep` | `encode_single_sep` |
///
/// The `single` variants do similar checks on the provided data shards and parity shards,
/// and also do index check on `i_data`.
///
/// Return `Error::InvalidIndex` if `i_data >= data_shard_count`.
///
/// # Encoding behaviour
/// ## For `encode`
///
/// You do not need to clear the parity shards beforehand, as the methods
/// will overwrite them completely.
///
/// ## For `encode_single`, `encode_single_sep`
///
/// Calling them with `i_data` being `0` will overwrite the parity shards
/// completely. If you are using the methods correctly, then you do not need
/// to clear the parity shards beforehand.
///
/// # Variants of verifying methods
///
/// `verify` allocate sa buffer on the heap of the same size
/// as the parity shards, and encode the input once using the buffer to store
/// the computed parity shards, then check if the provided parity shards
/// match the computed ones.
///
/// `verify_with_buffer`, allows you to provide
/// the buffer to avoid making heap allocation(s) for the buffer in every call.
///
/// The `with_buffer` variants also guarantee that the buffer contains the correct
/// parity shards if the result is `Ok(_)` (i.e. it does not matter whether the
/// verification passed or not, as long as the result is not an error, the buffer
/// will contain the correct parity shards after the call).
///
/// Following is a table of all the `with_buffer` variants
///
/// | not `with_buffer` | `with_buffer` |
/// | --- | --- |
/// | `verify` | `verify_with_buffer` |
///
/// The `with_buffer` variants also check the dimensions of the buffer and return
/// `Error::TooFewBufferShards`, `Error::TooManyBufferShards`, `Error::EmptyShard`,
/// or `Error::IncorrectShardSize` when applicable.
///
#[derive(Debug)]
pub struct ReedSolomon<F: Field> {
    // Number of data shards.
    data_shard_count: usize,
    // Number of parity shards.
    parity_shard_count: usize,
    // Always data_shard_count + parity_shard_count.
    total_shard_count: usize,
    // Encoding matrix, built from a Vandermonde matrix (see `build_matrix`).
    data_decode_matrix_cache is keyed by `Vec<usize>` — presumably the
    matrix: Matrix<F>,
    // Bounded LRU cache of inverted decode matrices.  NOTE(review): the key
    // is a `Vec<usize>` — presumably the indices of the shards used to build
    // each matrix; confirm in the reconstruct path (not visible here).
    data_decode_matrix_cache: Mutex<LruCache<Vec<usize>, Arc<Matrix<F>>>>,
}
impl<F: Field> Clone for ReedSolomon<F> {
    /// Rebuilds an equivalent codec from the shard counts via `new`; the
    /// decode-matrix cache is not copied — the clone starts with a fresh,
    /// empty cache.
    fn clone(&self) -> ReedSolomon<F> {
        let rebuilt = ReedSolomon::new(self.data_shard_count, self.parity_shard_count);
        rebuilt.expect("basic checks already passed as precondition of existence of self")
    }
}
impl<F: Field> PartialEq for ReedSolomon<F> {
    /// Two codecs are considered equal when their data and parity shard
    /// counts match; the matrix and cache are derived from those counts.
    fn eq(&self, rhs: &ReedSolomon<F>) -> bool {
        let same_data = self.data_shard_count == rhs.data_shard_count;
        let same_parity = self.parity_shard_count == rhs.parity_shard_count;
        same_data && same_parity
    }
}
impl<F: Field> ReedSolomon<F> {
// AUDIT
//
// Error detection responsibilities
//
// Terminologies and symbols:
// X =A, B, C=> Y: X delegates error checking responsibilities A, B, C to Y
// X:= A, B, C: X needs to handle responsibilities A, B, C
//
// Encode methods
//
// `encode_single`:=
// - check index `i_data` within range [0, data shard count)
// - check length of `slices` matches total shard count exactly
// - check consistency of length of individual slices
// `encode_single_sep`:=
// - check index `i_data` within range [0, data shard count)
// - check length of `parity` matches parity shard count exactly
// - check consistency of length of individual parity slices
// - check length of `single_data` matches length of first parity slice
// `encode`:=
// - check length of `slices` matches total shard count exactly
// - check consistency of length of individual slices
// `encode_sep`:=
// - check length of `data` matches data shard count exactly
// - check length of `parity` matches parity shard count exactly
// - check consistency of length of individual data slices
// - check consistency of length of individual parity slices
// - check length of first parity slice matches length of first data slice
//
// Verify methods
//
// `verify`:=
// - check length of `slices` matches total shard count exactly
// - check consistency of length of individual slices
//
// Generates buffer then passes control to verify_with_buffer
//
// `verify_with_buffer`:=
// - check length of `slices` matches total shard count exactly
// - check length of `buffer` matches parity shard count exactly
// - check consistency of length of individual slices
// - check consistency of length of individual slices in buffer
// - check length of first slice in buffer matches length of first slice
//
// Reconstruct methods
//
// `reconstruct` =ALL=> `reconstruct_internal`
// `reconstruct_data`=ALL=> `reconstruct_internal`
// `reconstruct_internal`:=
// - check length of `slices` matches total shard count exactly
// - check consistency of length of individual slices
// - check length of `slice_present` matches length of `slices`
fn get_parity_rows(&self) -> SmallVec<[&[F::Elem]; 32]> {
let mut parity_rows = SmallVec::with_capacity(self.parity_shard_count);
let matrix = &self.matrix;
for i in self.data_shard_count..self.total_shard_count {
parity_rows.push(matrix.get_row(i));
}
parity_rows
}
fn build_matrix(data_shards: usize, total_shards: usize) -> Matrix<F> {
let vandermonde = Matrix::vandermonde(total_shards, data_shards);
let top = vandermonde.sub_matrix(0, 0, data_shards, data_shards);
vandermonde.multiply(&top.invert().unwrap())
}
/// Creates a new instance of Reed-Solomon erasure code encoder/decoder.
///
/// Returns `Error::TooFewDataShards` if `data_shards == 0`.
///
/// Returns `Error::TooFewParityShards` if `parity_shards == 0`.
///
/// Returns `Error::TooManyShards` if `data_shards + parity_shards > F::ORDER`.
pub fn new(data_shards: usize, parity_shards: usize) -> Result<ReedSolomon<F>, Error> {
if data_shards == 0 {
return Err(Error::TooFewDataShards);
}
if parity_shards == 0 {
return Err(Error::TooFewParityShards);
}
if data_shards + parity_shards > F::ORDER {
return Err(Error::TooManyShards);
}
let total_shards = data_shards + parity_shards;
let matrix = Self::build_matrix(data_shards, total_shards);
Ok(ReedSolomon {
data_shard_count: data_shards,
parity_shard_count: parity_shards,
total_shard_count: total_shards,
matrix,
data_decode_matrix_cache: Mutex::new(LruCache::new(
NonZeroUsize::new(DATA_DECODE_MATRIX_CACHE_CAPACITY).unwrap(),
)),
})
}
    /// Returns the number of data shards this codec was created with.
    pub fn data_shard_count(&self) -> usize {
        self.data_shard_count
    }
    /// Returns the number of parity shards this codec was created with.
    pub fn parity_shard_count(&self) -> usize {
        self.parity_shard_count
    }
    /// Returns the total shard count (data shards + parity shards).
    pub fn total_shard_count(&self) -> usize {
        self.total_shard_count
    }
fn code_some_slices<T: AsRef<[F::Elem]>, U: AsMut<[F::Elem]>>(
&self,
matrix_rows: &[&[F::Elem]],
inputs: &[T],
outputs: &mut [U],
) {
for i_input in 0..self.data_shard_count {
self.code_single_slice(matrix_rows, i_input, inputs[i_input].as_ref(), outputs);
}
}
fn code_single_slice<U: AsMut<[F::Elem]>>(
&self,
matrix_rows: &[&[F::Elem]],
i_input: usize,
input: &[F::Elem],
outputs: &mut [U],
) {
outputs.iter_mut().enumerate().for_each(|(i_row, output)| {
let matrix_row_to_use = matrix_rows[i_row][i_input];
let output = output.as_mut();
if i_input == 0 {
F::mul_slice(matrix_row_to_use, input, output);
} else {
F::mul_slice_add(matrix_row_to_use, input, output);
}
})
}
fn check_some_slices_with_buffer<T, U>(
&self,
matrix_rows: &[&[F::Elem]],
inputs: &[T],
to_check: &[T],
buffer: &mut [U],
) -> bool
where
T: AsRef<[F::Elem]>,
U: AsRef<[F::Elem]> + AsMut<[F::Elem]>,
{
self.code_some_slices(matrix_rows, inputs, buffer);
let at_least_one_mismatch_present = buffer
.iter_mut()
.enumerate()
.map(|(i, expected_parity_shard)| {
expected_parity_shard.as_ref() == to_check[i].as_ref()
})
.any(|x| !x); // find the first false (some slice is different from the expected one)
!at_least_one_mismatch_present
}
    /// Constructs the parity shards partially using only the data shard
    /// indexed by `i_data`.
    ///
    /// The slots where the parity shards sit at will be overwritten.
    ///
    /// # Warning
    ///
    /// You must apply this method on the data shards in strict sequential order (0..data shard count),
    /// otherwise the parity shards will be incorrect.
    ///
    /// It is recommended to use the `ShardByShard` bookkeeping struct instead of this method directly.
    pub fn encode_single<T, U>(&self, i_data: usize, mut shards: T) -> Result<(), Error>
    where
        T: AsRef<[U]> + AsMut<[U]>,
        U: AsRef<[F::Elem]> + AsMut<[F::Elem]>,
    {
        let slices = shards.as_mut();
        // Validate the index, shard count, and shard sizes up front; the
        // macros return early with the appropriate `Error` on failure.
        check_slice_index!(data => self, i_data);
        check_piece_count!(all=> self, slices);
        check_slices!(multi => slices);
        // Get the slice of output buffers.
        // Data shards come first; everything after them is parity output.
        let (mut_input, output) = slices.split_at_mut(self.data_shard_count);
        let input = mut_input[i_data].as_ref();
        self.encode_single_sep(i_data, input, output)
    }
    /// Constructs the parity shards partially using only the data shard provided.
    ///
    /// The data shard must match the index `i_data`.
    ///
    /// The slots where the parity shards sit at will be overwritten.
    ///
    /// # Warning
    ///
    /// You must apply this method on the data shards in strict sequential order (0..data shard count),
    /// otherwise the parity shards will be incorrect.
    ///
    /// It is recommended to use the `ShardByShard` bookkeeping struct instead of this method directly.
    pub fn encode_single_sep<U: AsRef<[F::Elem]> + AsMut<[F::Elem]>>(
        &self,
        i_data: usize,
        single_data: &[F::Elem],
        parity: &mut [U],
    ) -> Result<(), Error> {
        // Validate index, parity count, and slice sizes before touching data.
        check_slice_index!(data => self, i_data);
        check_piece_count!(parity => self, parity);
        check_slices!(multi => parity, single => single_data);
        // Only the parity rows of the codec matrix are needed for encoding.
        let parity_rows = self.get_parity_rows();
        // Do the coding.
        self.code_single_slice(&parity_rows, i_data, single_data, parity);
        Ok(())
    }
    /// Constructs the parity shards.
    ///
    /// The slots where the parity shards sit at will be overwritten.
    ///
    /// # Errors
    ///
    /// Fails when the shard count does not match the codec or when the
    /// shards are empty or of unequal sizes.
    pub fn encode<T, U>(&self, mut shards: T) -> Result<(), Error>
    where
        T: AsRef<[U]> + AsMut<[U]>,
        U: AsRef<[F::Elem]> + AsMut<[F::Elem]>,
    {
        let slices: &mut [U] = shards.as_mut();
        check_piece_count!(all => self, slices);
        check_slices!(multi => slices);
        // Get the slice of output buffers.
        // Data shards come first; the remainder are the parity outputs.
        let (input, output) = slices.split_at_mut(self.data_shard_count);
        self.encode_sep(&*input, output)
    }
    /// Constructs the parity shards using a read-only view into the
    /// data shards.
    ///
    /// The slots where the parity shards sit at will be overwritten.
    pub fn encode_sep<T: AsRef<[F::Elem]>, U: AsRef<[F::Elem]> + AsMut<[F::Elem]>>(
        &self,
        data: &[T],
        parity: &mut [U],
    ) -> Result<(), Error> {
        // Validate counts and sizes; errors are returned by the macros.
        check_piece_count!(data => self, data);
        check_piece_count!(parity => self, parity);
        check_slices!(multi => data, multi => parity);
        // Only the parity rows of the codec matrix participate in encoding.
        let parity_rows = self.get_parity_rows();
        // Do the coding.
        self.code_some_slices(&parity_rows, data, parity);
        Ok(())
    }
/// Checks if the parity shards are correct.
///
/// This is a wrapper of `verify_with_buffer`.
pub fn verify<T: AsRef<[F::Elem]>>(&self, slices: &[T]) -> Result<bool, Error> {
check_piece_count!(all => self, slices);
check_slices!(multi => slices);
let slice_len = slices[0].as_ref().len();
let mut buffer: SmallVec<[Vec<F::Elem>; 32]> =
SmallVec::with_capacity(self.parity_shard_count);
for _ in 0..self.parity_shard_count {
buffer.push(vec![F::zero(); slice_len]);
}
self.verify_with_buffer(slices, &mut buffer)
}
    /// Checks if the parity shards are correct.
    ///
    /// `buffer` must hold exactly one scratch slice per parity shard; its
    /// contents are overwritten with the recomputed parity.
    pub fn verify_with_buffer<T, U>(&self, slices: &[T], buffer: &mut [U]) -> Result<bool, Error>
    where
        T: AsRef<[F::Elem]>,
        U: AsRef<[F::Elem]> + AsMut<[F::Elem]>,
    {
        check_piece_count!(all => self, slices);
        check_piece_count!(parity_buf => self, buffer);
        check_slices!(multi => slices, multi => buffer);
        // Split into data (first `data_shard_count`) and the parity to verify.
        let data = &slices[0..self.data_shard_count];
        let to_check = &slices[self.data_shard_count..];
        let parity_rows = self.get_parity_rows();
        Ok(self.check_some_slices_with_buffer(&parity_rows, data, to_check, buffer))
    }
    /// Reconstructs all shards.
    ///
    /// The shards marked not present are only overwritten when no error
    /// is detected. All provided shards must have the same length.
    ///
    /// This means if the method returns an `Error`, then nothing is touched.
    ///
    /// `reconstruct`, `reconstruct_data`, `reconstruct_shards`,
    /// `reconstruct_data_shards` share the same core code base.
    pub fn reconstruct<T: ReconstructShard<F>>(&self, slices: &mut [T]) -> Result<(), Error> {
        // `false`: rebuild missing parity shards as well as data shards.
        self.reconstruct_internal(slices, false)
    }
    /// Reconstructs only the data shards.
    ///
    /// The shards marked not present are only overwritten when no error
    /// is detected. All provided shards must have the same length.
    ///
    /// This means if the method returns an `Error`, then nothing is touched.
    ///
    /// `reconstruct`, `reconstruct_data`, `reconstruct_shards`,
    /// `reconstruct_data_shards` share the same core code base.
    pub fn reconstruct_data<T: ReconstructShard<F>>(&self, slices: &mut [T]) -> Result<(), Error> {
        // `true`: skip rebuilding missing parity shards.
        self.reconstruct_internal(slices, true)
    }
    /// Returns the inverted sub-matrix used to decode missing data shards,
    /// building and caching it (keyed by `invalid_indices`) on a miss.
    fn get_data_decode_matrix(
        &self,
        valid_indices: &[usize],
        invalid_indices: &[usize],
    ) -> Arc<Matrix<F>> {
        {
            // Fast path: cache hit. The lock is scoped so it is released
            // before the (expensive) matrix inversion below.
            let mut cache = self.data_decode_matrix_cache.lock();
            if let Some(entry) = cache.get(invalid_indices) {
                return entry.clone();
            }
        }
        // Pull out the rows of the matrix that correspond to the shards that
        // we have and build a square matrix. This matrix could be used to
        // generate the shards that we have from the original data.
        let mut sub_matrix = Matrix::new(self.data_shard_count, self.data_shard_count);
        for (sub_matrix_row, &valid_index) in valid_indices.iter().enumerate() {
            for c in 0..self.data_shard_count {
                sub_matrix.set(sub_matrix_row, c, self.matrix.get(valid_index, c));
            }
        }
        // Invert the matrix, so we can go from the encoded shards back to the
        // original data. Then pull out the row that generates the shard that
        // we want to decode. Note that since this matrix maps back to the
        // original data, it can be used to create a data shard, but not a
        // parity shard.
        let data_decode_matrix = Arc::new(sub_matrix.invert().unwrap());
        // Cache the inverted matrix for future use keyed on the indices of the
        // invalid rows.
        {
            let data_decode_matrix = data_decode_matrix.clone();
            let mut cache = self.data_decode_matrix_cache.lock();
            cache.put(Vec::from(invalid_indices), data_decode_matrix);
        }
        data_decode_matrix
    }
    /// Shared implementation behind `reconstruct` and `reconstruct_data`.
    ///
    /// When `data_only` is true only missing data shards are rebuilt;
    /// missing parity shards are noted but left uninitialized.
    ///
    /// Validation (shard count, non-empty, equal sizes, enough shards
    /// present) happens before any shard is written, so on `Err` nothing
    /// has been modified.
    fn reconstruct_internal<T: ReconstructShard<F>>(
        &self,
        shards: &mut [T],
        data_only: bool,
    ) -> Result<(), Error> {
        check_piece_count!(all => self, shards);
        let data_shard_count = self.data_shard_count;
        // Quick check: are all of the shards present? If so, there's
        // nothing to do.
        let mut number_present = 0;
        let mut shard_len = None;
        for shard in shards.iter_mut() {
            if let Some(len) = shard.len() {
                if len == 0 {
                    return Err(Error::EmptyShard);
                }
                number_present += 1;
                if let Some(old_len) = shard_len {
                    if len != old_len {
                        // mismatch between shards.
                        return Err(Error::IncorrectShardSize);
                    }
                }
                shard_len = Some(len);
            }
        }
        if number_present == self.total_shard_count {
            // Cool. All of the shards are there. We don't
            // need to do anything.
            return Ok(());
        }
        // More complete sanity check
        if number_present < data_shard_count {
            return Err(Error::TooFewShardsPresent);
        }
        let shard_len = shard_len.expect("at least one shard present; qed");
        // Pull out an array holding just the shards that
        // correspond to the rows of the submatrix. These shards
        // will be the input to the decoding process that re-creates
        // the missing data shards.
        //
        // Also, create an array of indices of the valid rows we do have
        // and the invalid rows we don't have.
        //
        // The valid indices are used to construct the data decode matrix,
        // the invalid indices are used to key the data decode matrix
        // in the data decode matrix cache.
        //
        // We only need exactly N valid indices, where N = `data_shard_count`,
        // as the data decode matrix is a N x N matrix, thus only needs
        // N valid indices for determining the N rows to pick from
        // `self.matrix`.
        let mut sub_shards: SmallVec<[&[F::Elem]; 32]> = SmallVec::with_capacity(data_shard_count);
        let mut missing_data_slices: SmallVec<[&mut [F::Elem]; 32]> =
            SmallVec::with_capacity(self.parity_shard_count);
        let mut missing_parity_slices: SmallVec<[&mut [F::Elem]; 32]> =
            SmallVec::with_capacity(self.parity_shard_count);
        let mut valid_indices: SmallVec<[usize; 32]> = SmallVec::with_capacity(data_shard_count);
        let mut invalid_indices: SmallVec<[usize; 32]> = SmallVec::with_capacity(data_shard_count);
        // Separate the shards into groups
        for (matrix_row, shard) in shards.iter_mut().enumerate() {
            // get or initialize the shard so we can reconstruct in-place,
            // but if we are only reconstructing data shard,
            // do not initialize if the shard is not a data shard
            let shard_data = if matrix_row >= data_shard_count && data_only {
                shard.get().ok_or(None)
            } else {
                shard.get_or_initialize(shard_len).map_err(Some)
            };
            match shard_data {
                Ok(shard) => {
                    if sub_shards.len() < data_shard_count {
                        sub_shards.push(shard);
                        valid_indices.push(matrix_row);
                    } else {
                        // Already have enough shards in `sub_shards`
                        // as we only need N shards, where N = `data_shard_count`,
                        // for the data decode matrix
                        //
                        // So nothing to do here
                    }
                }
                Err(None) => {
                    // the shard data is not meant to be initialized here,
                    // but we should still note it missing.
                    invalid_indices.push(matrix_row);
                }
                Err(Some(x)) => {
                    // initialized missing shard data.
                    let shard = x?;
                    if matrix_row < data_shard_count {
                        missing_data_slices.push(shard);
                    } else {
                        missing_parity_slices.push(shard);
                    }
                    invalid_indices.push(matrix_row);
                }
            }
        }
        let data_decode_matrix = self.get_data_decode_matrix(&valid_indices, &invalid_indices);
        // Re-create any data shards that were missing.
        //
        // The input to the coding is all of the shards we actually
        // have, and the output is the missing data shards. The computation
        // is done using the special decode matrix we just built.
        let mut matrix_rows: SmallVec<[&[F::Elem]; 32]> =
            SmallVec::with_capacity(self.parity_shard_count);
        // `invalid_indices` is built in ascending order, so the data-shard
        // indices form the leading prefix consumed by `take_while`.
        for i_slice in invalid_indices
            .iter()
            .cloned()
            .take_while(|i| i < &data_shard_count)
        {
            matrix_rows.push(data_decode_matrix.get_row(i_slice));
        }
        self.code_some_slices(&matrix_rows, &sub_shards, &mut missing_data_slices);
        if data_only {
            Ok(())
        } else {
            // Now that we have all of the data shards intact, we can
            // compute any of the parity that is missing.
            //
            // The input to the coding is ALL of the data shards, including
            // any that we just calculated. The output is whichever of the
            // parity shards were missing.
            let mut matrix_rows: SmallVec<[&[F::Elem]; 32]> =
                SmallVec::with_capacity(self.parity_shard_count);
            let parity_rows = self.get_parity_rows();
            for i_slice in invalid_indices
                .iter()
                .cloned()
                .skip_while(|i| i < &data_shard_count)
            {
                matrix_rows.push(parity_rows[i_slice - data_shard_count]);
            }
            {
                // Gather up all the data shards.
                // old data shards are in `sub_shards`,
                // new ones are in `missing_data_slices`.
                let mut i_old_data_slice = 0;
                let mut i_new_data_slice = 0;
                let mut all_data_slices: SmallVec<[&[F::Elem]; 32]> =
                    SmallVec::with_capacity(data_shard_count);
                let mut next_maybe_good = 0;
                let mut push_good_up_to = move |data_slices: &mut SmallVec<_>, up_to| {
                    // if next_maybe_good == up_to, this loop is a no-op.
                    for _ in next_maybe_good..up_to {
                        // push all good indices we just skipped.
                        data_slices.push(sub_shards[i_old_data_slice]);
                        i_old_data_slice += 1;
                    }
                    next_maybe_good = up_to + 1;
                };
                for i_slice in invalid_indices
                    .iter()
                    .cloned()
                    .take_while(|i| i < &data_shard_count)
                {
                    push_good_up_to(&mut all_data_slices, i_slice);
                    all_data_slices.push(missing_data_slices[i_new_data_slice]);
                    i_new_data_slice += 1;
                }
                push_good_up_to(&mut all_data_slices, data_shard_count);
                // Now do the actual computation for the missing
                // parity shards
                self.code_some_slices(&matrix_rows, &all_data_slices, &mut missing_parity_slices);
            }
            Ok(())
        }
    }
}

158
seaweed-volume/vendor/reed-solomon-erasure/src/errors.rs

@ -0,0 +1,158 @@
use core::fmt::Formatter;
/// Errors reported by the Reed-Solomon codec operations.
///
/// The user-facing message for each variant lives in the inherent
/// `to_string` method below.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Error {
    TooFewShards,
    TooManyShards,
    TooFewDataShards,
    TooManyDataShards,
    TooFewParityShards,
    TooManyParityShards,
    TooFewBufferShards,
    TooManyBufferShards,
    IncorrectShardSize,
    TooFewShardsPresent,
    EmptyShard,
    InvalidShardFlags,
    InvalidIndex,
}
impl Error {
    // Maps each variant to its static, user-facing message.
    //
    // NOTE(review): this inherent `to_string` returning `&str` shadows the
    // blanket `ToString::to_string` (which returns `String`). Renaming it
    // would make `Display::fmt` below resolve to the blanket impl and
    // recurse infinitely, so it is intentionally left as-is.
    fn to_string(&self) -> &str {
        match *self {
            Error::TooFewShards=> "The number of provided shards is smaller than the one in codec",
            Error::TooManyShards => "The number of provided shards is greater than the one in codec",
            Error::TooFewDataShards => "The number of provided data shards is smaller than the one in codec",
            Error::TooManyDataShards => "The number of provided data shards is greater than the one in codec",
            Error::TooFewParityShards => "The number of provided parity shards is smaller than the one in codec",
            Error::TooManyParityShards => "The number of provided parity shards is greater than the one in codec",
            Error::TooFewBufferShards => "The number of provided buffer shards is smaller than the number of parity shards in codec",
            Error::TooManyBufferShards => "The number of provided buffer shards is greater than the number of parity shards in codec",
            Error::IncorrectShardSize => "At least one of the provided shards is not of the correct size",
            Error::TooFewShardsPresent => "The number of shards present is smaller than number of parity shards, cannot reconstruct missing shards",
            Error::EmptyShard => "The first shard provided is of zero length",
            Error::InvalidShardFlags => "The number of flags does not match the total number of shards",
            Error::InvalidIndex => "The data shard index provided is greater or equal to the number of data shards in codec",
        }
    }
}
impl core::fmt::Display for Error {
    fn fmt(&self, f: &mut Formatter) -> Result<(), core::fmt::Error> {
        // Delegates to the inherent `to_string` for the message text.
        write!(f, "{}", self.to_string())
    }
}
#[cfg(feature = "std")]
impl std::error::Error for Error {
    // NOTE(review): `Error::description` has been deprecated since Rust
    // 1.42 in favor of `Display`; the default impl would suffice, kept
    // here to match the vendored upstream source.
    fn description(&self) -> &str {
        self.to_string()
    }
}
/// Errors reported by the `ShardByShard` bookkeeping struct.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum SBSError {
    TooManyCalls,
    LeftoverShards,
    // Wraps an underlying codec error.
    RSError(Error),
}
impl SBSError {
    // Maps each variant to its user-facing message; the wrapped codec
    // error reuses its own message. Same shadowing caveat as
    // `Error::to_string` above.
    fn to_string(&self) -> &str {
        match *self {
            SBSError::TooManyCalls => "Too many calls",
            SBSError::LeftoverShards => "Leftover shards",
            SBSError::RSError(ref e) => e.to_string(),
        }
    }
}
impl core::fmt::Display for SBSError {
    fn fmt(&self, f: &mut Formatter) -> Result<(), core::fmt::Error> {
        // Delegates to the inherent `to_string` for the message text.
        write!(f, "{}", self.to_string())
    }
}
#[cfg(feature = "std")]
impl std::error::Error for SBSError {
    // NOTE(review): deprecated API kept to match vendored upstream; see
    // the equivalent impl for `Error`.
    fn description(&self) -> &str {
        self.to_string()
    }
}
// Unit tests pinning the exact user-facing message strings; these act as
// a contract for downstream code that matches on error text.
#[cfg(test)]
mod tests {
    use crate::errors::Error;
    use crate::errors::SBSError;
    #[test]
    fn test_error_to_string_is_okay() {
        assert_eq!(
            Error::TooFewShards.to_string(),
            "The number of provided shards is smaller than the one in codec"
        );
        assert_eq!(
            Error::TooManyShards.to_string(),
            "The number of provided shards is greater than the one in codec"
        );
        assert_eq!(
            Error::TooFewDataShards.to_string(),
            "The number of provided data shards is smaller than the one in codec"
        );
        assert_eq!(
            Error::TooManyDataShards.to_string(),
            "The number of provided data shards is greater than the one in codec"
        );
        assert_eq!(
            Error::TooFewParityShards.to_string(),
            "The number of provided parity shards is smaller than the one in codec"
        );
        assert_eq!(
            Error::TooManyParityShards.to_string(),
            "The number of provided parity shards is greater than the one in codec"
        );
        assert_eq!(
            Error::TooFewBufferShards.to_string(),
            "The number of provided buffer shards is smaller than the number of parity shards in codec"
        );
        assert_eq!(
            Error::TooManyBufferShards.to_string(),
            "The number of provided buffer shards is greater than the number of parity shards in codec"
        );
        assert_eq!(
            Error::IncorrectShardSize.to_string(),
            "At least one of the provided shards is not of the correct size"
        );
        assert_eq!(Error::TooFewShardsPresent.to_string(), "The number of shards present is smaller than number of parity shards, cannot reconstruct missing shards");
        assert_eq!(
            Error::EmptyShard.to_string(),
            "The first shard provided is of zero length"
        );
        assert_eq!(
            Error::InvalidShardFlags.to_string(),
            "The number of flags does not match the total number of shards"
        );
        assert_eq!(
            Error::InvalidIndex.to_string(),
            "The data shard index provided is greater or equal to the number of data shards in codec"
        );
    }
    #[test]
    fn test_sbserror_to_string_is_okay() {
        assert_eq!(SBSError::TooManyCalls.to_string(), "Too many calls");
        assert_eq!(SBSError::LeftoverShards.to_string(), "Leftover shards");
    }
    // Smoke tests: `Display` must not panic or recurse.
    #[cfg(feature = "std")]
    #[test]
    fn test_error_display_does_not_panic() {
        println!("{}", Error::TooFewShards);
    }
    #[cfg(feature = "std")]
    #[test]
    fn test_sbserror_display_does_not_panic() {
        println!("{}", SBSError::TooManyCalls);
    }
}

412
seaweed-volume/vendor/reed-solomon-erasure/src/galois_16.rs

@ -0,0 +1,412 @@
//! GF(2^16) implementation.
//!
//! More accurately, this is a `GF((2^8)^2)` implementation which builds an extension
//! field of `GF(2^8)`, as defined in the `galois_8` module.
use crate::galois_8;
use core::ops::{Add, Div, Mul, Sub};
// the irreducible polynomial used as a modulus for the field.
// print R.irreducible_element(2,algorithm="first_lexicographic" )
// x^2 + a*x + a^7
//
// hopefully it is a fast polynomial
//
// Coefficients over GF(2^8), highest degree first: [1, 2, 128], i.e.
// x^2 + 2*x + 128 (2 and 128 presumably encode `a` and `a^7` from the
// sage expression above — TODO confirm against sage/galois_ext_test.sage).
const EXT_POLY: [u8; 3] = [1, 2, 128];
/// The field GF(2^16).
///
/// Built as the extension field GF((2^8)^2) over the `galois_8` module
/// (see the module-level docs).
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
pub struct Field;
impl crate::Field for Field {
    const ORDER: usize = 65536;
    // An element is a pair of GF(2^8) coefficients, stored high byte
    // first: `nth_internal` places `n >> 8` at index 0, and `one()` is
    // `[0, 1]`.
    type Elem = [u8; 2];
    fn add(a: [u8; 2], b: [u8; 2]) -> [u8; 2] {
        (Element(a) + Element(b)).0
    }
    fn mul(a: [u8; 2], b: [u8; 2]) -> [u8; 2] {
        (Element(a) * Element(b)).0
    }
    fn div(a: [u8; 2], b: [u8; 2]) -> [u8; 2] {
        (Element(a) / Element(b)).0
    }
    fn exp(elem: [u8; 2], n: usize) -> [u8; 2] {
        Element(elem).exp(n).0
    }
    fn zero() -> [u8; 2] {
        [0; 2]
    }
    fn one() -> [u8; 2] {
        [0, 1]
    }
    fn nth_internal(n: usize) -> [u8; 2] {
        // Big-endian packing of the low 16 bits of `n`.
        [(n >> 8) as u8, n as u8]
    }
}
/// Type alias of ReedSolomon over GF(2^16).
pub type ReedSolomon = crate::ReedSolomon<Field>;
/// Type alias of ShardByShard over GF(2^16).
pub type ShardByShard<'a> = crate::ShardByShard<'a, Field>;
/// An element of `GF(2^16)`.
///
/// Wraps the `[high, low]` coefficient pair; arithmetic is implemented
/// via the operator traits below.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Element(pub [u8; 2]);
impl Element {
    // Create the zero element.
    fn zero() -> Self {
        Element([0, 0])
    }
    // A constant element evaluating to `n`.
    //
    // Constants occupy the low (index 1) byte; index 0 is the degree-1
    // coefficient of the extension.
    fn constant(n: u8) -> Element {
        Element([0, n])
    }
    // Whether this is the zero element.
    fn is_zero(&self) -> bool {
        self.0 == [0; 2]
    }
    // Raise to the power `n` by repeated multiplication (O(n) multiplies).
    // By convention 0^0 evaluates to 1 here.
    fn exp(mut self, n: usize) -> Element {
        if n == 0 {
            Element::constant(1)
        } else if self == Element::zero() {
            Element::zero()
        } else {
            let x = self;
            for _ in 1..n {
                self = self * x;
            }
            self
        }
    }
    // reduces from some polynomial with degree <= 2.
    #[inline]
    fn reduce_from(mut x: [u8; 3]) -> Self {
        if x[0] != 0 {
            // divide x by EXT_POLY and use remainder.
            // i = 0 here.
            // c*x^(i+j) = a*x^i*b*x^j
            x[1] ^= galois_8::mul(EXT_POLY[1], x[0]);
            x[2] ^= galois_8::mul(EXT_POLY[2], x[0]);
        }
        Element([x[1], x[2]])
    }
    // Degree of the element viewed as a polynomial over GF(2^8):
    // 1 when the x-coefficient is set, else 0.
    fn degree(&self) -> usize {
        if self.0[0] != 0 {
            1
        } else {
            0
        }
    }
}
impl From<[u8; 2]> for Element {
    fn from(c: [u8; 2]) -> Self {
        Element(c)
    }
}
impl Default for Element {
    // The additive identity.
    fn default() -> Self {
        Element::zero()
    }
}
impl Add for Element {
    type Output = Element;
    // Addition in a characteristic-2 field is coefficient-wise XOR.
    fn add(self, other: Self) -> Element {
        Element([self.0[0] ^ other.0[0], self.0[1] ^ other.0[1]])
    }
}
impl Sub for Element {
    type Output = Element;
    // In characteristic 2, subtraction is identical to addition.
    fn sub(self, other: Self) -> Element {
        self.add(other)
    }
}
impl Mul for Element {
    type Output = Element;
    fn mul(self, rhs: Self) -> Element {
        // FOIL; our elements are linear at most, with two coefficients
        // Index 0 holds the x^2 coefficient, index 2 the constant; the
        // product is then reduced modulo EXT_POLY.
        let out: [u8; 3] = [
            galois_8::mul(self.0[0], rhs.0[0]),
            galois_8::add(
                galois_8::mul(self.0[1], rhs.0[0]),
                galois_8::mul(self.0[0], rhs.0[1]),
            ),
            galois_8::mul(self.0[1], rhs.0[1]),
        ];
        Element::reduce_from(out)
    }
}
impl Mul<u8> for Element {
    type Output = Element;
    // Scalar multiplication by a GF(2^8) constant, applied per coefficient.
    fn mul(self, rhs: u8) -> Element {
        Element([galois_8::mul(rhs, self.0[0]), galois_8::mul(rhs, self.0[1])])
    }
}
impl Div for Element {
    type Output = Element;
    // Division is multiplication by the inverse; panics when `rhs` is zero
    // (see `inverse`).
    fn div(self, rhs: Self) -> Element {
        self * rhs.inverse()
    }
}
// helpers for division.
// helpers for division.
//
// Right-hand side of a `const_egcd` step: either a field element or the
// (out-of-type) extension modulus EXT_POLY.
#[derive(Debug)]
enum EgcdRhs {
    Element(Element),
    ExtPoly,
}
impl Element {
    // compute extended euclidean algorithm against an element of self,
    // where the GCD is known to be constant.
    //
    // Returns (gcd-as-constant, bezout coefficient for rhs, for self).
    fn const_egcd(self, rhs: EgcdRhs) -> (u8, Element, Element) {
        if self.is_zero() {
            let rhs = match rhs {
                EgcdRhs::Element(elem) => elem,
                EgcdRhs::ExtPoly => panic!("const_egcd invoked with divisible"),
            };
            (rhs.0[1], Element::constant(0), Element::constant(1))
        } else {
            let (cur_quotient, cur_remainder) = match rhs {
                EgcdRhs::Element(rhs) => rhs.polynom_div(self),
                EgcdRhs::ExtPoly => Element::div_ext_by(self),
            };
            // GCD is constant because EXT_POLY is irreducible
            let (g, x, y) = cur_remainder.const_egcd(EgcdRhs::Element(self));
            (g, y + (cur_quotient * x), x)
        }
    }
    // divide EXT_POLY by self.
    //
    // Returns (quotient, remainder); both fit in `Element` because
    // EXT_POLY is quadratic and the divisor is at most linear.
    fn div_ext_by(rhs: Self) -> (Element, Element) {
        if rhs.degree() == 0 {
            // dividing by constant is the same as multiplying by another constant.
            // and all constant multiples of EXT_POLY are in the equivalence class
            // of 0.
            return (Element::zero(), Element::zero());
        }
        // divisor is ensured linear here.
        // now ensure divisor is monic.
        let leading_mul_inv = galois_8::div(1, rhs.0[0]);
        let monictized = rhs * leading_mul_inv;
        let mut poly = EXT_POLY;
        // Schoolbook long division of the (monic) cubic-free EXT_POLY by
        // the monic linear divisor, working in-place on `poly`.
        for i in 0..2 {
            let coef = poly[i];
            for j in 1..2 {
                if rhs.0[j] != 0 {
                    poly[i + j] ^= galois_8::mul(monictized.0[j], coef);
                }
            }
        }
        let remainder = Element::constant(poly[2]);
        let quotient = Element([poly[0], poly[1]]) * leading_mul_inv;
        (quotient, remainder)
    }
    // Polynomial division of `self` by `rhs`, returning (quotient, remainder).
    fn polynom_div(self, rhs: Self) -> (Element, Element) {
        let divisor_degree = rhs.degree();
        if rhs.is_zero() {
            panic!("divide by 0");
        } else if self.degree() < divisor_degree {
            // If divisor's degree (len-1) is bigger, all dividend is a remainder
            (Element::zero(), self)
        } else if divisor_degree == 0 {
            // divide by constant.
            let invert = galois_8::div(1, rhs.0[1]);
            let quotient = Element([
                galois_8::mul(invert, self.0[0]),
                galois_8::mul(invert, self.0[1]),
            ]);
            (quotient, Element::zero())
        } else {
            // self degree is at least divisor degree, divisor degree not 0.
            // therefore both are 1.
            debug_assert_eq!(self.degree(), divisor_degree);
            debug_assert_eq!(self.degree(), 1);
            // ensure rhs is constant.
            let leading_mul_inv = galois_8::div(1, rhs.0[0]);
            let monic = Element([
                galois_8::mul(leading_mul_inv, rhs.0[0]),
                galois_8::mul(leading_mul_inv, rhs.0[1]),
            ]);
            let leading_coeff = self.0[0];
            let mut remainder = self.0[1];
            if monic.0[1] != 0 {
                remainder ^= galois_8::mul(monic.0[1], self.0[0]);
            }
            (
                Element::constant(galois_8::mul(leading_mul_inv, leading_coeff)),
                Element::constant(remainder),
            )
        }
    }
    /// Computes the inverse of this field element. Panics if zero.
    fn inverse(self) -> Element {
        if self.is_zero() {
            panic!("Cannot invert 0");
        }
        // first step of extended euclidean algorithm.
        // done here because EXT_POLY is outside the scope of `Element`.
        let (gcd, y) = {
            // self / EXT_POLY = (0, self)
            let remainder = self;
            // GCD is constant because EXT_POLY is irreducible
            let (g, x, _) = remainder.const_egcd(EgcdRhs::ExtPoly);
            (g, x)
        };
        // we still need to normalize it by dividing by the gcd
        if gcd != 0 {
            // EXT_POLY is irreducible so the GCD will always be constant.
            // EXT_POLY*x + self*y = gcd
            // self*y = gcd - EXT_POLY*x
            //
            // EXT_POLY*x is representative of the equivalence class of 0.
            let normalizer = galois_8::div(1, gcd);
            y * normalizer
        } else {
            // self is equivalent to zero.
            panic!("Cannot invert 0");
        }
    }
}
// Property-based tests (quickcheck) verifying the field axioms of
// GF(2^16): associativity, commutativity, distributivity, identities,
// inverses, and exponentiation consistency.
#[cfg(test)]
mod tests {
    use super::*;
    use quickcheck::Arbitrary;
    impl Arbitrary for Element {
        fn arbitrary<G: quickcheck::Gen>(gen: &mut G) -> Self {
            let a = u8::arbitrary(gen);
            let b = u8::arbitrary(gen);
            Element([a, b])
        }
    }
    quickcheck! {
        fn qc_add_associativity(a: Element, b: Element, c: Element) -> bool {
            a + (b + c) == (a + b) + c
        }
        fn qc_mul_associativity(a: Element, b: Element, c: Element) -> bool {
            a * (b * c) == (a * b) * c
        }
        fn qc_additive_identity(a: Element) -> bool {
            let zero = Element::zero();
            a - (zero - a) == zero
        }
        fn qc_multiplicative_identity(a: Element) -> bool {
            a.is_zero() || {
                let one = Element([0, 1]);
                (one / a) * a == one
            }
        }
        fn qc_add_commutativity(a: Element, b: Element) -> bool {
            a + b == b + a
        }
        fn qc_mul_commutativity(a: Element, b: Element) -> bool {
            a * b == b * a
        }
        fn qc_add_distributivity(a: Element, b: Element, c: Element) -> bool {
            a * (b + c) == (a * b) + (a * c)
        }
        fn qc_inverse(a: Element) -> bool {
            a.is_zero() || {
                let inv = a.inverse();
                a * inv == Element::constant(1)
            }
        }
        fn qc_exponent_1(a: Element, n: u8) -> bool {
            a.is_zero() || n == 0 || {
                let mut b = a.exp(n as usize);
                for _ in 1..n {
                    b = b / a;
                }
                a == b
            }
        }
        fn qc_exponent_2(a: Element, n: u8) -> bool {
            a.is_zero() || {
                let mut res = true;
                let mut b = Element::constant(1);
                for i in 0..n {
                    res = res && b == a.exp(i as usize);
                    b = b * a;
                }
                res
            }
        }
        fn qc_exp_zero_is_one(a: Element) -> bool {
            a.exp(0) == Element::constant(1)
        }
    }
    #[test]
    #[should_panic]
    fn test_div_b_is_0() {
        let _ = Element([1, 0]) / Element::zero();
    }
    // 0^0 == 1 by the convention chosen in `Element::exp`.
    #[test]
    fn zero_to_zero_is_one() {
        assert_eq!(Element::zero().exp(0), Element::constant(1))
    }
}

621
seaweed-volume/vendor/reed-solomon-erasure/src/galois_8.rs

@ -0,0 +1,621 @@
//! Implementation of GF(2^8): the finite field with 2^8 elements.
include!(concat!(env!("OUT_DIR"), "/table.rs"));
/// The field GF(2^8).
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
pub struct Field;
impl crate::Field for Field {
    const ORDER: usize = 256;
    type Elem = u8;
    fn add(a: u8, b: u8) -> u8 {
        add(a, b)
    }
    fn mul(a: u8, b: u8) -> u8 {
        mul(a, b)
    }
    fn div(a: u8, b: u8) -> u8 {
        div(a, b)
    }
    fn exp(elem: u8, n: usize) -> u8 {
        exp(elem, n)
    }
    fn zero() -> u8 {
        0
    }
    fn one() -> u8 {
        1
    }
    fn nth_internal(n: usize) -> u8 {
        n as u8
    }
    // Bulk slice operations route to the table-driven (or SIMD-accelerated,
    // depending on cfg) free functions in this module.
    fn mul_slice(c: u8, input: &[u8], out: &mut [u8]) {
        mul_slice(c, input, out)
    }
    fn mul_slice_add(c: u8, input: &[u8], out: &mut [u8]) {
        mul_slice_xor(c, input, out)
    }
}
/// Type alias of ReedSolomon over GF(2^8).
pub type ReedSolomon = crate::ReedSolomon<Field>;
/// Type alias of ShardByShard over GF(2^8).
pub type ShardByShard<'a> = crate::ShardByShard<'a, Field>;
/// Add two elements.
///
/// Addition in GF(2^8) is bitwise XOR.
pub fn add(a: u8, b: u8) -> u8 {
    a ^ b
}
/// Subtract `b` from `a`.
///
/// In a characteristic-2 field, subtraction is identical to addition.
#[cfg(test)]
pub fn sub(a: u8, b: u8) -> u8 {
    a ^ b
}
/// Multiply two elements.
///
/// Looked up from the build-generated 256x256 `MUL_TABLE`.
pub fn mul(a: u8, b: u8) -> u8 {
    MUL_TABLE[a as usize][b as usize]
}
/// Divide one element by another. `b`, the divisor, may not be 0.
///
/// Uses the discrete-log tables: a / b = exp(log(a) - log(b)), with the
/// exponent wrapped into the multiplicative group order (255).
///
/// # Panics
///
/// Panics if `b == 0` (and `a != 0`).
pub fn div(a: u8, b: u8) -> u8 {
    if a == 0 {
        0
    } else if b == 0 {
        panic!("Divisor is 0")
    } else {
        let log_a = LOG_TABLE[a as usize];
        let log_b = LOG_TABLE[b as usize];
        let mut log_result = log_a as isize - log_b as isize;
        // log_a - log_b is in (-255, 255); one addition normalizes it.
        if log_result < 0 {
            log_result += 255;
        }
        EXP_TABLE[log_result as usize]
    }
}
/// Compute a^n.
///
/// Uses the discrete-log tables: a^n = exp(log(a) * n), with the exponent
/// reduced modulo 255 (the order of the multiplicative group of GF(2^8)).
/// By convention `exp(0, 0) == 1`.
pub fn exp(a: u8, n: usize) -> u8 {
    if n == 0 {
        1
    } else if a == 0 {
        0
    } else {
        let log_a = LOG_TABLE[a as usize];
        // Reduce with `%` instead of repeated subtraction: the original
        // `while 255 <= log_result { log_result -= 255 }` loop is O(n)
        // and exactly equivalent to taking the remainder.
        let log_result = (log_a as usize * n) % 255;
        EXP_TABLE[log_result]
    }
}
// Manual unroll factor for the pure-Rust slice loops below.
const PURE_RUST_UNROLL: isize = 4;
// Early-returns from the enclosing function when the slice length is 0,
// so the pointer-based loops never form pointers into empty slices.
macro_rules! return_if_empty {
    (
        $len:expr
    ) => {
        if $len == 0 {
            return;
        }
    };
}
// Non-SIMD builds: the public slice operations are the pure-Rust loops.
#[cfg(not(all(
    feature = "simd-accel",
    any(target_arch = "x86_64", target_arch = "aarch64"),
    not(target_env = "msvc"),
    not(any(target_os = "android", target_os = "ios"))
)))]
pub fn mul_slice(c: u8, input: &[u8], out: &mut [u8]) {
    mul_slice_pure_rust(c, input, out);
}
#[cfg(not(all(
    feature = "simd-accel",
    any(target_arch = "x86_64", target_arch = "aarch64"),
    not(target_env = "msvc"),
    not(any(target_os = "android", target_os = "ios"))
)))]
pub fn mul_slice_xor(c: u8, input: &[u8], out: &mut [u8]) {
    mul_slice_xor_pure_rust(c, input, out);
}
// Computes out[i] = c * input[i] for all i, using the precomputed row of
// MUL_TABLE for `c` and a manually 4-way-unrolled pointer loop.
fn mul_slice_pure_rust(c: u8, input: &[u8], out: &mut [u8]) {
    let mt = &MUL_TABLE[c as usize];
    let mt_ptr: *const u8 = &mt[0];
    assert_eq!(input.len(), out.len());
    let len: isize = input.len() as isize;
    return_if_empty!(len);
    let mut input_ptr: *const u8 = &input[0];
    let mut out_ptr: *mut u8 = &mut out[0];
    let mut n: isize = 0;
    // SAFETY: `input` and `out` have equal, non-zero length `len` (asserted
    // and checked above). The unrolled loop only runs while n < len - 4, so
    // offsets n..n+3 are in bounds for both slices; the tail loop advances
    // one byte at a time up to len. `mt` is a 256-entry row, and every
    // index read from `input` is a u8 (0..=255), so table reads are in
    // bounds as well.
    unsafe {
        assert_eq!(4, PURE_RUST_UNROLL);
        if len > PURE_RUST_UNROLL {
            let len_minus_unroll = len - PURE_RUST_UNROLL;
            while n < len_minus_unroll {
                *out_ptr = *mt_ptr.offset(*input_ptr as isize);
                *out_ptr.offset(1) = *mt_ptr.offset(*input_ptr.offset(1) as isize);
                *out_ptr.offset(2) = *mt_ptr.offset(*input_ptr.offset(2) as isize);
                *out_ptr.offset(3) = *mt_ptr.offset(*input_ptr.offset(3) as isize);
                input_ptr = input_ptr.offset(PURE_RUST_UNROLL);
                out_ptr = out_ptr.offset(PURE_RUST_UNROLL);
                n += PURE_RUST_UNROLL;
            }
        }
        while n < len {
            *out_ptr = *mt_ptr.offset(*input_ptr as isize);
            input_ptr = input_ptr.offset(1);
            out_ptr = out_ptr.offset(1);
            n += 1;
        }
    }
    /* for n in 0..input.len() {
     *     out[n] = mt[input[n] as usize]
     * }
     */
}
// Computes out[i] ^= c * input[i] (multiply-accumulate in GF(2^8)),
// mirroring `mul_slice_pure_rust` but XOR-ing into the destination.
fn mul_slice_xor_pure_rust(c: u8, input: &[u8], out: &mut [u8]) {
    let mt = &MUL_TABLE[c as usize];
    let mt_ptr: *const u8 = &mt[0];
    assert_eq!(input.len(), out.len());
    let len: isize = input.len() as isize;
    return_if_empty!(len);
    let mut input_ptr: *const u8 = &input[0];
    let mut out_ptr: *mut u8 = &mut out[0];
    let mut n: isize = 0;
    // SAFETY: same invariants as `mul_slice_pure_rust` — equal non-zero
    // slice lengths, unrolled accesses bounded by len - 4, u8 indices into
    // a 256-entry table row.
    unsafe {
        assert_eq!(4, PURE_RUST_UNROLL);
        if len > PURE_RUST_UNROLL {
            let len_minus_unroll = len - PURE_RUST_UNROLL;
            while n < len_minus_unroll {
                *out_ptr ^= *mt_ptr.offset(*input_ptr as isize);
                *out_ptr.offset(1) ^= *mt_ptr.offset(*input_ptr.offset(1) as isize);
                *out_ptr.offset(2) ^= *mt_ptr.offset(*input_ptr.offset(2) as isize);
                *out_ptr.offset(3) ^= *mt_ptr.offset(*input_ptr.offset(3) as isize);
                input_ptr = input_ptr.offset(PURE_RUST_UNROLL);
                out_ptr = out_ptr.offset(PURE_RUST_UNROLL);
                n += PURE_RUST_UNROLL;
            }
        }
        while n < len {
            *out_ptr ^= *mt_ptr.offset(*input_ptr as isize);
            input_ptr = input_ptr.offset(1);
            out_ptr = out_ptr.offset(1);
            n += 1;
        }
    }
    /* for n in 0..input.len() {
     *     out[n] ^= mt[input[n] as usize];
     * }
     */
}
// Test-only helper: out[i] ^= input[i], with the same unrolled pointer
// structure as the functions above.
#[cfg(test)]
fn slice_xor(input: &[u8], out: &mut [u8]) {
    assert_eq!(input.len(), out.len());
    let len: isize = input.len() as isize;
    return_if_empty!(len);
    let mut input_ptr: *const u8 = &input[0];
    let mut out_ptr: *mut u8 = &mut out[0];
    let mut n: isize = 0;
    // SAFETY: equal non-zero slice lengths (asserted/checked above);
    // unrolled accesses stay below len - 4, tail loop stays below len.
    unsafe {
        assert_eq!(4, PURE_RUST_UNROLL);
        if len > PURE_RUST_UNROLL {
            let len_minus_unroll = len - PURE_RUST_UNROLL;
            while n < len_minus_unroll {
                *out_ptr ^= *input_ptr;
                *out_ptr.offset(1) ^= *input_ptr.offset(1);
                *out_ptr.offset(2) ^= *input_ptr.offset(2);
                *out_ptr.offset(3) ^= *input_ptr.offset(3);
                input_ptr = input_ptr.offset(PURE_RUST_UNROLL);
                out_ptr = out_ptr.offset(PURE_RUST_UNROLL);
                n += PURE_RUST_UNROLL;
            }
        }
        while n < len {
            *out_ptr ^= *input_ptr;
            input_ptr = input_ptr.offset(1);
            out_ptr = out_ptr.offset(1);
            n += 1;
        }
    }
    /* for n in 0..input.len() {
     *     out[n] ^= input[n]
     * }
     */
}
// C SIMD kernels (presumably compiled from simd_c/reedsolomon.c by
// build.rs — verify against the build script). Each returns the number
// of bytes it processed; the Rust wrappers below finish the remainder.
#[cfg(all(
    feature = "simd-accel",
    any(target_arch = "x86_64", target_arch = "aarch64"),
    not(target_env = "msvc"),
    not(any(target_os = "android", target_os = "ios"))
))]
extern "C" {
    fn reedsolomon_gal_mul(
        low: *const u8,
        high: *const u8,
        input: *const u8,
        out: *mut u8,
        len: libc::size_t,
    ) -> libc::size_t;
    fn reedsolomon_gal_mul_xor(
        low: *const u8,
        high: *const u8,
        input: *const u8,
        out: *mut u8,
        len: libc::size_t,
    ) -> libc::size_t;
}
// SIMD builds: hand off to the C kernel, which processes a prefix of the
// slices and reports how many bytes it handled; the pure-Rust loop then
// finishes the tail.
#[cfg(all(
    feature = "simd-accel",
    any(target_arch = "x86_64", target_arch = "aarch64"),
    not(target_env = "msvc"),
    not(any(target_os = "android", target_os = "ios"))
))]
pub fn mul_slice(c: u8, input: &[u8], out: &mut [u8]) {
    let low: *const u8 = &MUL_TABLE_LOW[c as usize][0];
    let high: *const u8 = &MUL_TABLE_HIGH[c as usize][0];
    assert_eq!(input.len(), out.len());
    let input_ptr: *const u8 = &input[0];
    let out_ptr: *mut u8 = &mut out[0];
    let size: libc::size_t = input.len();
    // SAFETY-relevant: lengths are equal (asserted) and the kernel is
    // given that single length for both buffers.
    let bytes_done: usize =
        unsafe { reedsolomon_gal_mul(low, high, input_ptr, out_ptr, size) as usize };
    mul_slice_pure_rust(c, &input[bytes_done..], &mut out[bytes_done..]);
}
// XOR-accumulating variant of the SIMD path above.
#[cfg(all(
    feature = "simd-accel",
    any(target_arch = "x86_64", target_arch = "aarch64"),
    not(target_env = "msvc"),
    not(any(target_os = "android", target_os = "ios"))
))]
pub fn mul_slice_xor(c: u8, input: &[u8], out: &mut [u8]) {
    let low: *const u8 = &MUL_TABLE_LOW[c as usize][0];
    let high: *const u8 = &MUL_TABLE_HIGH[c as usize][0];
    assert_eq!(input.len(), out.len());
    let input_ptr: *const u8 = &input[0];
    let out_ptr: *mut u8 = &mut out[0];
    let size: libc::size_t = input.len();
    let bytes_done: usize =
        unsafe { reedsolomon_gal_mul_xor(low, high, input_ptr, out_ptr, size) as usize };
    mul_slice_xor_pure_rust(c, &input[bytes_done..], &mut out[bytes_done..]);
}
// Unit and property tests for the GF(2^8) arithmetic primitives
// (`add`/`mul`/`div`/`exp`) and the slice operations defined above.
#[cfg(test)]
mod tests {
    extern crate alloc;
    use alloc::vec;
    use super::*;
    use crate::tests::fill_random;
    use rand;
    // Reference log table from the Backblaze implementation, used to
    // cross-check this crate's LOG_TABLE entry by entry.
    static BACKBLAZE_LOG_TABLE: [u8; 256] = [
        //-1, 0, 1, 25, 2, 50, 26, 198,
        // first value is changed from -1 to 0
        0, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100, 224, 14, 52, 141,
        239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138, 101, 47, 225, 36, 15, 33, 53, 147,
        142, 218, 240, 18, 130, 69, 29, 181, 194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77,
        228, 114, 166, 6, 191, 139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54,
        208, 148, 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66, 182, 163,
        195, 72, 126, 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94, 155, 159, 10, 21, 121, 43,
        78, 212, 229, 172, 115, 243, 167, 87, 7, 112, 192, 247, 140, 128, 99, 13, 103, 74, 222,
        237, 49, 197, 254, 24, 227, 165, 153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32,
        137, 46, 55, 63, 209, 91, 149, 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242,
        86, 211, 171, 20, 42, 93, 158, 132, 60, 57, 83, 71, 109, 65, 162, 31, 45, 67, 216, 183,
        123, 164, 118, 196, 23, 73, 236, 127, 12, 111, 246, 108, 161, 59, 82, 41, 157, 85, 170,
        251, 96, 134, 177, 187, 204, 62, 90, 203, 89, 95, 176, 156, 169, 160, 81, 11, 245, 22, 235,
        122, 117, 44, 215, 79, 174, 213, 233, 230, 231, 173, 232, 116, 214, 244, 234, 168, 80, 88,
        175,
    ];
    #[test]
    fn log_table_same_as_backblaze() {
        for i in 0..256 {
            assert_eq!(LOG_TABLE[i], BACKBLAZE_LOG_TABLE[i]);
        }
    }
    // Exhaustively verifies (a+b)+c == a+(b+c) and (a*b)*c == a*(b*c)
    // over every element triple of the field.
    #[test]
    fn test_associativity() {
        for a in 0..256 {
            let a = a as u8;
            for b in 0..256 {
                let b = b as u8;
                for c in 0..256 {
                    let c = c as u8;
                    let x = add(a, add(b, c));
                    let y = add(add(a, b), c);
                    assert_eq!(x, y);
                    let x = mul(a, mul(b, c));
                    let y = mul(mul(a, b), c);
                    assert_eq!(x, y);
                }
            }
        }
    }
    quickcheck! {
        fn qc_add_associativity(a: u8, b: u8, c: u8) -> bool {
            add(a, add(b, c)) == add(add(a, b), c)
        }
        fn qc_mul_associativity(a: u8, b: u8, c: u8) -> bool {
            mul(a, mul(b, c)) == mul(mul(a, b), c)
        }
    }
    // Additive and multiplicative identities/inverses for every element.
    #[test]
    fn test_identity() {
        for a in 0..256 {
            let a = a as u8;
            let b = sub(0, a);
            let c = sub(a, b);
            assert_eq!(c, 0);
            if a != 0 {
                let b = div(1, a);
                let c = mul(a, b);
                assert_eq!(c, 1);
            }
        }
    }
    quickcheck! {
        fn qc_additive_identity(a: u8) -> bool {
            sub(a, sub(0, a)) == 0
        }
        fn qc_multiplicative_identity(a: u8) -> bool {
            if a == 0 { true }
            else { mul(a, div(1, a)) == 1 }
        }
    }
    // a+b == b+a and a*b == b*a for every element pair.
    #[test]
    fn test_commutativity() {
        for a in 0..256 {
            let a = a as u8;
            for b in 0..256 {
                let b = b as u8;
                let x = add(a, b);
                let y = add(b, a);
                assert_eq!(x, y);
                let x = mul(a, b);
                let y = mul(b, a);
                assert_eq!(x, y);
            }
        }
    }
    quickcheck! {
        fn qc_add_commutativity(a: u8, b: u8) -> bool {
            add(a, b) == add(b, a)
        }
        fn qc_mul_commutativity(a: u8, b: u8) -> bool {
            mul(a, b) == mul(b, a)
        }
    }
    // a*(b+c) == a*b + a*c for every element triple.
    #[test]
    fn test_distributivity() {
        for a in 0..256 {
            let a = a as u8;
            for b in 0..256 {
                let b = b as u8;
                for c in 0..256 {
                    let c = c as u8;
                    let x = mul(a, add(b, c));
                    let y = add(mul(a, b), mul(a, c));
                    assert_eq!(x, y);
                }
            }
        }
    }
    quickcheck! {
        fn qc_add_distributivity(a: u8, b: u8, c: u8) -> bool {
            mul(a, add(b, c)) == add(mul(a, b), mul(a, c))
        }
    }
    // exp(a, j) must equal a multiplied by itself j times.
    #[test]
    fn test_exp() {
        for a in 0..256 {
            let a = a as u8;
            let mut power = 1u8;
            for j in 0..256 {
                let x = exp(a, j);
                assert_eq!(x, power);
                power = mul(power, a);
            }
        }
    }
    // Golden-vector checks for mul/mul_slice/mul_slice_xor/exp; expected
    // values match the reference implementation this port is based on.
    #[test]
    fn test_galois() {
        assert_eq!(mul(3, 4), 12);
        assert_eq!(mul(7, 7), 21);
        assert_eq!(mul(23, 45), 41);
        let input = [
            0, 1, 2, 3, 4, 5, 6, 10, 50, 100, 150, 174, 201, 255, 99, 32, 67, 85, 200, 199, 198,
            197, 196, 195, 194, 193, 192, 191, 190, 189, 188, 187, 186, 185,
        ];
        let mut output1 = vec![0; input.len()];
        let mut output2 = vec![0; input.len()];
        mul_slice(25, &input, &mut output1);
        let expect = [
            0x0, 0x19, 0x32, 0x2b, 0x64, 0x7d, 0x56, 0xfa, 0xb8, 0x6d, 0xc7, 0x85, 0xc3, 0x1f,
            0x22, 0x7, 0x25, 0xfe, 0xda, 0x5d, 0x44, 0x6f, 0x76, 0x39, 0x20, 0xb, 0x12, 0x11, 0x8,
            0x23, 0x3a, 0x75, 0x6c, 0x47,
        ];
        for i in 0..input.len() {
            assert_eq!(expect[i], output1[i]);
        }
        mul_slice(25, &input, &mut output2);
        for i in 0..input.len() {
            assert_eq!(expect[i], output2[i]);
        }
        let expect_xor = [
            0x0, 0x2d, 0x5a, 0x77, 0xb4, 0x99, 0xee, 0x2f, 0x79, 0xf2, 0x7, 0x51, 0xd4, 0x19, 0x31,
            0xc9, 0xf8, 0xfc, 0xf9, 0x4f, 0x62, 0x15, 0x38, 0xfb, 0xd6, 0xa1, 0x8c, 0x96, 0xbb,
            0xcc, 0xe1, 0x22, 0xf, 0x78,
        ];
        mul_slice_xor(52, &input, &mut output1);
        for i in 0..input.len() {
            assert_eq!(expect_xor[i], output1[i]);
        }
        mul_slice_xor(52, &input, &mut output2);
        for i in 0..input.len() {
            assert_eq!(expect_xor[i], output2[i]);
        }
        let expect = [
            0x0, 0xb1, 0x7f, 0xce, 0xfe, 0x4f, 0x81, 0x9e, 0x3, 0x6, 0xe8, 0x75, 0xbd, 0x40, 0x36,
            0xa3, 0x95, 0xcb, 0xc, 0xdd, 0x6c, 0xa2, 0x13, 0x23, 0x92, 0x5c, 0xed, 0x1b, 0xaa,
            0x64, 0xd5, 0xe5, 0x54, 0x9a,
        ];
        mul_slice(177, &input, &mut output1);
        for i in 0..input.len() {
            assert_eq!(expect[i], output1[i]);
        }
        mul_slice(177, &input, &mut output2);
        for i in 0..input.len() {
            assert_eq!(expect[i], output2[i]);
        }
        let expect_xor = [
            0x0, 0xc4, 0x95, 0x51, 0x37, 0xf3, 0xa2, 0xfb, 0xec, 0xc5, 0xd0, 0xc7, 0x53, 0x88,
            0xa3, 0xa5, 0x6, 0x78, 0x97, 0x9f, 0x5b, 0xa, 0xce, 0xa8, 0x6c, 0x3d, 0xf9, 0xdf, 0x1b,
            0x4a, 0x8e, 0xe8, 0x2c, 0x7d,
        ];
        mul_slice_xor(117, &input, &mut output1);
        for i in 0..input.len() {
            assert_eq!(expect_xor[i], output1[i]);
        }
        mul_slice_xor(117, &input, &mut output2);
        for i in 0..input.len() {
            assert_eq!(expect_xor[i], output2[i]);
        }
        assert_eq!(exp(2, 2), 4);
        assert_eq!(exp(5, 20), 235);
        assert_eq!(exp(13, 7), 43);
    }
    // slice_xor must match an element-wise XOR of the two buffers; lengths
    // include non-multiples of 16/32 to exercise any tail-handling path.
    #[test]
    fn test_slice_add() {
        let length_list = [16, 32, 34];
        for len in length_list.iter() {
            let mut input = vec![0; *len];
            fill_random(&mut input);
            let mut output = vec![0; *len];
            fill_random(&mut output);
            let mut expect = vec![0; *len];
            for i in 0..expect.len() {
                expect[i] = input[i] ^ output[i];
            }
            slice_xor(&input, &mut output);
            for i in 0..expect.len() {
                assert_eq!(expect[i], output[i]);
            }
            fill_random(&mut output);
            for i in 0..expect.len() {
                expect[i] = input[i] ^ output[i];
            }
            slice_xor(&input, &mut output);
            for i in 0..expect.len() {
                assert_eq!(expect[i], output[i]);
            }
        }
    }
    // Division with a zero numerator is defined and yields zero...
    #[test]
    fn test_div_a_is_0() {
        assert_eq!(0, div(0, 100));
    }
    // ...but a zero denominator must panic.
    #[test]
    #[should_panic]
    fn test_div_b_is_0() {
        div(1, 0);
    }
    // Calls each slice op twice on identical inputs and checks the results
    // agree — presumably to confirm the (possibly FFI-backed) path is
    // deterministic across calls; the length is deliberately not a multiple
    // of the SIMD block size.
    #[test]
    fn test_same_as_maybe_ffi() {
        let len = 10_003;
        for _ in 0..100 {
            let c = rand::random::<u8>();
            let mut input = vec![0; len];
            fill_random(&mut input);
            {
                let mut output = vec![0; len];
                fill_random(&mut output);
                let mut output_copy = output.clone();
                mul_slice(c, &input, &mut output);
                mul_slice(c, &input, &mut output_copy);
                assert_eq!(output, output_copy);
            }
            {
                let mut output = vec![0; len];
                fill_random(&mut output);
                let mut output_copy = output.clone();
                mul_slice_xor(c, &input, &mut output);
                mul_slice_xor(c, &input, &mut output_copy);
                assert_eq!(output, output_copy);
            }
        }
    }
}

200
seaweed-volume/vendor/reed-solomon-erasure/src/lib.rs

@ -0,0 +1,200 @@
//! This crate provides an encoder/decoder for Reed-Solomon erasure code.
//!
//! Please note that erasure coding means errors are not directly detected or corrected,
//! but missing data pieces (shards) can be reconstructed given that
//! the configuration provides high enough redundancy.
//!
//! You will have to implement error detection separately (e.g. via checksums)
//! and simply leave out the corrupted shards when attempting to reconstruct
//! the missing data.
#![allow(dead_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
extern crate rand;
extern crate smallvec;
#[cfg(feature = "simd-accel")]
extern crate libc;
use ::core::iter;
use ::core::iter::FromIterator;
#[macro_use]
mod macros;
mod core;
mod errors;
mod matrix;
#[cfg(test)]
mod tests;
pub mod galois_16;
pub mod galois_8;
pub use crate::errors::Error;
pub use crate::errors::SBSError;
pub use crate::core::ReedSolomon;
pub use crate::core::ShardByShard;
// TODO: Can be simplified once https://github.com/rust-lang/rfcs/issues/2505 is resolved
#[cfg(not(feature = "std"))]
use libm::log2f as log2;
/// `f32` base-2 logarithm; this `std` variant simply routes to the
/// standard library (the `no_std` build pulls in `libm::log2f` instead).
#[cfg(feature = "std")]
fn log2(n: f32) -> f32 {
    f32::log2(n)
}
/// A finite field to perform encoding over.
pub trait Field: Sized {
    /// The order of the field. This is a limit on the number of shards
    /// in an encoding.
    const ORDER: usize;
    /// The representational type of the field.
    type Elem: Default + Clone + Copy + PartialEq + ::core::fmt::Debug;
    /// Add two elements together.
    fn add(a: Self::Elem, b: Self::Elem) -> Self::Elem;
    /// Multiply two elements together.
    fn mul(a: Self::Elem, b: Self::Elem) -> Self::Elem;
    /// Divide a by b. Panics if b is zero.
    fn div(a: Self::Elem, b: Self::Elem) -> Self::Elem;
    /// Raise `a` to the n'th power.
    fn exp(a: Self::Elem, n: usize) -> Self::Elem;
    /// The "zero" element or additive identity.
    fn zero() -> Self::Elem;
    /// The "one" element or multiplicative identity.
    fn one() -> Self::Elem;
    /// Implementation hook for [`Field::nth`]: maps `n` to a field element
    /// without performing the bounds check.
    fn nth_internal(n: usize) -> Self::Elem;
    /// Yield the nth element of the field. Panics if n >= ORDER.
    /// Assignment is arbitrary but must be unique to `n`.
    fn nth(n: usize) -> Self::Elem {
        if n >= Self::ORDER {
            let pow = log2(Self::ORDER as f32) as usize;
            panic!("{} out of bounds for GF(2^{}) member", n, pow)
        }
        Self::nth_internal(n)
    }
    /// Multiply a slice of elements by another. Writes into the output slice.
    ///
    /// # Panics
    /// Panics if the output slice does not have equal length to the input.
    fn mul_slice(elem: Self::Elem, input: &[Self::Elem], out: &mut [Self::Elem]) {
        assert_eq!(input.len(), out.len());
        // `Elem: Copy`, so elements are copied directly instead of cloned.
        for (i, o) in input.iter().zip(out) {
            *o = Self::mul(elem, *i)
        }
    }
    /// Multiply a slice of elements by another, adding each result to the corresponding value in
    /// `out`.
    ///
    /// # Panics
    /// Panics if the output slice does not have equal length to the input.
    fn mul_slice_add(elem: Self::Elem, input: &[Self::Elem], out: &mut [Self::Elem]) {
        assert_eq!(input.len(), out.len());
        for (i, o) in input.iter().zip(out) {
            *o = Self::add(*o, Self::mul(elem, *i))
        }
    }
}
/// Something which might hold a shard.
///
/// This trait is used in reconstruction, where some of the shards
/// may be unknown.
pub trait ReconstructShard<F: Field> {
    /// The size of the shard data; `None` if empty.
    fn len(&self) -> Option<usize>;
    /// Get a mutable reference to the shard data, returning `None` if uninitialized.
    fn get(&mut self) -> Option<&mut [F::Elem]>;
    /// Get a mutable reference to the shard data, initializing it to the
    /// given length if it was `None`. Returns an error if initialization fails.
    ///
    /// Contract (as implemented below): `Ok(slice)` means the data was
    /// already present; `Err(Ok(slice))` means it was missing and has just
    /// been initialized; `Err(Err(e))` means initialization failed.
    fn get_or_initialize(
        &mut self,
        len: usize,
    ) -> Result<&mut [F::Elem], Result<&mut [F::Elem], Error>>;
}
// An optional owned buffer is a shard slot: `None` means the shard is
// missing and can be materialized on demand.
impl<F: Field, T: AsRef<[F::Elem]> + AsMut<[F::Elem]> + FromIterator<F::Elem>> ReconstructShard<F>
    for Option<T>
{
    fn len(&self) -> Option<usize> {
        Some(self.as_ref()?.as_ref().len())
    }
    fn get(&mut self) -> Option<&mut [F::Elem]> {
        Some(self.as_mut()?.as_mut())
    }
    fn get_or_initialize(
        &mut self,
        len: usize,
    ) -> Result<&mut [F::Elem], Result<&mut [F::Elem], Error>> {
        // Record whether a shard already existed before we (possibly)
        // fill the slot with a zeroed buffer of the requested length.
        let had_data = self.is_some();
        let slice = self
            .get_or_insert_with(|| iter::repeat(F::zero()).take(len).collect())
            .as_mut();
        if had_data {
            Ok(slice)
        } else {
            // Freshly initialized: signal that via the outer `Err`.
            Err(Ok(slice))
        }
    }
}
impl<F: Field, T: AsRef<[F::Elem]> + AsMut<[F::Elem]>> ReconstructShard<F> for (T, bool) {
fn len(&self) -> Option<usize> {
if !self.1 {
None
} else {
Some(self.0.as_ref().len())
}
}
fn get(&mut self) -> Option<&mut [F::Elem]> {
if !self.1 {
None
} else {
Some(self.0.as_mut())
}
}
fn get_or_initialize(
&mut self,
len: usize,
) -> Result<&mut [F::Elem], Result<&mut [F::Elem], Error>> {
let x = self.0.as_mut();
if x.len() == len {
if self.1 {
Ok(x)
} else {
Err(Ok(x))
}
} else {
Err(Err(Error::IncorrectShardSize))
}
}
}

245
seaweed-volume/vendor/reed-solomon-erasure/src/macros.rs

@ -0,0 +1,245 @@
/// Constructs vector of shards.
///
/// # Example
/// ```rust
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # use reed_solomon_erasure::*;
/// # fn main () {
/// let shards: Vec<Vec<u8>> = shards!([1, 2, 3],
///                                    [4, 5, 6]);
/// # }
/// ```
#[macro_export]
macro_rules! shards {
    (
        $( [ $( $x:expr ),* ] ),*
    ) => {{
        // Expands to a nested `vec!` literal: one inner `Vec` per bracket group.
        vec![ $( vec![ $( $x ),* ] ),* ]
    }}
}
/// Makes it easier to work with 2D slices, arrays, etc.
///
/// # Examples
/// ## Byte arrays on stack to `Vec<&[u8]>`
/// ```rust
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # fn main () {
/// let array: [[u8; 3]; 2] = [[1, 2, 3],
///                            [4, 5, 6]];
///
/// let refs: Vec<&[u8]> =
///     convert_2D_slices!(array =>to_vec &[u8]);
/// # }
/// ```
/// ## Byte arrays on stack to `Vec<&mut [u8]>` (borrow mutably)
/// ```rust
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # fn main () {
/// let mut array: [[u8; 3]; 2] = [[1, 2, 3],
///                                [4, 5, 6]];
///
/// let refs: Vec<&mut [u8]> =
///     convert_2D_slices!(array =>to_mut_vec &mut [u8]);
/// # }
/// ```
/// ## Byte arrays on stack to `SmallVec<[&mut [u8]; 32]>` (borrow mutably)
/// ```rust
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # extern crate smallvec;
/// # use smallvec::SmallVec;
/// # fn main () {
/// let mut array: [[u8; 3]; 2] = [[1, 2, 3],
///                                [4, 5, 6]];
///
/// let refs: SmallVec<[&mut [u8]; 32]> =
///     convert_2D_slices!(array =>to_mut SmallVec<[&mut [u8]; 32]>,
///                        SmallVec::with_capacity);
/// # }
/// ```
/// ## Shard array to `SmallVec<[&mut [u8]; 32]>` (borrow mutably)
/// ```rust
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # extern crate smallvec;
/// # use smallvec::SmallVec;
/// # fn main () {
/// let mut shards = shards!([1, 2, 3],
///                          [4, 5, 6]);
///
/// let refs: SmallVec<[&mut [u8]; 32]> =
///     convert_2D_slices!(shards =>to_mut SmallVec<[&mut [u8]; 32]>,
///                        SmallVec::with_capacity);
/// # }
/// ```
/// ## Shard array to `Vec<&mut [u8]>` (borrow mutably) into `SmallVec<[&mut [u8]; 32]>` (move)
/// ```rust
/// # #[macro_use] extern crate reed_solomon_erasure;
/// # extern crate smallvec;
/// # use smallvec::SmallVec;
/// # fn main () {
/// let mut shards = shards!([1, 2, 3],
///                          [4, 5, 6]);
///
/// let refs1 = convert_2D_slices!(shards =>to_mut_vec &mut [u8]);
///
/// let refs2: SmallVec<[&mut [u8]; 32]> =
///     convert_2D_slices!(refs1 =>into SmallVec<[&mut [u8]; 32]>,
///                        SmallVec::with_capacity);
/// # }
/// ```
#[macro_export]
macro_rules! convert_2D_slices {
    // The `*_vec` arms are shorthands that forward to the general arms
    // below with `Vec` as the destination container.
    (
        $slice:expr =>into_vec $dst_type:ty
    ) => {
        convert_2D_slices!($slice =>into Vec<$dst_type>,
                           Vec::with_capacity)
    };
    (
        $slice:expr =>to_vec $dst_type:ty
    ) => {
        convert_2D_slices!($slice =>to Vec<$dst_type>,
                           Vec::with_capacity)
    };
    (
        $slice:expr =>to_mut_vec $dst_type:ty
    ) => {
        convert_2D_slices!($slice =>to_mut Vec<$dst_type>,
                           Vec::with_capacity)
    };
    // General arms: `into` consumes the source, `to` borrows each row
    // immutably, `to_mut` borrows each row mutably. `$with_capacity` is the
    // constructor used to pre-size the destination container.
    (
        $slice:expr =>into $dst_type:ty, $with_capacity:path
    ) => {{
        let mut result: $dst_type =
            $with_capacity($slice.len());
        for i in $slice.into_iter() {
            result.push(i);
        }
        result
    }};
    (
        $slice:expr =>to $dst_type:ty, $with_capacity:path
    ) => {{
        let mut result: $dst_type =
            $with_capacity($slice.len());
        for i in $slice.iter() {
            result.push(i);
        }
        result
    }};
    (
        $slice:expr =>to_mut $dst_type:ty, $with_capacity:path
    ) => {{
        let mut result: $dst_type =
            $with_capacity($slice.len());
        for i in $slice.iter_mut() {
            result.push(i);
        }
        result
    }}
}
// Validates shard slice lengths before codec operations. Each arm
// `return`s an `Err(...)` on failure, so this macro may only be invoked
// inside functions returning `Result<_, Error>`.
macro_rules! check_slices {
    // All slices must be non-empty and share the length of the first one.
    (
        multi => $slices:expr
    ) => {{
        let size = $slices[0].as_ref().len();
        if size == 0 {
            return Err(Error::EmptyShard);
        }
        for slice in $slices.iter() {
            if slice.as_ref().len() != size {
                return Err(Error::IncorrectShardSize);
            }
        }
    }};
    // The two slices must have identical lengths.
    (
        single => $slice_left:expr, single => $slice_right:expr
    ) => {{
        if $slice_left.as_ref().len() != $slice_right.as_ref().len() {
            return Err(Error::IncorrectShardSize);
        }
    }};
    // A slice set plus one extra slice: the set must be consistent and the
    // extra slice must match its length.
    (
        multi => $slices:expr, single => $single:expr
    ) => {{
        check_slices!(multi => $slices);
        check_slices!(single => $slices[0], single => $single);
    }};
    // Two slice sets: both must be internally consistent and agree with
    // each other on length.
    (
        multi => $slices_left:expr, multi => $slices_right:expr
    ) => {{
        check_slices!(multi => $slices_left);
        check_slices!(multi => $slices_right);
        check_slices!(single => $slices_left[0], single => $slices_right[0]);
    }}
}
// Bounds-checks a shard index against the codec's shard counts. Each arm
// `return`s `Err(Error::InvalidIndex)` on failure, so this macro may only
// be invoked inside functions returning `Result<_, Error>`.
macro_rules! check_slice_index {
    // Index ranges over all (data + parity) shards.
    (
        all => $codec:expr, $index:expr
    ) => {{
        if $index >= $codec.total_shard_count {
            return Err(Error::InvalidIndex);
        }
    }};
    // Index ranges over data shards only.
    (
        data => $codec:expr, $index:expr
    ) => {{
        if $index >= $codec.data_shard_count {
            return Err(Error::InvalidIndex);
        }
    }};
    // Index ranges over parity shards only.
    (
        parity => $codec:expr, $index:expr
    ) => {{
        if $index >= $codec.parity_shard_count {
            return Err(Error::InvalidIndex);
        }
    }};
}
// Checks that the number of supplied shard pieces exactly matches the
// codec's configuration, returning a too-few/too-many error otherwise.
// Usable only inside functions returning `Result<_, Error>`.
macro_rules! check_piece_count {
    // Expect exactly `total_shard_count` pieces.
    (
        all => $codec:expr, $pieces:expr
    ) => {{
        if $pieces.as_ref().len() < $codec.total_shard_count {
            return Err(Error::TooFewShards);
        }
        if $pieces.as_ref().len() > $codec.total_shard_count {
            return Err(Error::TooManyShards);
        }
    }};
    // Expect exactly `data_shard_count` pieces.
    (
        data => $codec:expr, $pieces:expr
    ) => {{
        if $pieces.as_ref().len() < $codec.data_shard_count {
            return Err(Error::TooFewDataShards);
        }
        if $pieces.as_ref().len() > $codec.data_shard_count {
            return Err(Error::TooManyDataShards);
        }
    }};
    // Expect exactly `parity_shard_count` pieces.
    (
        parity => $codec:expr, $pieces:expr
    ) => {{
        if $pieces.as_ref().len() < $codec.parity_shard_count {
            return Err(Error::TooFewParityShards);
        }
        if $pieces.as_ref().len() > $codec.parity_shard_count {
            return Err(Error::TooManyParityShards);
        }
    }};
    // Same as `parity`, but with buffer-specific error variants.
    (
        parity_buf => $codec:expr, $pieces:expr
    ) => {{
        if $pieces.as_ref().len() < $codec.parity_shard_count {
            return Err(Error::TooFewBufferShards);
        }
        if $pieces.as_ref().len() > $codec.parity_shard_count {
            return Err(Error::TooManyBufferShards);
        }
    }};
}

425
seaweed-volume/vendor/reed-solomon-erasure/src/matrix.rs

@ -0,0 +1,425 @@
#![allow(dead_code)]
extern crate alloc;
use alloc::vec;
use alloc::vec::Vec;
use crate::Field;
use smallvec::SmallVec;
/// Errors that can occur during matrix operations.
#[derive(Debug)]
pub enum Error {
    /// The matrix is singular, so it cannot be inverted.
    SingularMatrix,
}
// Accesses element `($r, $c)` of matrix `$m`, whose data is stored in
// row-major flattened form. Expands to a place expression, so it works on
// either side of an assignment.
macro_rules! acc {
    (
        $m:ident, $r:expr, $c:expr
    ) => {
        $m.data[$r * $m.col_count + $c]
    };
}
/// Flattens a matrix of rows into a single row-major `Vec`.
///
/// Returns an empty `Vec` when `m` has no rows. (The previous version
/// indexed `m[0]` unconditionally to size the result, which panicked on
/// empty input.)
pub fn flatten<T>(m: Vec<Vec<T>>) -> Vec<T> {
    // Size the buffer up front assuming uniform row lengths; `extend`
    // grows it anyway if rows are ragged.
    let cap = m.first().map_or(0, |row| row.len()) * m.len();
    let mut result: Vec<T> = Vec::with_capacity(cap);
    for row in m {
        result.extend(row);
    }
    result
}
/// A two-dimensional matrix over the field `F`, stored row-major in a
/// single flat buffer.
#[derive(PartialEq, Debug, Clone)]
pub struct Matrix<F: Field> {
    // Number of rows.
    row_count: usize,
    // Number of columns; also the stride between consecutive rows in `data`.
    col_count: usize,
    data: SmallVec<[F::Elem; 1024]>, // store in flattened structure
    // the smallvec can hold a matrix of size up to 32x32 in stack
}
/// Returns the half-open `[start, end)` range that row `row` occupies in a
/// row-major flat buffer with `col_count` columns.
fn calc_matrix_row_start_end(col_count: usize, row: usize) -> (usize, usize) {
    let begin = col_count * row;
    (begin, begin + col_count)
}
impl<F: Field> Matrix<F> {
    // Flat-buffer range `[start, end)` occupied by `row`.
    fn calc_row_start_end(&self, row: usize) -> (usize, usize) {
        calc_matrix_row_start_end(self.col_count, row)
    }
    /// Creates a `rows` x `cols` matrix with every element set to `F::zero()`.
    pub fn new(rows: usize, cols: usize) -> Matrix<F> {
        let data = SmallVec::from_vec(vec![F::zero(); rows * cols]);
        Matrix {
            row_count: rows,
            col_count: cols,
            data,
        }
    }
    /// Builds a matrix from the given rows.
    ///
    /// # Panics
    /// Panics if the rows do not all have the same length.
    pub fn new_with_data(init_data: Vec<Vec<F::Elem>>) -> Matrix<F> {
        let rows = init_data.len();
        let cols = init_data[0].len();
        for r in init_data.iter() {
            if r.len() != cols {
                panic!("Inconsistent row sizes")
            }
        }
        let data = SmallVec::from_vec(flatten(init_data));
        Matrix {
            row_count: rows,
            col_count: cols,
            data,
        }
    }
    /// Test helper: generates a `size` x `size` matrix of random elements.
    #[cfg(test)]
    pub fn make_random(size: usize) -> Matrix<F>
    where
        rand::distributions::Standard: rand::distributions::Distribution<F::Elem>,
    {
        let mut vec: Vec<Vec<F::Elem>> = vec![vec![Default::default(); size]; size];
        for v in vec.iter_mut() {
            crate::tests::fill_random(v);
        }
        Matrix::new_with_data(vec)
    }
    /// Returns the `size` x `size` identity matrix.
    pub fn identity(size: usize) -> Matrix<F> {
        let mut result = Self::new(size, size);
        for i in 0..size {
            acc!(result, i, i) = F::one();
        }
        result
    }
    /// Number of columns.
    pub fn col_count(&self) -> usize {
        self.col_count
    }
    /// Number of rows.
    pub fn row_count(&self) -> usize {
        self.row_count
    }
    /// Returns the element at row `r`, column `c`.
    pub fn get(&self, r: usize, c: usize) -> F::Elem {
        acc!(self, r, c).clone()
    }
    /// Sets the element at row `r`, column `c` to `val`.
    pub fn set(&mut self, r: usize, c: usize, val: F::Elem) {
        acc!(self, r, c) = val;
    }
    /// Standard matrix multiplication over `F`; returns `self * rhs`.
    ///
    /// # Panics
    /// Panics if `self.col_count != rhs.row_count`.
    pub fn multiply(&self, rhs: &Matrix<F>) -> Matrix<F> {
        if self.col_count != rhs.row_count {
            panic!(
                "Colomn count on left is different from row count on right, lhs: {}, rhs: {}",
                self.col_count, rhs.row_count
            )
        }
        let mut result = Self::new(self.row_count, rhs.col_count);
        for r in 0..self.row_count {
            for c in 0..rhs.col_count {
                // Dot product of row `r` of `self` with column `c` of `rhs`.
                let mut val = F::zero();
                for i in 0..self.col_count {
                    let mul = F::mul(acc!(self, r, i).clone(), acc!(rhs, i, c).clone());
                    val = F::add(val, mul);
                }
                acc!(result, r, c) = val;
            }
        }
        result
    }
    /// Returns `[self | rhs]`, the horizontal concatenation of the two
    /// matrices.
    ///
    /// # Panics
    /// Panics if the row counts differ.
    pub fn augment(&self, rhs: &Matrix<F>) -> Matrix<F> {
        if self.row_count != rhs.row_count {
            panic!(
                "Matrices do not have the same row count, lhs: {}, rhs: {}",
                self.row_count, rhs.row_count
            )
        }
        let mut result = Self::new(self.row_count, self.col_count + rhs.col_count);
        for r in 0..self.row_count {
            for c in 0..self.col_count {
                acc!(result, r, c) = acc!(self, r, c).clone();
            }
            let self_column_count = self.col_count;
            for c in 0..rhs.col_count {
                acc!(result, r, self_column_count + c) = acc!(rhs, r, c).clone();
            }
        }
        result
    }
    /// Copies the rectangle `[rmin, rmax) x [cmin, cmax)` into a new matrix.
    pub fn sub_matrix(&self, rmin: usize, cmin: usize, rmax: usize, cmax: usize) -> Matrix<F> {
        let mut result = Self::new(rmax - rmin, cmax - cmin);
        for r in rmin..rmax {
            for c in cmin..cmax {
                acc!(result, r - rmin, c - cmin) = acc!(self, r, c).clone();
            }
        }
        result
    }
    /// Borrows row `row` as a slice.
    pub fn get_row(&self, row: usize) -> &[F::Elem] {
        let (start, end) = self.calc_row_start_end(row);
        &self.data[start..end]
    }
    /// Swaps rows `r1` and `r2` in place.
    pub fn swap_rows(&mut self, r1: usize, r2: usize) {
        let (r1_s, _) = self.calc_row_start_end(r1);
        let (r2_s, _) = self.calc_row_start_end(r2);
        if r1 == r2 {
            return;
        } else {
            // Swap the two rows element by element within the flat buffer.
            for i in 0..self.col_count {
                self.data.swap(r1_s + i, r2_s + i);
            }
        }
    }
    /// True if the matrix has as many rows as columns.
    pub fn is_square(&self) -> bool {
        self.row_count == self.col_count
    }
    /// Reduces the matrix to reduced row-echelon form in place using
    /// Gauss-Jordan elimination over `F`.
    ///
    /// Returns `Err(Error::SingularMatrix)` if no non-zero pivot can be
    /// found for some column.
    pub fn gaussian_elim(&mut self) -> Result<(), Error> {
        for r in 0..self.row_count {
            // If the pivot is zero, try to swap in a row below with a
            // non-zero entry in this column.
            if acc!(self, r, r) == F::zero() {
                for r_below in r + 1..self.row_count {
                    if acc!(self, r_below, r) != F::zero() {
                        self.swap_rows(r, r_below);
                        break;
                    }
                }
            }
            // If we couldn't find one, the matrix is singular.
            if acc!(self, r, r) == F::zero() {
                return Err(Error::SingularMatrix);
            }
            // Scale to 1.
            if acc!(self, r, r) != F::one() {
                let scale = F::div(F::one(), acc!(self, r, r).clone());
                for c in 0..self.col_count {
                    acc!(self, r, c) = F::mul(scale, acc!(self, r, c).clone());
                }
            }
            // Make everything below the 1 be a 0 by subtracting
            // a multiple of it. (Subtraction and addition are
            // both exclusive or in the Galois field.)
            for r_below in r + 1..self.row_count {
                if acc!(self, r_below, r) != F::zero() {
                    let scale = acc!(self, r_below, r).clone();
                    for c in 0..self.col_count {
                        acc!(self, r_below, c) = F::add(
                            acc!(self, r_below, c).clone(),
                            F::mul(scale, acc!(self, r, c).clone()),
                        );
                    }
                }
            }
        }
        // Now clear the part above the main diagonal.
        for d in 0..self.row_count {
            for r_above in 0..d {
                if acc!(self, r_above, d) != F::zero() {
                    let scale = acc!(self, r_above, d).clone();
                    for c in 0..self.col_count {
                        acc!(self, r_above, c) = F::add(
                            acc!(self, r_above, c).clone(),
                            F::mul(scale, acc!(self, d, c).clone()),
                        );
                    }
                }
            }
        }
        Ok(())
    }
    /// Returns the inverse of this matrix, computed by augmenting with the
    /// identity, running Gauss-Jordan elimination, and extracting the
    /// right half of the reduced matrix.
    ///
    /// Returns `Err(Error::SingularMatrix)` if the matrix is not invertible.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    pub fn invert(&self) -> Result<Matrix<F>, Error> {
        if !self.is_square() {
            panic!("Trying to invert a non-square matrix")
        }
        let row_count = self.row_count;
        let col_count = self.col_count;
        let mut work = self.augment(&Self::identity(row_count));
        work.gaussian_elim()?;
        // The left half is now the identity; the right half is the inverse.
        // (row_count == col_count here, since the matrix is square.)
        Ok(work.sub_matrix(0, row_count, col_count, col_count * 2))
    }
    /// Builds a `rows` x `cols` Vandermonde matrix whose row `r` contains
    /// the powers `nth(r)^0 .. nth(r)^(cols-1)`.
    pub fn vandermonde(rows: usize, cols: usize) -> Matrix<F> {
        let mut result = Self::new(rows, cols);
        for r in 0..rows {
            // doesn't matter what `r_a` is as long as it's unique.
            // then the vandermonde matrix is invertible.
            let r_a = F::nth(r);
            for c in 0..cols {
                acc!(result, r, c) = F::exp(r_a, c);
            }
        }
        result
    }
}
// Unit tests for `Matrix` over GF(2^8).
#[cfg(test)]
mod tests {
    extern crate alloc;
    use alloc::vec;
    use super::Matrix;
    use crate::galois_8;
    // Shorthand for building a GF(2^8) matrix from row literals, or an
    // empty matrix from a (rows, cols) pair.
    macro_rules! matrix {
        (
            $(
                [ $( $x:expr ),+ ]
            ),*
        ) => (
            Matrix::<galois_8::Field>::new_with_data(vec![ $( vec![$( $x ),*] ),* ])
        );
        ($rows:expr, $cols:expr) => (Matrix::new($rows, $cols));
    }
    #[test]
    fn test_matrix_col_count() {
        let m1 = matrix!([1, 0, 0]);
        let m2 = matrix!([0, 0, 0], [0, 0, 0]);
        let m3: Matrix<galois_8::Field> = Matrix::new(1, 4);
        assert_eq!(3, m1.col_count());
        assert_eq!(3, m2.col_count());
        assert_eq!(4, m3.col_count());
    }
    #[test]
    fn test_matrix_row_count() {
        let m1 = matrix!([1, 0, 0]);
        let m2 = matrix!([0, 0, 0], [0, 0, 0]);
        let m3: Matrix<galois_8::Field> = Matrix::new(1, 4);
        assert_eq!(1, m1.row_count());
        assert_eq!(2, m2.row_count());
        assert_eq!(1, m3.row_count());
    }
    #[test]
    fn test_matrix_swap_rows() {
        {
            let mut m1 = matrix!([1, 2, 3], [4, 5, 6], [7, 8, 9]);
            let expect = matrix!([7, 8, 9], [4, 5, 6], [1, 2, 3]);
            m1.swap_rows(0, 2);
            assert_eq!(expect, m1);
        }
        {
            // Swapping a row with itself must be a no-op.
            let mut m1 = matrix!([1, 2, 3], [4, 5, 6], [7, 8, 9]);
            let expect = m1.clone();
            m1.swap_rows(0, 0);
            assert_eq!(expect, m1);
            m1.swap_rows(1, 1);
            assert_eq!(expect, m1);
            m1.swap_rows(2, 2);
            assert_eq!(expect, m1);
        }
    }
    #[test]
    #[should_panic]
    fn test_inconsistent_row_sizes() {
        matrix!([1, 0, 0], [0, 1], [0, 0, 1]);
    }
    #[test]
    #[should_panic]
    fn test_incompatible_multiply() {
        let m1 = matrix!([0, 1], [0, 1], [0, 1]);
        let m2 = matrix!([0, 1, 2]);
        m1.multiply(&m2);
    }
    #[test]
    #[should_panic]
    fn test_incompatible_augment() {
        let m1 = matrix!([0, 1]);
        let m2 = matrix!([0, 1], [2, 3]);
        m1.augment(&m2);
    }
    #[test]
    fn test_matrix_identity() {
        let m1 = Matrix::identity(3);
        let m2 = matrix!([1, 0, 0], [0, 1, 0], [0, 0, 1]);
        assert_eq!(m1, m2);
    }
    // Note: products are GF(2^8) arithmetic, not integer arithmetic.
    #[test]
    fn test_matrix_multiply() {
        let m1 = matrix!([1, 2], [3, 4]);
        let m2 = matrix!([5, 6], [7, 8]);
        let actual = m1.multiply(&m2);
        let expect = matrix!([11, 22], [19, 42]);
        assert_eq!(actual, expect);
    }
    #[test]
    fn test_matrix_inverse_pass_cases() {
        {
            // Test case validating inverse of the input Matrix.
            let m = matrix!([56, 23, 98], [3, 100, 200], [45, 201, 123])
                .invert()
                .unwrap();
            let expect = matrix!([175, 133, 33], [130, 13, 245], [112, 35, 126]);
            assert_eq!(m, expect);
        }
        {
            // Test case validating inverse of the input Matrix.
            let m = matrix!(
                [1, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
                [7, 7, 6, 6, 1]
            )
            .invert()
            .unwrap();
            let expect = matrix!(
                [1, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [123, 123, 1, 122, 122],
                [0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0]
            );
            assert_eq!(m, expect);
        }
    }
    #[test]
    #[should_panic]
    fn test_matrix_inverse_non_square() {
        // Test case with a non-square matrix.
        matrix!([56, 23], [3, 100], [45, 201]).invert().unwrap();
    }
    #[test]
    #[should_panic]
    fn test_matrix_inverse_singular() {
        matrix!([4, 2], [12, 6]).invert().unwrap();
    }
}

489
seaweed-volume/vendor/reed-solomon-erasure/src/tests/galois_16.rs

@ -0,0 +1,489 @@
extern crate alloc;
use alloc::vec;
use alloc::vec::Vec;
use super::{fill_random, option_shards_into_shards, shards_into_option_shards};
use crate::galois_16::ReedSolomon;
// Builds `$size` random shards, each containing `$per_shard` GF(2^16)
// symbols (two-byte arrays).
// NOTE(review): the capacity is hard-coded to 20 even though `$size`
// shards are pushed — harmless (the Vec grows), but likely unintended.
macro_rules! make_random_shards {
    ($per_shard:expr, $size:expr) => {{
        let mut shards = Vec::with_capacity(20);
        for _ in 0..$size {
            shards.push(vec![[0; 2]; $per_shard]);
        }
        for s in shards.iter_mut() {
            fill_random(s);
        }
        shards
    }};
}
// The GF(2^16) codec must reject configurations whose shard count reaches
// the field order (2^16), while counts just below the order are accepted.
#[test]
fn correct_field_order_restriction() {
    const ORDER: usize = 1 << 16;
    assert!(ReedSolomon::new(ORDER, 1).is_err());
    assert!(ReedSolomon::new(1, ORDER).is_err());
    // way too slow, because it needs to build a 65536*65536 vandermonde matrix
    // assert!(ReedSolomon::new(ORDER - 1, 1).is_ok());
    assert!(ReedSolomon::new(1, ORDER - 1).is_ok());
}
quickcheck! {
fn qc_encode_verify_reconstruct_verify(data: usize,
parity: usize,
corrupt: usize,
size: usize) -> bool {
let data = 1 + data % 255;
let mut parity = 1 + parity % 255;
if data + parity > 256 {
parity -= data + parity - 256;
}
let corrupt = corrupt % (parity + 1);
let mut corrupt_pos_s = Vec::with_capacity(corrupt);
for _ in 0..corrupt {
let mut pos = rand::random::<usize>() % (data + parity);
while let Some(_) = corrupt_pos_s.iter().find(|&&x| x == pos) {
pos = rand::random::<usize>() % (data + parity);
}
corrupt_pos_s.push(pos);
}
let size = 1 + size % 1_000_000;
let r = ReedSolomon::new(data, parity).unwrap();
let mut expect = make_random_shards!(size, data + parity);
{
let mut refs =
convert_2D_slices!(expect =>to_mut_vec &mut [[u8; 2]]);
r.encode(&mut refs).unwrap();
}
let expect = expect;
let mut shards = expect.clone();
// corrupt shards
for &p in corrupt_pos_s.iter() {
fill_random(&mut shards[p]);
}
let mut slice_present = vec![true; data + parity];
for &p in corrupt_pos_s.iter() {
slice_present[p] = false;
}
// reconstruct
{
let mut refs: Vec<_> = shards.iter_mut()
.map(|i| &mut i[..])
.zip(slice_present.iter().cloned())
.collect();
r.reconstruct(&mut refs[..]).unwrap();
}
({
let refs =
convert_2D_slices!(expect =>to_vec &[[u8; 2]]);
r.verify(&refs).unwrap()
})
&&
expect == shards
&&
({
let refs =
convert_2D_slices!(shards =>to_vec &[[u8; 2]]);
r.verify(&refs).unwrap()
})
}
fn qc_encode_verify_reconstruct_verify_shards(data: usize,
parity: usize,
corrupt: usize,
size: usize) -> bool {
let data = 1 + data % 255;
let mut parity = 1 + parity % 255;
if data + parity > 256 {
parity -= data + parity - 256;
}
let corrupt = corrupt % (parity + 1);
let mut corrupt_pos_s = Vec::with_capacity(corrupt);
for _ in 0..corrupt {
let mut pos = rand::random::<usize>() % (data + parity);
while let Some(_) = corrupt_pos_s.iter().find(|&&x| x == pos) {
pos = rand::random::<usize>() % (data + parity);
}
corrupt_pos_s.push(pos);
}
let size = 1 + size % 1_000_000;
let r = ReedSolomon::new(data, parity).unwrap();
let mut expect = make_random_shards!(size, data + parity);
r.encode(&mut expect).unwrap();
let expect = expect;
let mut shards = shards_into_option_shards(expect.clone());
// corrupt shards
for &p in corrupt_pos_s.iter() {
shards[p] = None;
}
// reconstruct
r.reconstruct(&mut shards).unwrap();
let shards = option_shards_into_shards(shards);
r.verify(&expect).unwrap()
&& expect == shards
&& r.verify(&shards).unwrap()
}
fn qc_verify(data: usize,
parity: usize,
corrupt: usize,
size: usize) -> bool {
let data = 1 + data % 255;
let mut parity = 1 + parity % 255;
if data + parity > 256 {
parity -= data + parity - 256;
}
let corrupt = corrupt % (parity + 1);
let mut corrupt_pos_s = Vec::with_capacity(corrupt);
for _ in 0..corrupt {
let mut pos = rand::random::<usize>() % (data + parity);
while let Some(_) = corrupt_pos_s.iter().find(|&&x| x == pos) {
pos = rand::random::<usize>() % (data + parity);
}
corrupt_pos_s.push(pos);
}
let size = 1 + size % 1_000_000;
let r = ReedSolomon::new(data, parity).unwrap();
let mut expect = make_random_shards!(size, data + parity);
{
let mut refs =
convert_2D_slices!(expect =>to_mut_vec &mut [[u8; 2]]);
r.encode(&mut refs).unwrap();
}
let expect = expect;
let mut shards = expect.clone();
// corrupt shards
for &p in corrupt_pos_s.iter() {
fill_random(&mut shards[p]);
}
({
let refs =
convert_2D_slices!(expect =>to_vec &[[u8; 2]]);
r.verify(&refs).unwrap()
})
&&
((corrupt > 0 && expect != shards)
|| (corrupt == 0 && expect == shards))
&&
({
let refs =
convert_2D_slices!(shards =>to_vec &[[u8; 2]]);
(corrupt > 0 && !r.verify(&refs).unwrap())
|| (corrupt == 0 && r.verify(&refs).unwrap())
})
}
fn qc_verify_shards(data: usize,
parity: usize,
corrupt: usize,
size: usize) -> bool {
let data = 1 + data % 255;
let mut parity = 1 + parity % 255;
if data + parity > 256 {
parity -= data + parity - 256;
}
let corrupt = corrupt % (parity + 1);
let mut corrupt_pos_s = Vec::with_capacity(corrupt);
for _ in 0..corrupt {
let mut pos = rand::random::<usize>() % (data + parity);
while let Some(_) = corrupt_pos_s.iter().find(|&&x| x == pos) {
pos = rand::random::<usize>() % (data + parity);
}
corrupt_pos_s.push(pos);
}
let size = 1 + size % 1_000_000;
let r = ReedSolomon::new(data, parity).unwrap();
let mut expect = make_random_shards!(size, data + parity);
r.encode(&mut expect).unwrap();
let expect = expect;
let mut shards = expect.clone();
// corrupt shards
for &p in corrupt_pos_s.iter() {
fill_random(&mut shards[p]);
}
r.verify(&expect).unwrap()
&&
((corrupt > 0 && expect != shards)
|| (corrupt == 0 && expect == shards))
&&
((corrupt > 0 && !r.verify(&shards).unwrap())
|| (corrupt == 0 && r.verify(&shards).unwrap()))
}
fn qc_encode_sep_same_as_encode(data: usize,
                                parity: usize,
                                size: usize) -> bool {
    // Normalize quickcheck inputs to a valid shard configuration.
    let data = 1 + data % 255;
    let mut parity = 1 + parity % 255;
    if data + parity > 256 {
        parity -= data + parity - 256;
    }
    let size = 1 + size % 1_000_000;
    let r = ReedSolomon::new(data, parity).unwrap();
    let mut expect = make_random_shards!(size, data + parity);
    let mut shards = expect.clone();
    // Reference result: bulk encode through mutable slice references.
    {
        let mut expect_refs =
            convert_2D_slices!(expect =>to_mut_vec &mut [[u8; 2]]);
        r.encode(&mut expect_refs).unwrap();
    }
    let expect = expect;
    // Candidate result: split into data/parity halves and use encode_sep.
    {
        let (data_half, parity_half) = shards.split_at_mut(data);
        let data_refs =
            convert_2D_slices!(data_half =>to_mut_vec &[[u8; 2]]);
        let mut parity_refs =
            convert_2D_slices!(parity_half =>to_mut_vec &mut [[u8; 2]]);
        r.encode_sep(&data_refs, &mut parity_refs).unwrap();
    }
    // Both encoding paths must agree byte-for-byte.
    shards == expect
}
fn qc_encode_sep_same_as_encode_shards(data: usize,
                                       parity: usize,
                                       size: usize) -> bool {
    // Normalize quickcheck inputs to a valid shard configuration.
    let data = 1 + data % 255;
    let mut parity = 1 + parity % 255;
    if data + parity > 256 {
        parity -= data + parity - 256;
    }
    let size = 1 + size % 1_000_000;
    let codec = ReedSolomon::new(data, parity).unwrap();
    let mut expect = make_random_shards!(size, data + parity);
    let mut shards = expect.clone();
    // Reference: bulk encode directly on the shard vectors.
    codec.encode(&mut expect).unwrap();
    // Candidate: split halves and encode with the separated API.
    {
        let (data_half, parity_half) = shards.split_at_mut(data);
        codec.encode_sep(data_half, parity_half).unwrap();
    }
    // Both encoding paths must agree byte-for-byte.
    shards == expect
}
fn qc_encode_single_same_as_encode(data: usize,
                                   parity: usize,
                                   size: usize) -> bool {
    // Normalize quickcheck inputs to a valid shard configuration.
    let data = 1 + data % 255;
    let mut parity = 1 + parity % 255;
    if data + parity > 256 {
        parity -= data + parity - 256;
    }
    let size = 1 + size % 1_000_000;
    let r = ReedSolomon::new(data, parity).unwrap();
    let mut expect = make_random_shards!(size, data + parity);
    let mut shards = expect.clone();
    // Reference result: one bulk encode over mutable slice references.
    {
        let mut expect_refs =
            convert_2D_slices!(expect =>to_mut_vec &mut [[u8; 2]]);
        r.encode(&mut expect_refs).unwrap();
    }
    let expect = expect;
    // Candidate result: feed the data shards one at a time.
    {
        let mut shard_refs =
            convert_2D_slices!(shards =>to_mut_vec &mut [[u8; 2]]);
        for i in 0..data {
            r.encode_single(i, &mut shard_refs).unwrap();
        }
    }
    // Incremental encoding must match the bulk result exactly.
    shards == expect
}
fn qc_encode_single_same_as_encode_shards(data: usize,
                                          parity: usize,
                                          size: usize) -> bool {
    // Normalize quickcheck inputs to a valid shard configuration.
    let data = 1 + data % 255;
    let mut parity = 1 + parity % 255;
    if data + parity > 256 {
        parity -= data + parity - 256;
    }
    let size = 1 + size % 1_000_000;
    let codec = ReedSolomon::new(data, parity).unwrap();
    let mut expect = make_random_shards!(size, data + parity);
    let mut shards = expect.clone();
    // Reference: bulk encode directly on the shard vectors.
    codec.encode(&mut expect).unwrap();
    // Candidate: incremental encoding, one data shard per call.
    for i in 0..data {
        codec.encode_single(i, &mut shards).unwrap();
    }
    // Incremental encoding must match the bulk result exactly.
    shards == expect
}
fn qc_encode_single_sep_same_as_encode(data: usize,
                                       parity: usize,
                                       size: usize) -> bool {
    // Normalize quickcheck inputs to a valid shard configuration.
    let data = 1 + data % 255;
    let mut parity = 1 + parity % 255;
    if data + parity > 256 {
        parity -= data + parity - 256;
    }
    let size = 1 + size % 1_000_000;
    let r = ReedSolomon::new(data, parity).unwrap();
    let mut expect = make_random_shards!(size, data + parity);
    let mut shards = expect.clone();
    // Reference result: one bulk encode over mutable slice references.
    {
        let mut expect_refs =
            convert_2D_slices!(expect =>to_mut_vec &mut [[u8; 2]]);
        r.encode(&mut expect_refs).unwrap();
    }
    let expect = expect;
    // Candidate result: split halves, then encode_single_sep per data shard.
    {
        let (data_half, parity_half) = shards.split_at_mut(data);
        let data_refs =
            convert_2D_slices!(data_half =>to_mut_vec &[[u8; 2]]);
        let mut parity_refs =
            convert_2D_slices!(parity_half =>to_mut_vec &mut [[u8; 2]]);
        for i in 0..data {
            r.encode_single_sep(i, data_refs[i], &mut parity_refs).unwrap();
        }
    }
    // Incremental separated encoding must match the bulk result exactly.
    shards == expect
}
fn qc_encode_single_sep_same_as_encode_shards(data: usize,
                                              parity: usize,
                                              size: usize) -> bool {
    // Normalize quickcheck inputs to a valid shard configuration.
    let data = 1 + data % 255;
    let mut parity = 1 + parity % 255;
    if data + parity > 256 {
        parity -= data + parity - 256;
    }
    let size = 1 + size % 1_000_000;
    let codec = ReedSolomon::new(data, parity).unwrap();
    let mut expect = make_random_shards!(size, data + parity);
    let mut shards = expect.clone();
    // Reference: one bulk encode of the whole shard set.
    codec.encode(&mut expect).unwrap();
    // Candidate: split into halves and feed data shards individually.
    {
        let (data_half, parity_half) = shards.split_at_mut(data);
        for i in 0..data {
            codec.encode_single_sep(i, &data_half[i], parity_half).unwrap();
        }
    }
    // Incremental separated encoding must match the bulk result exactly.
    shards == expect
}
}

2619
seaweed-volume/vendor/reed-solomon-erasure/src/tests/mod.rs
File diff suppressed because it is too large
View File

Loading…
Cancel
Save