author     DongHun Kwak <dh0128.kwak@samsung.com>    2023-03-22 12:47:10 +0900
committer  DongHun Kwak <dh0128.kwak@samsung.com>    2023-03-22 12:47:10 +0900
commit     60c4fab7823aef8b8285a1e092edfa553a5eecad (patch)
tree       42fff1feda516566e0daece973bb1b392dbf0c5f
Import bitvec 1.0.1 (upstream/1.0.1, upstream)
-rw-r--r--  .cargo_vcs_info.json  6
-rw-r--r--  Cargo.lock  729
-rw-r--r--  Cargo.toml  115
-rw-r--r--  Cargo.toml.orig  109
-rw-r--r--  LICENSE.txt  21
-rw-r--r--  README.md  426
-rw-r--r--  benches/eq.rs  43
-rw-r--r--  benches/iter.rs  32
-rw-r--r--  benches/macros.rs  153
-rw-r--r--  benches/memcpy.rs  288
-rw-r--r--  benches/mut_access.rs  25
-rw-r--r--  benches/slice.rs  139
-rw-r--r--  doc/README.md  14
-rw-r--r--  doc/access.md  22
-rw-r--r--  doc/access/BitAccess.md  40
-rw-r--r--  doc/access/BitSafe.md  16
-rw-r--r--  doc/access/impl_BitSafe.md  12
-rw-r--r--  doc/array.md  21
-rw-r--r--  doc/array/BitArray.md  108
-rw-r--r--  doc/array/IntoIter.md  8
-rw-r--r--  doc/array/TryFromBitSliceError.md  16
-rw-r--r--  doc/array/api.md  19
-rw-r--r--  doc/array/iter.md  5
-rw-r--r--  doc/boxed.md  15
-rw-r--r--  doc/boxed/BitBox.md  59
-rw-r--r--  doc/boxed/iter.md  11
-rw-r--r--  doc/domain.md  132
-rw-r--r--  doc/domain/BitDomain.md  30
-rw-r--r--  doc/domain/Domain.md  63
-rw-r--r--  doc/domain/PartialElement.md  30
-rw-r--r--  doc/field.md  133
-rw-r--r--  doc/field/BitField.md  96
-rw-r--r--  doc/field/BitField_Lsb0.md  15
-rw-r--r--  doc/field/BitField_Lsb0_load_be.md  77
-rw-r--r--  doc/field/BitField_Lsb0_load_le.md  77
-rw-r--r--  doc/field/BitField_Lsb0_store_be.md  69
-rw-r--r--  doc/field/BitField_Lsb0_store_le.md  69
-rw-r--r--  doc/field/BitField_Msb0.md  23
-rw-r--r--  doc/field/BitField_Msb0_load_be.md  77
-rw-r--r--  doc/field/BitField_Msb0_load_le.md  77
-rw-r--r--  doc/field/BitField_Msb0_store_be.md  69
-rw-r--r--  doc/field/BitField_Msb0_store_le.md  69
-rw-r--r--  doc/field/BitField_load.md  58
-rw-r--r--  doc/field/BitField_load_be.md  145
-rw-r--r--  doc/field/BitField_load_le.md  145
-rw-r--r--  doc/field/BitField_store.md  57
-rw-r--r--  doc/field/BitField_store_be.md  143
-rw-r--r--  doc/field/BitField_store_le.md  143
-rw-r--r--  doc/field/get.md  26
-rw-r--r--  doc/field/impl_BitArray.md  9
-rw-r--r--  doc/field/io.md  10
-rw-r--r--  doc/field/io/Read_BitSlice.md  20
-rw-r--r--  doc/field/io/Read_BitVec.md  14
-rw-r--r--  doc/field/io/Write_BitSlice.md  20
-rw-r--r--  doc/field/io/Write_BitVec.md  18
-rw-r--r--  doc/field/resize.md  18
-rw-r--r--  doc/field/set.md  25
-rw-r--r--  doc/field/sign.md  30
-rw-r--r--  doc/index.md  45
-rw-r--r--  doc/index/BitEnd.md  35
-rw-r--r--  doc/index/BitIdx.md  31
-rw-r--r--  doc/index/BitIdxError.md  6
-rw-r--r--  doc/index/BitMask.md  19
-rw-r--r--  doc/index/BitPos.md  29
-rw-r--r--  doc/index/BitSel.md  28
-rw-r--r--  doc/macros.md  31
-rw-r--r--  doc/macros/BitArr_type.md  35
-rw-r--r--  doc/macros/bitarr_value.md  61
-rw-r--r--  doc/macros/bitbox.md  10
-rw-r--r--  doc/macros/bits.md  102
-rw-r--r--  doc/macros/bitvec.md  12
-rw-r--r--  doc/macros/encode_bits.md  62
-rw-r--r--  doc/macros/internal.md  6
-rw-r--r--  doc/macros/make_elem.md  21
-rw-r--r--  doc/mem.md  11
-rw-r--r--  doc/mem/BitElement.md  17
-rw-r--r--  doc/mem/BitRegister.md  6
-rw-r--r--  doc/mem/elts.md  28
-rw-r--r--  doc/order.md  24
-rw-r--r--  doc/order/BitOrder.md  92
-rw-r--r--  doc/order/LocalBits.md  23
-rw-r--r--  doc/order/Lsb0.md  13
-rw-r--r--  doc/order/Msb0.md  13
-rw-r--r--  doc/order/verify.md  23
-rw-r--r--  doc/order/verify_for_type.md  29
-rw-r--r--  doc/prelude.md  9
-rw-r--r--  doc/ptr.md  22
-rw-r--r--  doc/ptr/BitPtr.md  61
-rw-r--r--  doc/ptr/BitPtrRange.md  36
-rw-r--r--  doc/ptr/BitRef.md  49
-rw-r--r--  doc/ptr/BitSpan.md  134
-rw-r--r--  doc/ptr/addr.md  5
-rw-r--r--  doc/ptr/bitslice_from_raw_parts.md  30
-rw-r--r--  doc/ptr/bitslice_from_raw_parts_mut.md  31
-rw-r--r--  doc/ptr/copy.md  87
-rw-r--r--  doc/ptr/copy_nonoverlapping.md  59
-rw-r--r--  doc/ptr/drop_in_place.md  9
-rw-r--r--  doc/ptr/eq.md  38
-rw-r--r--  doc/ptr/hash.md  11
-rw-r--r--  doc/ptr/null.md  10
-rw-r--r--  doc/ptr/null_mut.md  10
-rw-r--r--  doc/ptr/proxy.md  9
-rw-r--r--  doc/ptr/range.md  20
-rw-r--r--  doc/ptr/read.md  28
-rw-r--r--  doc/ptr/read_unaligned.md  30
-rw-r--r--  doc/ptr/read_volatile.md  39
-rw-r--r--  doc/ptr/replace.md  35
-rw-r--r--  doc/ptr/single.md  10
-rw-r--r--  doc/ptr/slice_from_raw_parts.md  10
-rw-r--r--  doc/ptr/slice_from_raw_parts_mut.md  10
-rw-r--r--  doc/ptr/span.md  33
-rw-r--r--  doc/ptr/swap.md  34
-rw-r--r--  doc/ptr/swap_nonoverlapping.md  35
-rw-r--r--  doc/ptr/write.md  37
-rw-r--r--  doc/ptr/write_bits.md  42
-rw-r--r--  doc/ptr/write_bytes.md  10
-rw-r--r--  doc/ptr/write_unaligned.md  37
-rw-r--r--  doc/ptr/write_volatile.md  45
-rw-r--r--  doc/serdes.md  121
-rw-r--r--  doc/serdes/array.md  26
-rw-r--r--  doc/serdes/slice.md  14
-rw-r--r--  doc/serdes/utils.md  26
-rw-r--r--  doc/slice.md  34
-rw-r--r--  doc/slice/BitSlice.md  371
-rw-r--r--  doc/slice/BitSliceIndex.md  31
-rw-r--r--  doc/slice/api.md  14
-rw-r--r--  doc/slice/bitop_assign.md  56
-rw-r--r--  doc/slice/format.md  46
-rw-r--r--  doc/slice/from_raw_parts.md  68
-rw-r--r--  doc/slice/from_raw_parts_mut.md  70
-rw-r--r--  doc/slice/from_raw_parts_unchecked.md  29
-rw-r--r--  doc/slice/from_raw_parts_unchecked_mut.md  31
-rw-r--r--  doc/slice/iter.md  13
-rw-r--r--  doc/slice/iter/Chunks.md  29
-rw-r--r--  doc/slice/iter/ChunksExact.md  31
-rw-r--r--  doc/slice/iter/ChunksExactMut.md  42
-rw-r--r--  doc/slice/iter/ChunksMut.md  38
-rw-r--r--  doc/slice/iter/Iter.md  36
-rw-r--r--  doc/slice/iter/IterMut.md  30
-rw-r--r--  doc/slice/iter/IterOnes.md  23
-rw-r--r--  doc/slice/iter/IterZeros.md  23
-rw-r--r--  doc/slice/iter/NoAlias.md  103
-rw-r--r--  doc/slice/iter/RChunks.md  29
-rw-r--r--  doc/slice/iter/RChunksExact.md  31
-rw-r--r--  doc/slice/iter/RChunksExactMut.md  41
-rw-r--r--  doc/slice/iter/RChunksMut.md  37
-rw-r--r--  doc/slice/iter/RSplit.md  35
-rw-r--r--  doc/slice/iter/RSplitMut.md  41
-rw-r--r--  doc/slice/iter/RSplitN.md  36
-rw-r--r--  doc/slice/iter/RSplitNMut.md  40
-rw-r--r--  doc/slice/iter/Split.md  35
-rw-r--r--  doc/slice/iter/SplitInclusive.md  29
-rw-r--r--  doc/slice/iter/SplitInclusiveMut.md  33
-rw-r--r--  doc/slice/iter/SplitMut.md  41
-rw-r--r--  doc/slice/iter/SplitN.md  36
-rw-r--r--  doc/slice/iter/SplitNMut.md  40
-rw-r--r--  doc/slice/iter/Windows.md  35
-rw-r--r--  doc/slice/ops.md  16
-rw-r--r--  doc/slice/specialization.md  29
-rw-r--r--  doc/slice/threadsafe.md  19
-rw-r--r--  doc/slice/traits.md  7
-rw-r--r--  doc/store.md  90
-rw-r--r--  doc/store/BitStore.md  37
-rw-r--r--  doc/vec.md  16
-rw-r--r--  doc/vec/BitVec.md  174
-rw-r--r--  doc/vec/iter.md  14
-rw-r--r--  doc/vec/iter/Drain.md  17
-rw-r--r--  doc/vec/iter/Extend_BitRef.md  10
-rw-r--r--  doc/vec/iter/Extend_bool.md  21
-rw-r--r--  doc/vec/iter/FillStatus.md  14
-rw-r--r--  doc/vec/iter/FromIterator_BitRef.md  10
-rw-r--r--  doc/vec/iter/FromIterator_bool.md  21
-rw-r--r--  doc/vec/iter/IntoIterator.md  11
-rw-r--r--  doc/vec/iter/Splice.md  17
-rw-r--r--  doc/view.md  22
-rw-r--r--  doc/view/AsBits.md  27
-rw-r--r--  doc/view/AsMutBits.md  27
-rw-r--r--  doc/view/BitView.md  27
-rw-r--r--  src/access.rs  290
-rw-r--r--  src/array.rs  118
-rw-r--r--  src/array/api.rs  55
-rw-r--r--  src/array/iter.rs  229
-rw-r--r--  src/array/ops.rs  242
-rw-r--r--  src/array/tests.rs  176
-rw-r--r--  src/array/traits.rs  382
-rw-r--r--  src/boxed.rs  365
-rw-r--r--  src/boxed/api.rs  139
-rw-r--r--  src/boxed/iter.rs  241
-rw-r--r--  src/boxed/ops.rs  257
-rw-r--r--  src/boxed/tests.rs  187
-rw-r--r--  src/boxed/traits.rs  391
-rw-r--r--  src/devel.rs  115
-rw-r--r--  src/domain.rs  1139
-rw-r--r--  src/field.rs  638
-rw-r--r--  src/field/io.rs  106
-rw-r--r--  src/field/tests.rs  315
-rw-r--r--  src/index.rs  1349
-rw-r--r--  src/lib.rs  77
-rw-r--r--  src/macros.rs  365
-rw-r--r--  src/macros/internal.rs  414
-rw-r--r--  src/macros/tests.rs  587
-rw-r--r--  src/mem.rs  159
-rw-r--r--  src/order.rs  531
-rw-r--r--  src/ptr.rs  349
-rw-r--r--  src/ptr/addr.rs  170
-rw-r--r--  src/ptr/proxy.rs  475
-rw-r--r--  src/ptr/range.rs  403
-rw-r--r--  src/ptr/single.rs  1446
-rw-r--r--  src/ptr/span.rs  874
-rw-r--r--  src/ptr/tests.rs  172
-rw-r--r--  src/serdes.rs  160
-rw-r--r--  src/serdes/array.rs  467
-rw-r--r--  src/serdes/slice.rs  460
-rw-r--r--  src/serdes/utils.rs  478
-rw-r--r--  src/slice.rs  1819
-rw-r--r--  src/slice/api.rs  2778
-rw-r--r--  src/slice/iter.rs  2595
-rw-r--r--  src/slice/ops.rs  239
-rw-r--r--  src/slice/specialization.rs  81
-rw-r--r--  src/slice/specialization/lsb0.rs  310
-rw-r--r--  src/slice/specialization/msb0.rs  305
-rw-r--r--  src/slice/tests.rs  273
-rw-r--r--  src/slice/tests/api.rs  139
-rw-r--r--  src/slice/tests/iter.rs  703
-rw-r--r--  src/slice/tests/ops.rs  128
-rw-r--r--  src/slice/tests/traits.rs  349
-rw-r--r--  src/slice/traits.rs  582
-rw-r--r--  src/store.rs  329
-rw-r--r--  src/vec.rs  666
-rw-r--r--  src/vec/api.rs  1031
-rw-r--r--  src/vec/iter.rs  689
-rw-r--r--  src/vec/ops.rs  272
-rw-r--r--  src/vec/tests.rs  81
-rw-r--r--  src/vec/tests/api.rs  70
-rw-r--r--  src/vec/tests/iter.rs  41
-rw-r--r--  src/vec/tests/traits.rs  102
-rw-r--r--  src/vec/traits.rs  409
-rw-r--r--  src/view.rs  301
238 files changed, 37399 insertions, 0 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..5c397e0
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+ "git": {
+ "sha1": "c922de6998be994cb5be1a349c7a0a0d16a19f07"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..93b9cc6
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,729 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bincode"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitvec"
+version = "1.0.1"
+dependencies = [
+ "bincode",
+ "criterion",
+ "funty",
+ "radium",
+ "rand",
+ "serde",
+ "serde_json",
+ "serde_test",
+ "static_assertions",
+ "tap",
+ "wyz",
+]
+
+[[package]]
+name = "bstr"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
+dependencies = [
+ "lazy_static",
+ "memchr",
+ "regex-automata",
+ "serde",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"
+
+[[package]]
+name = "cast"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a"
+dependencies = [
+ "rustc_version",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clap"
+version = "2.34.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+dependencies = [
+ "bitflags",
+ "textwrap",
+ "unicode-width",
+]
+
+[[package]]
+name = "criterion"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10"
+dependencies = [
+ "atty",
+ "cast",
+ "clap",
+ "criterion-plot",
+ "csv",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_cbor",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c00d6d2ea26e8b151d99093005cb442fb9a37aeaca582a03ec70946f49ab5ed9"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6"
+dependencies = [
+ "cfg-if",
+ "lazy_static",
+]
+
+[[package]]
+name = "csv"
+version = "1.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+dependencies = [
+ "bstr",
+ "csv-core",
+ "itoa 0.4.8",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "either"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+
+[[package]]
+name = "funty"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
+
+[[package]]
+name = "getrandom"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "half"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
+
+[[package]]
+name = "itoa"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35"
+
+[[package]]
+name = "js-sys"
+version = "0.3.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.117"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c"
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "oorandom"
+version = "11.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
+
+[[package]]
+name = "plotters"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "radium"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
+
+[[package]]
+name = "rand"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "rayon"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
+dependencies = [
+ "autocfg",
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "lazy_static",
+ "num_cpus",
+]
+
+[[package]]
+name = "regex"
+version = "1.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
+dependencies = [
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+
+[[package]]
+name = "rustc_version"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "semver"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0486718e92ec9a68fbed73bb5ef687d71103b142595b406835649bebd33f72c7"
+
+[[package]]
+name = "serde"
+version = "1.0.136"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789"
+
+[[package]]
+name = "serde_cbor"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
+dependencies = [
+ "half",
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.136"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.78"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085"
+dependencies = [
+ "itoa 1.0.1",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_test"
+version = "1.0.136"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21675ba6f9d97711cc00eee79d8dd7d0a31e571c350fb4d8a7c78f70c0e7b0e9"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
+[[package]]
+name = "syn"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "tap"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
+
+[[package]]
+name = "walkdir"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
+dependencies = [
+ "same-file",
+ "winapi",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca"
+dependencies = [
+ "bumpalo",
+ "lazy_static",
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2"
+
+[[package]]
+name = "web-sys"
+version = "0.3.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "wyz"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e"
+dependencies = [
+ "tap",
+]
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..c468dc6
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,115 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.56"
+name = "bitvec"
+version = "1.0.1"
+include = [
+ "Cargo.toml",
+ "LICENSE.txt",
+ "README.md",
+ "doc/**/*.md",
+ "src/**/*.rs",
+ "benches/*.rs",
+]
+description = "Addresses memory by bits, for packed collections and bitfields"
+homepage = "https://bitvecto-rs.github.io/bitvec"
+documentation = "https://docs.rs/bitvec/latest/bitvec"
+readme = "README.md"
+keywords = [
+ "bitfields",
+ "bitmap",
+ "bitstream",
+ "bitvec",
+ "bitvector",
+]
+categories = [
+ "data-structures",
+ "embedded",
+ "no-std",
+ "rust-patterns",
+]
+license = "MIT"
+repository = "https://github.com/bitvecto-rs/bitvec"
+resolver = "2"
+
+[package.metadata.docs.rs]
+features = [
+ "atomic",
+ "serde",
+ "std",
+]
+
+[dependencies.funty]
+version = "^2.0"
+default-features = false
+
+[dependencies.radium]
+version = "0.7"
+
+[dependencies.serde]
+version = "1"
+optional = true
+default-features = false
+
+[dependencies.tap]
+version = "1"
+
+[dependencies.wyz]
+version = "0.5"
+default-features = false
+
+[dev-dependencies.bincode]
+version = "1.3"
+
+[dev-dependencies.criterion]
+version = "0.3"
+
+[dev-dependencies.rand]
+version = "0.8"
+
+[dev-dependencies.serde]
+version = "1"
+
+[dev-dependencies.serde_json]
+version = "1"
+
+[dev-dependencies.serde_test]
+version = "1"
+
+[dev-dependencies.static_assertions]
+version = "1"
+
+[features]
+alloc = []
+atomic = []
+default = [
+ "atomic",
+ "std",
+]
+std = ["alloc"]
+testing = []
+
+[badges.codecov]
+branch = "main"
+repository = "bitvecto-rs/bitvec"
+service = "github"
+
+[badges.is-it-maintained-issue-resolution]
+repository = "bitvecto-rs/bitvec"
+
+[badges.is-it-maintained-open-issues]
+repository = "bitvecto-rs/bitvec"
+
+[badges.maintenance]
+status = "passively-maintained"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..6659453
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,109 @@
+########################################################################
+# Project Manifest #
+# #
+# This file describes the `bitvec` project to Cargo. #
+########################################################################
+
+[package]
+name = "bitvec"
+version = "1.0.1"
+edition = "2021"
+
+categories = [
+ "data-structures",
+ "embedded",
+ "no-std",
+ "rust-patterns",
+]
+description = "Addresses memory by bits, for packed collections and bitfields"
+documentation = "https://docs.rs/bitvec/latest/bitvec"
+homepage = "https://bitvecto-rs.github.io/bitvec"
+include = [
+ "Cargo.toml",
+ "LICENSE.txt",
+ "README.md",
+ "doc/**/*.md",
+ "src/**/*.rs",
+ "benches/*.rs",
+]
+keywords = [
+ "bitfields",
+ "bitmap",
+ "bitstream",
+ "bitvec",
+ "bitvector",
+]
+license = "MIT"
+readme = "README.md"
+repository = "https://github.com/bitvecto-rs/bitvec"
+rust-version = "1.56"
+
+[features]
+alloc = [
+]
+atomic = [
+]
+# Enable use of atomics and the standard library by default. no-std
+# users will need to opt out with `default-features = false`.
+default = [
+ "atomic",
+ "std",
+]
+# The standard library includes the allocator.
+std = [
+ "alloc",
+]
+testing = [
+]
+
+[dependencies]
+radium = "0.7"
+tap = "1"
+
+[dependencies.funty]
+version = "^2.0"
+default-features = false
+
+[dependencies.serde]
+default-features = false
+optional = true
+version = "1"
+
+[dependencies.wyz]
+version = "0.5"
+default-features = false
+
+# Crates required when running the test suite.
+[dev-dependencies]
+bincode = "1.3"
+criterion = "0.3"
+rand = "0.8"
+serde = "1"
+serde_json = "1"
+serde_test = "1"
+static_assertions = "1"
+
+# [[bench]]
+# name = "memcpy"
+# harness = false
+# Indicates the features that docs.rs should enable when building documentation.
+[package.metadata.docs.rs]
+features = [
+ "atomic",
+ "serde",
+ "std",
+]
+
+[badges.codecov]
+repository = "bitvecto-rs/bitvec"
+branch = "main"
+service = "github"
+
+[badges.is-it-maintained-issue-resolution]
+repository = "bitvecto-rs/bitvec"
+
+[badges.is-it-maintained-open-issues]
+repository = "bitvecto-rs/bitvec"
+
+[badges.maintenance]
+status = "passively-maintained"
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..467fbd9
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 myrrlyn (Alexander Payne)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6f543af
--- /dev/null
+++ b/README.md
@@ -0,0 +1,426 @@
+<div style="text-align: center;" align="center">
+
+# `bitvec`
+
+## A Magnifying Glass for Memory <!-- omit in toc -->
+
+[![Crate][crate_img]][crate]
+[![Documentation][docs_img]][docs]
+[![License][license_img]][license_file]
+
+[![Crate Downloads][downloads_img]][crate]
+[![Project Size][loc_img]][loc]
+
+</div>
+
+1. [Summary](#summary)
+1. [Introduction](#introduction)
+1. [Highlights](#highlights)
+1. [Usage](#usage)
+1. [Examples](#examples)
+1. [User Stories](#user-stories)
+ 1. [Bit Collections](#bit-collections)
+ 1. [Bit-Field Memory Access](#bit-field-memory-access)
+ 1. [Transport Protocols](#transport-protocols)
+1. [Feature Flags](#feature-flags)
+1. [Deeper Reading](#deeper-reading)
+
+## Summary
+
+`bitvec` provides a foundational API for bitfields in Rust. It specializes
+standard-library data structures (slices, arrays, and vectors of `bool`) to use
+one-bit-per-`bool` storage, similar to [`std::bitset<N>`] and
+[`std::vector<bool>`] in C++.
+
+Additionally, it allows a memory region to be divided into arbitrary regions of
+integer storage, like [binaries][erl_bit] in Erlang.
+
+If you need to view memory as bit-addressed instead of byte-addressed, then
+`bitvec` is the fastest, most complete, and Rust-idiomatic crate for you.
+
+## Introduction
+
+Computers do not operate on bits. The memory bus is byte-addressed, and
+processors operate on register words, which are typically four to eight bytes,
+or even wider. This means that when programmers wish to operate on individual
+bits within a byte of memory or a word of register, they have to do so manually,
+using shift and mask operations that are likely familiar to anyone who has done
+this before.
+
+`bitvec` brings the capabilities of C++’s compact `bool` storage and Erlang’s
+decomposable bit-streams to Rust, in a package that fits in with your existing
+Rust idioms and in the most capable, performant, implementation possible. The
+bit-stream behavior provides the logic necessary for C-style structural
+bitfields, and syntax sugar for it can be found in [`deku`].
+
+`bitvec` enables you to write code for bit-addressed memory that is simple,
+easy, and fast. It compiles to the same, or even better, object code than you
+would get from writing shift/mask instructions manually. It leverages Rust’s
+powerful reference and type systems to create a system that seamlessly bridges
+single-bit addressing, precise control of in-memory layout, and Rust-native
+ownership and borrowing mechanisms.
+
+## Highlights
+
+`bitvec` has a number of unique capabilities related to its place as a Rust
+library and as a bit-addressing system.
+
+- It supports arbitrary bit-addressing, and its bit slices can be munched from
+ the front.
+- `BitSlice` is a region type equivalent to `[bool]`, and can be described by
+ Rust references and thus fit into reference-based APIs.
+- Type parameters enable users to select the precise memory representation they
+ desire.
+- A memory model accounts for element-level aliasing and is safe for concurrent
+ use. In particular, the “Beware Bitfields” bug described in
+ [this Mozilla report][moz] is simply impossible to produce.
+- Native support for atomic integers as bit-field storage.
+- Users can supply their own translation layer for memory representation if the
+ built-in translations are insufficient.
+
+However, it does also have some small costs associated with its capabilities:
+
+- `BitSlice` cannot be used as a referent type in pointers, such as `Box`, `Rc`,
+ or `Arc`.
+- `BitSlice` cannot implement `IndexMut`, so `bitslice[index] = true;` does not
+ work.
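As a minimal sketch of what that second limitation means in practice (the four-bit buffer here is purely illustrative), writes use `set` or the proxy reference shown later in this README instead of assignment syntax:

```rust
use bitvec::prelude::*;

// `IndexMut` is unavailable, so `bits[0] = true;` is replaced by `set`.
let bits = bits![mut 0; 4];
bits.set(0, true);
assert!(bits[0]);
assert!(!bits[1]);
```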
+
+## Usage
+
+**Minimum Supported Rust Version**: 1.56.0
+
+`bitvec` strives to follow the sequence APIs in the standard library. However,
+as most of its functionality is a reïmplementation that does not require the
+standard library to actually have the symbols present, doing so may not require
+an MSRV raise.
+
+Now that `bitvec` is at 1.0, it will only raise MSRV in minor-edition releases.
+If you have a pinned Rust toolchain, you should depend on `bitvec` with a
+limiting minor-version constraint like `"~1.0"`.
+
+First, depend on it in your Cargo manifest:
+
+```toml
+[dependencies]
+bitvec = "1"
+```
+
+> Note: `bitvec` supports `#![no_std]` targets. If you do not have `std`,
+> disable the default features, and explicitly restore any features that you do
+> have:
+>
+> ```toml
+> [dependencies.bitvec]
+> version = "1"
+> default-features = false
+> features = ["atomic", "alloc"]
+> ```
+
+Once Cargo knows about it, bring its prelude into scope:
+
+```rust
+use bitvec::prelude::*;
+```
+
+You can read the [prelude reëxports][prelude] to see exactly which symbols are
+being imported. The prelude brings in many symbols, and while name collisions
+are not likely, you may wish to instead import the prelude *module* rather than
+its contents:
+
+```rust
+use bitvec::prelude as bv;
+```
+
+You should almost certainly use type aliases to make names for specific
+instantiations of `bitvec` type parameters, and use that rather than attempting
+to remain generic over an `<T: BitStore, O: BitOrder>` pair throughout your
+project.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+// All data-types have macro
+// constructors.
+let arr = bitarr![u32, Lsb0; 0; 80];
+let bits = bits![u16, Msb0; 0; 40];
+
+// Unsigned integers (scalar, array,
+// and slice) can be borrowed.
+let data = 0x2021u16;
+let bits = data.view_bits::<Msb0>();
+let data = [0xA5u8, 0x3C];
+let bits = data.view_bits::<Lsb0>();
+
+// Bit-slices can split anywhere.
+let (head, rest) = bits.split_at(4);
+assert_eq!(head, bits[.. 4]);
+assert_eq!(rest, bits[4 ..]);
+
+// And they are writable!
+let mut data = [0u8; 2];
+let bits = data.view_bits_mut::<Lsb0>();
+// l and r each own one byte.
+let (l, r) = bits.split_at_mut(8);
+
+// but now a, b, c, and d own a nibble!
+let ((a, b), (c, d)) = (
+ l.split_at_mut(4),
+ r.split_at_mut(4),
+);
+
+// and all four of them are writable.
+a.set(0, true);
+b.set(1, true);
+c.set(2, true);
+d.set(3, true);
+
+assert!(bits[0]); // a[0]
+assert!(bits[5]); // b[1]
+assert!(bits[10]); // c[2]
+assert!(bits[15]); // d[3]
+
+// `BitSlice` is accessed by reference,
+// which means it respects NLL styles.
+assert_eq!(data, [0x21u8, 0x84]);
+
+// Furthermore, bit-slices can store
+// ordinary integers:
+let eight = [0u8, 4, 8, 12, 16, 20, 24, 28];
+// a b c d e f g h
+let mut five = [0u8; 5];
+for (slot, byte) in five
+ .view_bits_mut::<Msb0>()
+ .chunks_mut(5)
+ .zip(eight.iter().copied())
+{
+ slot.store_be(byte);
+ assert_eq!(slot.load_be::<u8>(), byte);
+}
+
+assert_eq!(five, [
+ 0b00000_001,
+// aaaaa bbb
+ 0b00_01000_0,
+// bb ccccc d
+ 0b1100_1000,
+// dddd eeee
+ 0b0_10100_11,
+// e fffff gg
+ 0b000_11100,
+// ggg hhhhh
+]);
+```
+
+The `BitSlice` type is a view that alters the behavior of a borrowed memory
+region. It is never held directly, but only by references (created by borrowing
+integer memory) or the `BitArray` value type. In addition, the presence of a
+dynamic allocator enables the `BitBox` and `BitVec` buffer types, which can be
+used for more advanced buffer manipulation:
+
+```rust
+#[cfg(feature = "alloc")]
+fn main() {
+
+use bitvec::prelude::*;
+
+let mut bv = bitvec![u8, Msb0;];
+bv.push(false);
+bv.push(true);
+bv.extend([false; 4].iter());
+bv.extend(&15u8.view_bits::<Lsb0>()[.. 4]);
+
+assert_eq!(bv.as_raw_slice(), &[
+ 0b01_0000_11, 0b11_000000
+// ^ dead
+]);
+
+}
+```
+
+While place expressions like `bits[index] = value;` are not available, `bitvec`
+instead provides a proxy structure that can be used as *nearly* an `&mut bit`
+reference:
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0];
+// `bit` is not a reference, so
+// it must be bound with `mut`.
+let mut bit = bits.get_mut(0).unwrap();
+assert!(!*bit);
+*bit = true;
+assert!(*bit);
+// `bit` is not a reference,
+// so NLL rules do not apply.
+drop(bit);
+assert!(bits[0]);
+```
+
+The `bitvec` data types implement a complete replacement for their
+standard-library counterparts, including all of the inherent methods, traits,
+and operator behaviors.
+
+## User Stories
+
+Uses of `bitvec` generally fall into three major genres.
+
+- compact, fast, `usize => bit` collections
+- truncated integer storage
+- precise control of memory layout
+
+### Bit Collections
+
+At its most basic, `bitvec` provides sequence types analogous to the standard
+library’s `bool` collections. The default behavior is optimized for fast memory
+access and simple codegen, and can compact `[bool]` or `Vec<bool>` with minimal
+overhead.
+
+While `bitvec` does not attempt to take advantage of SIMD or other vectorized
+instructions in its default work, its codegen should be a good candidate for
+autovectorization in LLVM. If explicit vectorization is important to you, please
+[file an issue][issue].
+
+Example uses might be implementing a Sieve of Eratosthenes to store primes, or
+other collections that test a yes/no property of a number; or replacing
+`Vec<Option<T>>` with `(BitVec, Vec<MaybeUninit<T>>)`.
+
+To get started, you can perform basic text replacement on your project.
+Translate any existing types as follows:
+
+- `[bool; N]` becomes `BitArray`
+- `[bool]` becomes `BitSlice`
+- `Vec<bool>` becomes `BitVec`
+- `Box<[bool]>` becomes `BitBox`
+
+and then follow any compiler errors that arise.
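For instance, a minimal sketch of the Sieve of Eratosthenes mentioned above, using one bit per candidate number (the `LIMIT` of 100 is an arbitrary, illustrative choice):

```rust
use bitvec::prelude::*;

const LIMIT: usize = 100;

fn primes_below_limit() -> BitVec {
    // `true` marks "still considered prime"; 0 and 1 are not prime.
    let mut sieve = bitvec![1; LIMIT];
    sieve.set(0, false);
    sieve.set(1, false);
    let mut n = 2;
    while n * n < LIMIT {
        if sieve[n] {
            // Clear every multiple of `n`, starting at n².
            let mut multiple = n * n;
            while multiple < LIMIT {
                sieve.set(multiple, false);
                multiple += n;
            }
        }
        n += 1;
    }
    sieve
}
```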
+
+### Bit-Field Memory Access
+
+A single bit of information has very few uses. `bitvec` also enables you to
+store integers wider than a single bit, by selecting a bit-slice and using the
+[`BitField`] trait on it. You can store and retrieve both unsigned and signed
+integers, as long as the ordering type parameter is [`Lsb0`] or [`Msb0`].
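A minimal sketch of that usage follows; the 4-bit and 6-bit field widths are arbitrary assumptions, not a layout the crate prescribes:

```rust
use bitvec::prelude::*;

// Pack two small fields into one u16 element, then read them back.
let mut raw = 0u16;
let bits = raw.view_bits_mut::<Lsb0>();

bits[0 .. 4].store::<u8>(0xA);
bits[4 .. 10].store::<u8>(0x2B);

assert_eq!(bits[0 .. 4].load::<u8>(), 0xA);
assert_eq!(bits[4 .. 10].load::<u8>(), 0x2B);
```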
+
+If your bit-field storage buffers are never serialized for exchange between
+machines, then you can get away with using the default type parameters and
+unadorned load/store methods. While the in-memory layout of stored integers may
+be surprising if directly inspected, the overall behavior should be optimal for
+your target.
+
+Remember: `bitvec` only provides array place expressions, using integer start
+and end points. You can use [`deku`] if you want C-style named structural fields
+with bit-field memory storage.
+
+However, if you are de/serializing buffers for transport, then you fall into the
+third category.
+
+### Transport Protocols
+
+Many protocols use sub-element fields in order to save space in transport; for
+example, TCP headers have single-bit and 4-bit fields in order to pack all the
+needed information into a desirable amount of space. In C or Erlang, these TCP
+protocol fields could be mapped by record fields in the language. In Rust, they
+can be mapped by indexing into a bit-slice.
+
+When using `bitvec` to manage protocol buffers, you will need to select the
+exact type parameters that match your memory layout. For instance, TCP uses
+`<u8, Msb0>`, while IPv6 on a little-endian machine uses `<u32, Lsb0>`. Once you
+have done this, you can replace all of your `(memory & mask) >> shift` or
+`memory |= (value & mask) << shift` expressions with `memory[start .. end]`.
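As a minimal sketch of that replacement (the buffer is loosely modeled on the first four bytes of an IPv4 header, and the bit ranges are illustrative assumptions rather than a full protocol definition):

```rust
use bitvec::prelude::*;

let header = [0x45u8, 0x00, 0x00, 0x54];
let bits = header.view_bits::<Msb0>();

// First nibble, second nibble, and the following 16-bit field,
// each read by bit range instead of mask-and-shift.
let version: u8 = bits[0 .. 4].load_be();
let ihl: u8 = bits[4 .. 8].load_be();
let total_len: u16 = bits[16 .. 32].load_be();

assert_eq!(version, 4);
assert_eq!(ihl, 5);
assert_eq!(total_len, 0x0054);
```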
+
+As a direct example, the Itanium instruction set IA-64 uses very-long
+instruction words containing three 41-bit fields in a `[u8; 16]`. One IA-64
+disassembler replaced its manual shift/mask implementation with `bitvec` range
+indexing, taking the bit numbers directly from the datasheet, and observed that
+their code was both easier to maintain and also had better performance as a
+result!
+
+## Feature Flags
+
+`bitvec` has a few Cargo features that govern its API surface. The default
+feature set is:
+
+```toml
+[dependencies.bitvec]
+version = "1"
+features = [
+ "alloc",
+ "atomic",
+ # "serde",
+ "std",
+]
+```
+
+Use `default-features = false` to disable all of them, then `features = []` to
+restore the ones you need.
+
+- `alloc`: This links against the `alloc` distribution crate, and provides the
+ `BitVec` and `BitBox` types. It can be used on `#![no_std]` targets that
+ possess a dynamic allocator but not an operating system.
+
+- `atomic`: This controls whether atomic instructions can be used for aliased
+ memory. `bitvec` uses the [`radium`] crate to perform automatic detection of
+ atomic capability, and targets that do not possess atomic instructions can
+ still function with this feature *enabled*. Its only effect is that targets
+ which do have atomic instructions may choose to disable it and enforce
+ single-threaded behavior that never incurs atomic synchronization.
+
+- `serde`: This enables the de/serialization of `bitvec` buffers through the
+ `serde` system. This can be useful if you need to transmit `usize => bool`
+ collections.
+
+- `std`: This provides some `std::io::{Read,Write}` implementations, as well as
+ `std::error::Error` for the various error types. It is otherwise unnecessary.
+
+## Deeper Reading
+
+The [API Documentation][docsrs] explores `bitvec`’s usage and implementation in
+great detail. In particular, you should read the documentation for the
+[`order`], [`store`], and [`field`] modules, as well as the [`BitSlice`] and
+[`BitArray`] types.
+
+In addition, the [user guide][guide] explores the philosophical and academic
+concepts behind `bitvec`’s construction, its goals, and the more intricate parts
+of its behavior.
+
+While you should be able to get started with `bitvec` with only dropping it into
+your code and using the same habits you have with the standard library, both of
+these resources contain all of the information needed to understand what it
+does, how it works, and how it can be useful to you.
+
+<!-- Badges -->
+[crate]: https://crates.io/crates/bitvec "Crate listing"
+[crate_img]: https://img.shields.io/crates/v/bitvec.svg?logo=rust&style=for-the-badge "Crate badge"
+[docs]: https://docs.rs/bitvec/latest/bitvec "Crate documentation"
+[docs_img]: https://img.shields.io/docsrs/bitvec/latest.svg?style=for-the-badge "Documentation badge"
+[downloads_img]: https://img.shields.io/crates/dv/bitvec.svg?logo=rust&style=for-the-badge "Crate downloads"
+[license_file]: https://github.com/bitvecto-rs/bitvec/blob/main/LICENSE.txt "Project license"
+[license_img]: https://img.shields.io/crates/l/bitvec.svg?style=for-the-badge "License badge"
+[loc]: https://github.com/bitvecto-rs/bitvec "Project repository"
+[loc_img]: https://img.shields.io/tokei/lines/github/bitvecto-rs/bitvec?category=code&style=for-the-badge "Project size"
+
+<!-- Documentation -->
+[`BitArray`]: https://docs.rs/bitvec/latest/bitvec/array/struct.BitArray.html
+[`BitField`]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html
+[`BitSlice`]: https://docs.rs/bitvec/latest/bitvec/slice/struct.BitSlice.html
+[`Lsb0`]: https://docs.rs/bitvec/latest/bitvec/order/struct.Lsb0.html
+[`Msb0`]: https://docs.rs/bitvec/latest/bitvec/order/struct.Msb0.html
+[`field`]: https://docs.rs/bitvec/latest/bitvec/field/index.html
+[`order`]: https://docs.rs/bitvec/latest/bitvec/order/index.html
+[`store`]: https://docs.rs/bitvec/latest/bitvec/store/index.html
+[layout]: https://bitvecto-rs.github.io/bitvec/memory-representation
+[prelude]: https://docs.rs/bitvec/latest/bitvec/prelude
+
+<!-- External References -->
+[`deku`]: https://crates.io/crates/deku
+[docsrs]: https://docs.rs/bitvec/latest/bitvec
+[erl_bit]: https://www.erlang.org/doc/programming_examples/bit_syntax.html
+[guide]: https://bitvecto-rs.github.io/bitvec/
+[issue]: https://github.com/bitvecto-rs/bitvec/issues/new
+[moz]: https://hacks.mozilla.org/2021/04/eliminating-data-races-in-firefox-a-technical-report/ "Mozilla Hacks article describing various concurrency bugs in FireFox"
+[`radium`]: https://crates.io/crates/radium
+[`std::bitset<N>`]: https://en.cppreference.com/w/cpp/utility/bitset
+[`std::vector<bool>`]: https://en.cppreference.com/w/cpp/container/vector_bool
diff --git a/benches/eq.rs b/benches/eq.rs
new file mode 100644
index 0000000..666f0f0
--- /dev/null
+++ b/benches/eq.rs
@@ -0,0 +1,43 @@
+#![feature(test)]
+
+extern crate test;
+
+use bitvec::prelude::*;
+use test::Bencher;
+
+#[bench]
+fn bitwise_eq(bench: &mut Bencher) {
+ let a = bitarr![usize, Lsb0; 0; 500];
+ let b = bitarr![usize, Msb0; 0; 500];
+
+ bench.iter(|| {
+ a.iter()
+ .by_vals()
+ .zip(b.iter().by_vals())
+ .all(|(a, b)| a == b)
+ });
+}
+
+#[bench]
+fn plain_eq(bench: &mut Bencher) {
+ let a = bitarr![usize, Lsb0; 0; 500];
+ let b = bitarr![usize, Msb0; 0; 500];
+
+ bench.iter(|| a == b);
+}
+
+#[bench]
+fn lsb0_accel_eq(bench: &mut Bencher) {
+ let a = bitarr![usize, Lsb0; 0; 500];
+ let b = bitarr![usize, Lsb0; 0; 500];
+
+ bench.iter(|| a == b);
+}
+
+#[bench]
+fn msb0_accel_eq(bench: &mut Bencher) {
+ let a = bitarr![usize, Msb0; 0; 500];
+ let b = bitarr![usize, Msb0; 0; 500];
+
+ bench.iter(|| a == b);
+}
diff --git a/benches/iter.rs b/benches/iter.rs
new file mode 100644
index 0000000..f34ea58
--- /dev/null
+++ b/benches/iter.rs
@@ -0,0 +1,32 @@
+#![feature(test)]
+
+extern crate test;
+
+use bitvec::prelude::*;
+use test::Bencher;
+
+const LEN: usize = 1 << 10;
+
+#[bench]
+fn iter_proxy(bench: &mut Bencher) {
+ let a = bits![1; LEN];
+ bench.iter(|| a.iter().all(|b| *b));
+}
+
+#[bench]
+fn iter_ref(bench: &mut Bencher) {
+ let a = bits![1; LEN];
+ bench.iter(|| a.iter().by_refs().all(|b| *b));
+}
+
+#[bench]
+fn iter_val(bench: &mut Bencher) {
+ let a = bits![1; LEN];
+ bench.iter(|| a.iter().by_vals().all(|b| b));
+}
+
+#[bench]
+fn iter_bools(bench: &mut Bencher) {
+ let a = [true; LEN];
+ bench.iter(|| a.iter().copied().all(|b| b));
+}
diff --git a/benches/macros.rs b/benches/macros.rs
new file mode 100644
index 0000000..c19199b
--- /dev/null
+++ b/benches/macros.rs
@@ -0,0 +1,153 @@
+/*! Macro construction benchmarks.
+
+This is taken from [issue #28], which noted that the `bitvec![bit; rep]`
+expansion was horribly inefficient.
+
+This benchmark crate should be used for all macro performance recording, and
+compare the macros against `vec!`. While `vec!` will always be faster, because
+`bitvec!` does more work than `vec!`, they should at least be close.
+
+Original performance was 10,000x slower. Performance after the fix for #28 was
+within 20ns.
+
+[issue #28]: https://github.com/myrrlyn/bitvec/issues/28
+!*/
+
+#![feature(test)]
+
+extern crate test;
+
+use bitvec::prelude::*;
+use test::Bencher;
+
+#[bench]
+fn bits_seq_u8(b: &mut Bencher) {
+ b.iter(|| {
+ bitarr![u8, LocalBits;
+ 0, 1, 0, 1, 0, 0, 1, 1,
+ 0, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 0, 1, 1, 0, 0,
+ 0, 1, 1, 1, 0, 1, 0, 1,
+ 0, 1, 1, 1, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 0, 1, 1, 0, 1,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 0, 1, 0, 0, 0, 0, 1,
+ ]
+ });
+}
+
+#[bench]
+fn bits_seq_u16(b: &mut Bencher) {
+ b.iter(|| {
+ bitarr![u16, LocalBits;
+ 0, 1, 0, 1, 0, 0, 1, 1,
+ 0, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 0, 1, 1, 0, 0,
+ 0, 1, 1, 1, 0, 1, 0, 1,
+ 0, 1, 1, 1, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 0, 1, 1, 0, 1,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 0, 1, 0, 0, 0, 0, 1,
+ ]
+ });
+}
+
+#[bench]
+fn bits_seq_u32(b: &mut Bencher) {
+ b.iter(|| {
+ bitarr![u32, LocalBits;
+ 0, 1, 0, 1, 0, 0, 1, 1,
+ 0, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 0, 1, 1, 0, 0,
+ 0, 1, 1, 1, 0, 1, 0, 1,
+ 0, 1, 1, 1, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 0, 1, 1, 0, 1,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 0, 1, 0, 0, 0, 0, 1,
+ ]
+ });
+}
+
+#[bench]
+#[cfg(target_pointer_width = "64")]
+fn bits_seq_u64(b: &mut Bencher) {
+ b.iter(|| {
+ bitarr![u64, LocalBits;
+ 0, 1, 0, 1, 0, 0, 1, 1,
+ 0, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 0, 1, 1, 0, 0,
+ 0, 1, 1, 1, 0, 1, 0, 1,
+ 0, 1, 1, 1, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 0, 1, 1, 0, 1,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 1, 1, 0, 1, 1, 1, 1,
+ 0, 0, 1, 0, 0, 0, 0, 1,
+ ]
+ });
+}
+
+// The repetition macros run at compile time, so should bench at zero.
+
+#[bench]
+fn bits_rep_u8(b: &mut Bencher) {
+ b.iter(|| bitarr![u8, LocalBits; 0; 120]);
+ b.iter(|| bitarr![u8, LocalBits; 1; 120]);
+}
+
+#[bench]
+fn bits_rep_u16(b: &mut Bencher) {
+ b.iter(|| bitarr![u16, LocalBits; 0; 120]);
+ b.iter(|| bitarr![u16, LocalBits; 1; 120]);
+}
+
+#[bench]
+fn bits_rep_u32(b: &mut Bencher) {
+ b.iter(|| bitarr![u32, LocalBits; 0; 120]);
+ b.iter(|| bitarr![u32, LocalBits; 1; 120]);
+}
+
+#[bench]
+#[cfg(target_pointer_width = "64")]
+fn bits_rep_u64(b: &mut Bencher) {
+ b.iter(|| bitarr![u64, LocalBits; 0; 120]);
+ b.iter(|| bitarr![u64, LocalBits; 1; 120]);
+}
+
+#[bench]
+fn bitvec_rep(b: &mut Bencher) {
+ b.iter(|| bitvec![0; 16 * 16 * 9]);
+ b.iter(|| bitvec![1; 16 * 16 * 9]);
+}
+
+#[bench]
+fn vec_rep(b: &mut Bencher) {
+ b.iter(|| vec![0u8; 16 * 16 * 9 / 8]);
+ b.iter(|| vec![-1i8; 16 * 16 * 9 / 8]);
+}
diff --git a/benches/memcpy.rs b/benches/memcpy.rs
new file mode 100644
index 0000000..96966b0
--- /dev/null
+++ b/benches/memcpy.rs
@@ -0,0 +1,288 @@
+/*! Benchmarks for `BitSlice::copy_from_slice`.
+
+The `copy_from_slice` implementation attempts to detect slice conditions that
+allow element-wise `memcpy` behavior, rather than the conservative bit-by-bit
+iteration, as element load/stores are faster than reading and writing each bit
+in an element individually.
+!*/
+
+#![feature(maybe_uninit_uninit_array, maybe_uninit_slice)]
+
+use std::mem::MaybeUninit;
+
+use bitvec::{
+ mem::{
+ bits_of,
+ elts,
+ },
+ prelude::*,
+};
+use criterion::{
+ criterion_group,
+ criterion_main,
+ BenchmarkId,
+ Criterion,
+ SamplingMode,
+ Throughput,
+};
+use tap::Tap;
+
+// One kibibyte
+const KIBIBYTE: usize = 1024;
+// Some number of kibibytes
+#[allow(clippy::identity_op)]
+const FACTOR: usize = 1 * KIBIBYTE;
+
+// Scalars applied to FACTOR to get a range of action
+const SCALARS: &[usize] = &[1, 2, 4, 8, 16, 24, 32, 40, 52, 64];
+// The maximum number of bits in a memory region.
+const MAX_BITS: usize = 64 * FACTOR * 8;
+
+fn make_slots<T, const LEN: usize>()
+-> ([MaybeUninit<T>; LEN], [MaybeUninit<T>; LEN])
+where T: BitStore {
+ (
+ MaybeUninit::<T>::uninit_array::<LEN>(),
+ MaybeUninit::<T>::uninit_array::<LEN>(),
+ )
+}
+
+fn view_slots<'a, 'b, T>(
+ src: &'a [MaybeUninit<T>],
+ dst: &'b mut [MaybeUninit<T>],
+) -> (&'a [T], &'b mut [T])
+where
+ T: BitStore,
+{
+ unsafe {
+ (
+ MaybeUninit::slice_assume_init_ref(src),
+ MaybeUninit::slice_assume_init_mut(dst),
+ )
+ }
+}
+
+pub fn benchmarks(crit: &mut Criterion) {
+ fn steps()
+ -> impl Iterator<Item = (impl Fn(&'static str) -> BenchmarkId, usize, Throughput)>
+ {
+ SCALARS.iter().map(|&n| {
+ (
+ move |name| BenchmarkId::new(name, n),
+ n * FACTOR * bits_of::<u8>(),
+ Throughput::Bytes((n * FACTOR) as u64),
+ )
+ })
+ }
+
+ let (src_words, mut dst_words) =
+ make_slots::<usize, { elts::<usize>(MAX_BITS) }>();
+ let (src_bytes, mut dst_bytes) =
+ make_slots::<u8, { elts::<u8>(MAX_BITS) }>();
+
+ let (src_words, dst_words) = view_slots(&src_words, &mut dst_words);
+ let (src_bytes, dst_bytes) = view_slots(&src_bytes, &mut dst_bytes);
+
+ macro_rules! mkgrp {
+ ($crit:ident, $name:literal) => {
+ (&mut *$crit).benchmark_group($name).tap_mut(|grp| {
+ grp.sampling_mode(SamplingMode::Flat).sample_size(2000);
+ })
+ };
+ }
+
+ let mut group = mkgrp!(crit, "Element-wise");
+ for (id, bits, thrpt) in steps() {
+ group.throughput(thrpt);
+ let words = bits / bits_of::<usize>();
+ let bytes = bits / bits_of::<u8>();
+
+ let (src_words, dst_words) =
+ (&src_words[.. words], &mut dst_words[.. words]);
+ let (src_bytes, dst_bytes) =
+ (&src_bytes[.. bytes], &mut dst_bytes[.. bytes]);
+
+ // Use the builtin memcpy to run the slices in bulk. This ought to be a
+ // lower bound on execution time.
+
+ group.bench_function(id("words_plain"), |b| {
+ let (src, dst) = (src_words, &mut *dst_words);
+ b.iter(|| dst.copy_from_slice(src))
+ });
+ group.bench_function(id("bytes_plain"), |b| {
+ let (src, dst) = (src_bytes, &mut *dst_bytes);
+ b.iter(|| dst.copy_from_slice(src))
+ });
+
+ group.bench_function(id("words_manual"), |b| {
+ let (src, dst) = (src_words, &mut *dst_words);
+ b.iter(|| {
+ for (from, to) in src.iter().zip(dst.iter_mut()) {
+ *to = *from;
+ }
+ })
+ });
+ group.bench_function(id("bytes_manual"), |b| {
+ let (src, dst) = (src_bytes, &mut *dst_bytes);
+ b.iter(|| {
+ for (from, to) in src.iter().zip(dst.iter_mut()) {
+ *to = *from;
+ }
+ })
+ });
+ }
+ group.finish();
+
+ let mut group = mkgrp!(crit, "Bit-wise accelerated");
+ for (id, bits, thrpt) in steps() {
+ group.throughput(thrpt);
+ let words = bits / bits_of::<usize>();
+ let bytes = bits / bits_of::<u8>();
+
+ let (src_words, dst_words) =
+ (&src_words[.. words], &mut dst_words[.. words]);
+ let (src_bytes, dst_bytes) =
+ (&src_bytes[.. bytes], &mut dst_bytes[.. bytes]);
+
+ // Ideal bitwise memcpy: no edges, same typarams, fully aligned.
+
+ group.bench_function(id("bits_words_plain"), |b| {
+ let (src, dst) = (
+ src_words.view_bits::<Lsb0>(),
+ dst_words.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| dst.copy_from_bitslice(src));
+ });
+ group.bench_function(id("bits_bytes_plain"), |b| {
+ let (src, dst) = (
+ src_bytes.view_bits::<Lsb0>(),
+ dst_bytes.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| dst.copy_from_bitslice(src));
+ });
+
+ // Same typarams, fully aligned, with fuzzed edges.
+
+ group.bench_function(id("bits_words_edges"), |b| {
+ let src = src_words.view_bits::<Lsb0>();
+ let len = src.len();
+ let (src, dst) = (
+ &src[10 .. len - 10],
+ &mut dst_words.view_bits_mut::<Lsb0>()[10 .. len - 10],
+ );
+ b.iter(|| dst.copy_from_bitslice(src));
+ });
+ group.bench_function(id("bits_bytes_edges"), |b| {
+ let src = src_bytes.view_bits::<Lsb0>();
+ let len = src.len();
+ let (src, dst) = (
+ &src[10 .. len - 10],
+ &mut dst_bytes.view_bits_mut::<Lsb0>()[10 .. len - 10],
+ );
+ b.iter(|| dst.copy_from_bitslice(src));
+ });
+
+ // Same typarams, misaligned.
+
+ group.bench_function(id("bits_words_misalign"), |b| {
+ let src = &src_words.view_bits::<Lsb0>()[10 ..];
+ let dst = &mut dst_words.view_bits_mut::<Lsb0>()[.. src.len()];
+ b.iter(|| dst.copy_from_bitslice(src));
+ });
+ group.bench_function(id("bits_bytes_misalign"), |b| {
+ let src = &src_bytes.view_bits::<Lsb0>()[10 ..];
+ let dst = &mut dst_bytes.view_bits_mut::<Lsb0>()[.. src.len()];
+ b.iter(|| dst.copy_from_bitslice(src));
+ });
+ }
+ group.finish();
+
+ let mut group = mkgrp!(crit, "Bit-wise crawl");
+ for (id, bits, thrpt) in steps() {
+ group.throughput(thrpt);
+ let words = bits / bits_of::<usize>();
+ let bytes = bits / bits_of::<u8>();
+
+ let (src_words, dst_words) =
+ (&src_words[.. words], &mut dst_words[.. words]);
+ let (src_bytes, dst_bytes) =
+ (&src_bytes[.. bytes], &mut dst_bytes[.. bytes]);
+
+ // Mismatched type parameters
+
+ group.bench_function(id("bits_words_mismatched"), |b| {
+ let (src, dst) = (
+ src_words.view_bits::<Msb0>(),
+ dst_words.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| dst.clone_from_bitslice(src));
+ });
+ group.bench_function(id("bits_bytes_mismatched"), |b| {
+ let (src, dst) = (
+ src_bytes.view_bits::<Msb0>(),
+ dst_bytes.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| dst.clone_from_bitslice(src));
+ });
+
+ // Crawl each bit individually. This ought to be an upper bound on
+ // execution time.
+
+ group.bench_function(id("bitwise_words"), |b| {
+ let (src, dst) = (
+ src_words.view_bits::<Lsb0>(),
+ dst_words.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| unsafe {
+ for (from, to) in
+ src.as_bitptr_range().zip(dst.as_mut_bitptr_range())
+ {
+ to.write(from.read());
+ }
+ })
+ });
+ group.bench_function(id("bitwise_bytes"), |b| {
+ let (src, dst) = (
+ src_bytes.view_bits::<Lsb0>(),
+ dst_bytes.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| unsafe {
+ for (from, to) in
+ src.as_bitptr_range().zip(dst.as_mut_bitptr_range())
+ {
+ to.write(from.read());
+ }
+ })
+ });
+
+ group.bench_function(id("bitwise_words_mismatch"), |b| {
+ let (src, dst) = (
+ src_words.view_bits::<Lsb0>(),
+ dst_words.view_bits_mut::<Msb0>(),
+ );
+ b.iter(|| unsafe {
+ for (from, to) in
+ src.as_bitptr_range().zip(dst.as_mut_bitptr_range())
+ {
+ to.write(from.read());
+ }
+ })
+ });
+ group.bench_function(id("bitwise_bytes_mismatch"), |b| {
+ let (src, dst) = (
+ src_bytes.view_bits::<Msb0>(),
+ dst_bytes.view_bits_mut::<Lsb0>(),
+ );
+ b.iter(|| unsafe {
+ for (from, to) in
+ src.as_bitptr_range().zip(dst.as_mut_bitptr_range())
+ {
+ to.write(from.read());
+ }
+ })
+ });
+ }
+}
+
+criterion_group!(benches, benchmarks);
+criterion_main!(benches);
diff --git a/benches/mut_access.rs b/benches/mut_access.rs
new file mode 100644
index 0000000..046ec75
--- /dev/null
+++ b/benches/mut_access.rs
@@ -0,0 +1,25 @@
+#![feature(test)]
+
+extern crate test;
+
+use bitvec::prelude::*;
+use test::Bencher;
+
+#[bench]
+fn iter_mut(b: &mut Bencher) {
+ let mut bits = bitarr![0; 500];
+ b.iter(|| bits.iter_mut().for_each(|mut b| *b = !*b));
+}
+
+#[bench]
+#[allow(deprecated)]
+fn native_for_each(b: &mut Bencher) {
+ let mut bits = bitarr![0; 500];
+ b.iter(|| bits.for_each(|_, b| !b));
+}
+
+#[bench]
+fn copy_within(b: &mut Bencher) {
+ let mut a = bitarr![1, 1, 1, 1, 0, 0, 0, 0];
+ b.iter(|| a.copy_within(.. 4, 2));
+}
diff --git a/benches/slice.rs b/benches/slice.rs
new file mode 100644
index 0000000..ad9d7f4
--- /dev/null
+++ b/benches/slice.rs
@@ -0,0 +1,139 @@
+#![feature(test)]
+
+extern crate test;
+
+use bitvec::prelude::*;
+use test::{
+ bench::black_box,
+ Bencher,
+};
+
+/* `BitSlice::empty` is not benched, because the compiler const-folds it. It
+is not a `const fn`, but it has exactly one function call, which is `const`, and
+creates a value object from that function. As such, the compiler can prove that
+the return value is a `const` value, and insert the value at all
+`BitSlice::empty` call sites. It takes 0ns.
+*/
+
+#[bench]
+fn element(b: &mut Bencher) {
+ b.iter(|| BitSlice::<u8, Msb0>::from_element(&!0));
+ b.iter(|| BitSlice::<u8, Lsb0>::from_element(&!0));
+ b.iter(|| BitSlice::<u16, Msb0>::from_element(&!0));
+ b.iter(|| BitSlice::<u16, Lsb0>::from_element(&!0));
+ b.iter(|| BitSlice::<u32, Msb0>::from_element(&!0));
+ b.iter(|| BitSlice::<u32, Lsb0>::from_element(&!0));
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ b.iter(|| BitSlice::<u64, Msb0>::from_element(&!0));
+ b.iter(|| BitSlice::<u64, Lsb0>::from_element(&!0));
+ }
+}
+
+#[bench]
+fn slice(b: &mut Bencher) {
+ b.iter(|| BitSlice::<u8, Msb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ b.iter(|| BitSlice::<u8, Lsb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ b.iter(|| BitSlice::<u16, Msb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ b.iter(|| BitSlice::<u16, Lsb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ b.iter(|| BitSlice::<u32, Msb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ b.iter(|| BitSlice::<u32, Lsb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ b.iter(|| BitSlice::<u64, Msb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ b.iter(|| BitSlice::<u64, Lsb0>::from_slice(&[0, 1, !0 - 1, !0][..]));
+ }
+}
+
+#[bench]
+fn len(b: &mut Bencher) {
+ let bsb08 = [0u8; 16].view_bits::<Msb0>();
+ let bsl08 = [0u8; 16].view_bits::<Lsb0>();
+ b.iter(|| bsb08.len());
+ b.iter(|| bsl08.len());
+
+ let bsb16 = [0u16; 8].view_bits::<Msb0>();
+ let bsl16 = [0u16; 8].view_bits::<Lsb0>();
+ b.iter(|| bsb16.len());
+ b.iter(|| bsl16.len());
+
+ let bsb32 = [0u32; 4].view_bits::<Msb0>();
+ let bsl32 = [0u32; 4].view_bits::<Lsb0>();
+ b.iter(|| bsb32.len());
+ b.iter(|| bsl32.len());
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let bsb64 = [0u64; 2].view_bits::<Msb0>();
+ let bsl64 = [0u64; 2].view_bits::<Lsb0>();
+ b.iter(|| bsb64.len());
+ b.iter(|| bsl64.len());
+ }
+}
+
+// This index value is not only "nice", it also ensures that the hard path is
+// hit in `BitIdx::offset`.
+#[bench]
+fn index(b: &mut Bencher) {
+ let bsb08 = [0u8; 16].view_bits::<Msb0>();
+ let bsl08 = [0u8; 16].view_bits::<Lsb0>();
+ b.iter(|| assert!(!black_box(bsb08)[black_box(69)]));
+ b.iter(|| assert!(!black_box(bsl08)[black_box(69)]));
+
+ let bsb16 = [0u16; 8].view_bits::<Msb0>();
+ let bsl16 = [0u16; 8].view_bits::<Lsb0>();
+ b.iter(|| assert!(!black_box(bsb16)[black_box(69)]));
+ b.iter(|| assert!(!black_box(bsl16)[black_box(69)]));
+
+ let bsb32 = [0u32; 4].view_bits::<Msb0>();
+ let bsl32 = [0u32; 4].view_bits::<Lsb0>();
+ b.iter(|| assert!(!black_box(bsb32)[black_box(69)]));
+ b.iter(|| assert!(!black_box(bsl32)[black_box(69)]));
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let bsb64 = [0u64; 2].view_bits::<Msb0>();
+ let bsl64 = [0u64; 2].view_bits::<Lsb0>();
+ b.iter(|| assert!(!black_box(bsb64)[black_box(69)]));
+ b.iter(|| assert!(!black_box(bsl64)[black_box(69)]));
+ }
+}
+
+/* This routine has more work to do: index, create a reference struct, and drop
+it. The compiler *should* be able to properly arrange immediate drops, though.
+*/
+#[bench]
+fn get_mut(b: &mut Bencher) {
+ let mut src = [0u8; 16];
+ let bsb08 = src.view_bits_mut::<Msb0>();
+ b.iter(|| *bsb08.get_mut(69).unwrap() = true);
+ let mut src = [0u8; 16];
+ let bsl08 = src.view_bits_mut::<Lsb0>();
+ b.iter(|| *bsl08.get_mut(69).unwrap() = true);
+
+ let mut src = [0u16; 8];
+ let bsb16 = src.view_bits_mut::<Msb0>();
+ b.iter(|| *bsb16.get_mut(69).unwrap() = true);
+ let mut src = [0u16; 8];
+ let bsl16 = src.view_bits_mut::<Lsb0>();
+ b.iter(|| *bsl16.get_mut(69).unwrap() = true);
+
+ let mut src = [0u32; 4];
+ let bsb32 = src.view_bits_mut::<Msb0>();
+ b.iter(|| *bsb32.get_mut(69).unwrap() = true);
+ let mut src = [0u32; 4];
+ let bsl32 = src.view_bits_mut::<Lsb0>();
+ b.iter(|| *bsl32.get_mut(69).unwrap() = true);
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let mut src = [0u64; 2];
+ let bsb64 = src.view_bits_mut::<Msb0>();
+ b.iter(|| *bsb64.get_mut(69).unwrap() = true);
+ let mut src = [0u64; 2];
+ let bsl64 = src.view_bits_mut::<Lsb0>();
+ b.iter(|| *bsl64.get_mut(69).unwrap() = true);
+ }
+}
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 0000000..a666c0c
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,14 @@
+# `bitvec` API Documentation
+
+Rust release `1.54` stabilized the use of `#[doc = include_str!()]`, which
+allows documentation to be sourced from external files. This directory contains
+the Rustdoc API documentation for items whose text is larger than a comment
+block warrants.
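+
+For example, an item can attach one of these files as its documentation (an
+illustrative sketch; the real items and paths vary throughout the crate):
+
+```rust,ignore
+#[doc = include_str!("../doc/array/BitArray.md")]
+pub struct ExampleItem;
+```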
+
+The files here use Rustdoc’s ability to resolve symbol paths as link references,
+and so will not render correctly in other Markdown viewers. The target renderer
+is Rustdoc, not CommonMark.
+
+Module and type documentation should generally be moved to this directory;
+function, struct field, and enum variant documentation should generally stay
+in source.
diff --git a/doc/access.md b/doc/access.md
new file mode 100644
index 0000000..a8a6ddc
--- /dev/null
+++ b/doc/access.md
@@ -0,0 +1,22 @@
+# Memory Bus Access Management
+
+`bitvec` allows a program to produce handles over memory that do not *logically*
+alias their bits, but *may* alias their hardware locations. This module provides
+a unified interface for memory accesses that can be specialized to handle such
+aliased and unaliased events.
+
+The [`BitAccess`] trait provides capabilities to access individual or clustered
+bits in memory elements through shared, maybe-aliased, references. Its
+implementations are responsible for coördinating synchronization and contention
+as needed.
+
+The [`BitSafe`] trait guards [`Radium`] types in order to forbid writing through
+shared-only references, and require access to an `&mut` exclusive reference for
+modification. This permits other components in the crate that do *not* have
+`BitSafe` reference guards to safely mutate a referent element that a `BitSafe`d
+reference can observe, while preventing that reference from emitting mutations
+of its own.
+
+[`BitAccess`]: self::BitAccess
+[`BitSafe`]: self::BitSafe
+[`Radium`]: radium::Radium
diff --git a/doc/access/BitAccess.md b/doc/access/BitAccess.md
new file mode 100644
index 0000000..bea026d
--- /dev/null
+++ b/doc/access/BitAccess.md
@@ -0,0 +1,40 @@
+# Bit-Level Access Instructions
+
+This trait extends [`Radium`] in order to manipulate specific bits in an element
+according to the crate’s logic. It drives all memory access instructions and is
+responsible for translating the bit-selection logic of the [`index`] module into
+real effects.
+
+This is blanket-implemented on all types that permit shared-mutable memory
+access via the [`radium`] crate. Its use is constrained in the [`store`] module.
+It is required to be a publicly accessible symbol, as it is exported in other
+traits, but it is a crate-internal item and is not part of the public API. Its
+blanket implementation for `<R: Radium>` prevents any other implementations from
+being written.
+
+## Implementation and Safety Notes
+
+This trait is automatically implemented for all types that implement `Radium`,
+and relies exclusively on `Radium`’s API and implementations for its work. In
+particular, `Radium` has no functions which operate on **pointers**: it
+exclusively operates on memory through **references**. Since references must
+always refer to initialized memory, `BitAccess` and, by extension, all APIs in
+`bitvec` that touch memory, cannot be used to operate on uninitialized memory in
+any way.
+
+While you may *create* a `bitvec` pointer object that targets uninitialized
+memory, you may not *dereference* it until the targeted memory has been wholly
+initialized with integer values.
+
+This restriction cannot be loosened without stable access to pointer-based
+atomic intrinsics in the Rust standard library and corresponding updates to the
+`Radium` trait.
+
+Do not attempt to access uninitialized memory through `bitvec`. Doing so will
+cause `bitvec` to produce references to uninitialized memory, which is undefined
+behavior.
+
+[`Radium`]: radium::Radium
+[`index`]: crate::index
+[`radium`]: radium
+[`store`]: crate::store
diff --git a/doc/access/BitSafe.md b/doc/access/BitSafe.md
new file mode 100644
index 0000000..6f2dd7c
--- /dev/null
+++ b/doc/access/BitSafe.md
@@ -0,0 +1,16 @@
+# Read-Only Semivolatile Handle
+
+This trait describes views of memory that are not permitted to modify the value
+they reference, but must tolerate external modification to that value.
+Implementors must tolerate shared-mutability behaviors, but are not allowed to
+expose shared mutation APIs. They are permitted to modify the referent only
+under `&mut` exclusive references.
+
+This behavior enables an important aspect of the `bitvec` memory model when
+working with memory elements that multiple [`&mut BitSlice`][0] references
+touch: each `BitSlice` needs to be able to give the caller a view of the memory
+element, but they also need to prevent modification of bits outside of their
+span. This trait enables callers to view raw underlying memory without
+improperly modifying memory that *other* `&mut BitSlice`s expect to be stable.
+
+[0]: crate::slice::BitSlice
diff --git a/doc/access/impl_BitSafe.md b/doc/access/impl_BitSafe.md
new file mode 100644
index 0000000..912d0f7
--- /dev/null
+++ b/doc/access/impl_BitSafe.md
@@ -0,0 +1,12 @@
+# Read-Only Shared-Mutable Handle
+
+This type marks a handle to a shared-mutable type that may be modified through
+*other* handles, but cannot be modified through *this* one. It is used when a
+[`BitSlice`] region has partial ownership of an element and wishes to expose the
+entire underlying raw element to the user without granting them write
+permissions.
+
+Under the `feature = "atomic"` build setting, this uses `radium`’s best-effort
+atomic alias; when this feature is disabled, it reverts to `Cell`.
+
+[`BitSlice`]: crate::slice::BitSlice
diff --git a/doc/array.md b/doc/array.md
new file mode 100644
index 0000000..0170563
--- /dev/null
+++ b/doc/array.md
@@ -0,0 +1,21 @@
+# Statically-Allocated, Fixed-Size, Bit Buffer
+
+This module defines a port of the [array fundamental][0] and its APIs. The
+primary export is the [`BitArray`] structure. This is a thin wrapper over
+`[T; N]` that provides a [`BitSlice`] view of its contents and is *roughly*
+analogous to the C++ type [`std::bitset<N>`].
+
+See the `BitArray` documentation for more details on its usage.
+
+## Submodules
+
+- `api` contains ports of the standard library’s array type and `core::array`
+ module.
+- `iter` contains ports of array iteration.
+- `ops` defines operator-sigil traits.
+- `traits` defines all the other traits.
+
+[0]: https://doc.rust-lang.org/std/primitive.array.html
+[`BitArray`]: self::BitArray
+[`BitSlice`]: crate::slice::BitSlice
+[`std::bitset<N>`]: https://en.cppreference.com/w/cpp/utility/bitset
diff --git a/doc/array/BitArray.md b/doc/array/BitArray.md
new file mode 100644
index 0000000..37a7cb3
--- /dev/null
+++ b/doc/array/BitArray.md
@@ -0,0 +1,108 @@
+# Bit-Precision Array Immediate
+
+This type is a wrapper over the [array fundamental][0] `[T; N]` that views its
+contents as a [`BitSlice`] region. As an array, it can be held directly by value
+and does not require an indirection such as the `&BitSlice` reference.
+
+## Original
+
+[`[T; N]`](https://doc.rust-lang.org/std/primitive.array.html)
+
+## Usage
+
+`BitArray` is a Rust analogue of the C++ [`std::bitset<N>`] container. However,
+restrictions in the Rust type system do not allow specifying exact bit lengths
+in the array type. Instead, you must specify a storage array large enough to
+contain all the bits you want.
+
+Because `BitArray` is a plain-old-data object, its fields are public and it has
+no restrictions on its interior value. You can freely access the interior
+storage and move data in or out of the `BitArray` type with no cost.
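+
+For example (a brief sketch of direct storage access; the bit placement shown
+assumes the `Msb0` ordering):
+
+```rust
+use bitvec::prelude::*;
+
+let mut arr: BitArr!(for 16, in u8, Msb0) = BitArray::new([0u8; 2]);
+arr.set(3, true);
+// The interior storage is a public field, and can be taken out freely.
+assert_eq!(arr.data, [0b0001_0000, 0]);
+```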
+
+As a convenience, the [`BitArr!`] type-constructor macro can produce correct
+type definitions from an exact bit count and your memory-layout type parameters.
+Values of that type can then be built from the [`bitarr!`] *value*-constructor
+macro:
+
+```rust
+use bitvec::prelude::*;
+
+type Example = BitArr!(for 43, in u32, Msb0);
+let example: Example = bitarr!(u32, Msb0; 1; 33);
+
+struct HasBitfield {
+ inner: Example,
+}
+
+let ex2 = HasBitfield {
+ inner: BitArray::new([1, 2]),
+};
+```
+
+Note that the actual type of the `Example` alias is `BitArray<[u32; 2], Msb0>`,
+as `[u32; 2]` is the smallest `u32` array able to hold 43 bits (`ceil(43 / 32)`
+is `2`), so the `bitarr!` macro can accept any number of bits in `33 .. 65` and
+will produce a value of the correct type.
+
+## Type Parameters
+
+`BitArray` differs from the other data structures in the crate in that it does
+not take a `T: BitStore` parameter, but rather takes `A: BitViewSized`. That
+trait is implemented by all `T: BitStore` scalars and all `[T; N]` arrays of
+them, and provides the logic to translate the aggregate storage into the memory
+sequence that the crate expects.
+
+As with all `BitSlice` regions, the `O: BitOrder` parameter specifies the
+ordering of bits within a single `A::Store` element.
+
+## Future API Changes
+
+Exact bit lengths cannot be encoded into the `BitArray` type until the
+const-generics system in the compiler can allow type-level computation on type
+integers. When this stabilizes, `bitvec` will issue a major upgrade that
+replaces the `BitArray<A, O>` definition with `BitArray<T, O, const N: usize>`
+and match the C++ `std::bitset<N>` definition.
+
+## Large Bit-Arrays
+
+As with ordinary arrays, large bit-arrays can be expensive to move by value,
+and are generally better given a fixed location: an actual `static` binding, a
+long-lived slot low in the call stack, or a heap allocation. While you
+certainly can `Box<[BitArray<A, O>]>` directly, you may instead prefer the
+[`BitBox`] or [`BitVec`] heap-allocated regions. These offer the same storage
+behavior and are better optimized than `Box<BitArray>` for working with the
+contained `BitSlice` region.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+const WELL_KNOWN: BitArr!(for 16, in u8, Lsb0) = BitArray::<[u8; 2], Lsb0> {
+ data: *b"bv",
+ ..BitArray::ZERO
+};
+
+struct HasBitfields {
+ inner: BitArr!(for 50, in u8, Lsb0),
+}
+
+impl HasBitfields {
+ fn new() -> Self {
+ Self {
+ inner: bitarr!(u8, Lsb0; 0; 50),
+ }
+ }
+
+ fn some_field(&self) -> &BitSlice<u8, Lsb0> {
+ &self.inner[2 .. 52]
+ }
+}
+```
+
+[0]: https://doc.rust-lang.org/std/primitive.array.html
+[`BitArr!`]: macro@crate::BitArr
+[`BitBox`]: crate::boxed::BitBox
+[`BitSlice`]: crate::slice::BitSlice
+[`BitVec`]: crate::vec::BitVec
+[`bitarr!`]: macro@crate::bitarr
+[`std::bitset<N>`]: https://en.cppreference.com/w/cpp/utility/bitset
diff --git a/doc/array/IntoIter.md b/doc/array/IntoIter.md
new file mode 100644
index 0000000..04f3d02
--- /dev/null
+++ b/doc/array/IntoIter.md
@@ -0,0 +1,8 @@
+# Bit-Array Iteration
+
+This structure wraps a bit-array and provides by-value iteration of the bits it
+contains.
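+
+A short sketch of its use, reached through `BitArray`’s `IntoIterator`
+implementation (note that the array’s padding bits are yielded as well):
+
+```rust
+use bitvec::prelude::*;
+
+let arr = bitarr![u8, Msb0; 1, 0, 1, 1];
+// By-value iteration yields each bit as a `bool`.
+let head: Vec<bool> = arr.into_iter().take(4).collect();
+assert_eq!(head, [true, false, true, true]);
+```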
+
+## Original
+
+[`array::IntoIter`](core::array::IntoIter)
diff --git a/doc/array/TryFromBitSliceError.md b/doc/array/TryFromBitSliceError.md
new file mode 100644
index 0000000..f512a81
--- /dev/null
+++ b/doc/array/TryFromBitSliceError.md
@@ -0,0 +1,16 @@
+# Bit-Slice to Bit-Array Conversion Error
+
+This error is produced when an `&BitSlice` view cannot be recast as a
+`&BitArray` view with the same parameters.
+
+Unlike ordinary scalars and arrays, where arrays are never aligned more
+stringently than their components, `BitSlice` is aligned to an individual bit
+while `BitArray` is aligned to its `A` storage type.
+
+This is produced whenever a `&BitSlice` view is not exactly as long as the
+destination `&BitArray` view, or does not begin at the zeroth bit of an
+`A::Store` element.
+
+## Original
+
+[`array::TryFromSliceError`](core::array::TryFromSliceError)
diff --git a/doc/array/api.md b/doc/array/api.md
new file mode 100644
index 0000000..3a485fb
--- /dev/null
+++ b/doc/array/api.md
@@ -0,0 +1,19 @@
+# Port of Array Inherent Methods
+
+This module ports the inherent methods available on the [array] fundamental
+type.
+
+As of 1.56, only `.map()` is stable. The `.as_slice()` and `.as_mut_slice()`
+methods are ported, as the *behavior* has always been stable, and only the name
+is new.
+
+The remaining methods (as of 1.56, `.each_mut()`, `.each_ref()`, `.zip()`) are
+not ported. While `BitArray` is capable of implementing their behavior with the
+existing crate APIs, the `const`-generic system is not yet able to allow
+construction of an array whose length is dependent on an associated `const` in a
+type parameter.
+
+These methods will not be available until the `const`-generic system improves
+enough for `bitvec 2` to use the proper `BitArray` API.
+
+[array]: https://doc.rust-lang.org/std/primitive.array.html
diff --git a/doc/array/iter.md b/doc/array/iter.md
new file mode 100644
index 0000000..87a7377
--- /dev/null
+++ b/doc/array/iter.md
@@ -0,0 +1,5 @@
+# Bit-Array Iteration
+
+This module defines the core iteration logic for `BitArray`. It includes the
+`IntoIterator` implementations on bit-arrays and their references, as well as
+the `IntoIter` struct that walks bit-arrays by value.
diff --git a/doc/boxed.md b/doc/boxed.md
new file mode 100644
index 0000000..653067b
--- /dev/null
+++ b/doc/boxed.md
@@ -0,0 +1,15 @@
+# Heap-Allocated, Fixed-Size, Bit Buffer
+
+This module defines an analogue to `Box<[bool]>`, as `Box<BitSlice>` cannot be
+constructed or used in any way. Like `Box<[T]>`, this is a heap allocation that
+can modify its contents, but cannot resize the collection. The `BitBox` value is
+an owning [`*BitSlice`] pointer, and can be used to access its contents without
+any decoding.
+
+You should generally prefer [`BitVec`] or [`BitArray`]; however, very large
+`BitArray`s are likely better served by being copied into a `BitBox` rather
+than being boxed directly when moved into the heap.
+
+[`BitArray`]: crate::array::BitArray
+[`BitVec`]: crate::vec::BitVec
+[`*BitSlice`]: crate::slice::BitSlice
diff --git a/doc/boxed/BitBox.md b/doc/boxed/BitBox.md
new file mode 100644
index 0000000..90a5cca
--- /dev/null
+++ b/doc/boxed/BitBox.md
@@ -0,0 +1,59 @@
+# Fixed-Size, Heap-Allocated, Bit Slice
+
+`BitBox` is a heap-allocated [`BitSlice`] region. It is a distinct type because
+the implementation of bit-slice pointers means that `Box<BitSlice>` cannot
+exist. It can be created by cloning a bit-slice into the heap, or by freezing
+the allocation of a [`BitVec`].
+
+## Original
+
+[`Box<[T]>`](alloc::boxed::Box)
+
+## API Differences
+
+As with `BitSlice`, this takes a pair of [`BitOrder`] and [`BitStore`] type
+parameters to govern the buffer’s memory representation. Because `BitSlice` is
+unsized, `BitBox` has almost none of the `Box` API, and is difficult to use
+directly.
+
+## Behavior
+
+`BitBox`, like `&BitSlice`, is an opaque pointer to a bit-addressed slice
+region. Unlike `&BitSlice`, it uses the allocator to guarantee that it is the
+sole accessor to the referent buffer, and is able to use that uniqueness
+guarantee to specialize some `BitSlice` behavior to be faster or more efficient.
+
+## Safety
+
+`BitBox` is, essentially, a `NonNull<BitSlice<T, O>>` pointer. The internal
+value is opaque and cannot be inspected or modified by user code.
+
+If you attempt to do so, your program becomes inconsistent. You will likely
+break the allocator’s internal state and cause a crash. No guarantees of crash
+*or* recovery are provided. Do not inspect or modify the `BitBox` handle value.
+
+## Construction
+
+The simplest way to construct a `BitBox` is by using the [`bitbox!`] macro. You
+can also explicitly clone a `BitSlice` with [`BitBox::from_bitslice`], or freeze
+a `BitVec` with [`BitVec::into_boxed_bitslice`].
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let a = BitBox::from_bitslice(bits![1, 0, 1, 1, 0]);
+let b = bitbox![0, 1, 0, 0, 1];
+
+let b_raw: *mut BitSlice = BitBox::into_raw(b);
+let b_reformed = unsafe { BitBox::from_raw(b_raw) };
+```
+
+[`BitBox::from_bitslice`]: self::BitBox::from_bitslice
+[`BitOrder`]: crate::order::BitOrder
+[`BitSlice`]: crate::slice::BitSlice
+[`BitStore`]: crate::store::BitStore
+[`BitVec`]: crate::vec::BitVec
+[`BitVec::into_boxed_bitslice`]: crate::vec::BitVec::into_boxed_bitslice
+[`bitbox!`]: macro@crate::bitbox
diff --git a/doc/boxed/iter.md b/doc/boxed/iter.md
new file mode 100644
index 0000000..096c1bc
--- /dev/null
+++ b/doc/boxed/iter.md
@@ -0,0 +1,11 @@
+# Boxed Bit-Slice Iteration
+
+This module contains the by-value iterator used by both `BitBox` and `BitVec`.
+In the standard library, this iterator is defined under `alloc::vec`, not
+`alloc::boxed`, as `Box` already has an iteration implementation that forwards
+to its boxed value.
+
+It is moved here for simplicity: both `BitBox` and `BitVec` iterate over a
+dynamic bit-slice by value, and must deällocate the region when dropped. As
+`BitBox` has a smaller value than `BitVec`, it is used as the owning handle for
+the bit-slice being iterated.
diff --git a/doc/domain.md b/doc/domain.md
new file mode 100644
index 0000000..6c49c50
--- /dev/null
+++ b/doc/domain.md
@@ -0,0 +1,132 @@
+# Memory Region Description
+
+This module bridges the abstract [`BitSlice`] region to real memory by
+segmenting any bit-slice along its maybe-aliased and known-unaliased boundaries.
+This segmentation applies to both bit-slice and ordinary-element views of
+memory, and can be used to selectively remove alias restrictions or to enable
+access to the underlying memory with ordinary types.
+
+The four enums in this module all intentionally have the same variants by name
+and shape, in order to maintain textual consistency.
+
+## Memory Layout Model
+
+Any bit-slice resident in memory has one of two major kinds, which the enums in
+this module refer to as `Enclave` and `Region`
+
+### Enclave
+
+An `Enclave` layout occurs when a bit-slice is contained entirely within a
+single memory element, and does not include either the initial or final semantic
+index in its span.
+
+```text
+[ 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 ]
+[ ^^^^^^^^^^^^^^^^^^^^^ ]
+```
+
+In an 8-bit element, a bit-slice is considered to be an `Enclave` if it is
+contained entirely in the marked interior bits, and touches *neither* bit 7 nor
+bit 0. Wider elements may touch interior byte boundaries, and only restrict bits
+0 and `width - 1`.
+
+### Region
+
+A `Region` layout occurs when a bit-slice consists of:
+
+- zero or one half-spanned head element (excludes bit 0, includes `width - 1`)
+- zero or more fully-spanned body elements (each includes both 0 and `width - 1`)
+- zero or one half-spanned tail element (includes bit 0, excludes `width - 1`)
+
+Each of these three sections is optionally present independently of the other
+two. That is, in the following three bytes, all of the following bit-slices have
+the `Region` layout:
+
+```text
+[ 7 6 5 4 3 2 1 0 ] [ 7 6 5 4 3 2 1 0 ] [ 7 6 5 4 3 2 1 0 ]
+[ ]
+
+[ h h h h ]
+[ b b b b b b b b ]
+[ t t t t ]
+
+[ h h h h t t t t ]
+
+[ h h h h b b b b b b b b ]
+[ b b b b b b b b t t t t ]
+[ h h h h b b b b b b b b t t t t ]
+```
+
+1. The empty bit-slice is a region with all of its segments blank.
+1. A bit-slice with one element that touches `width - 1` but not 0 has a head,
+ but no body or tail.
+1. A bit-slice that touches both `0` and `width - 1` of any number of elements
+ has a body, but no head or tail.
+1. A bit-slice with one element that touches 0 but not `width - 1` has a tail,
+ but no head or body.
+1. A bit-slice with two elements that touches neither 0 of the first nor
+ `width - 1` of the second (but by definition `width - 1` of the first and 0
+ of the second; bit-slices are contiguous) has a head and tail, but no body.
+
+The final three rows show how the individual segments can be composed to
+describe all possible bit-slices.
+
+## Aliasing Awareness
+
+The contiguity property of `BitSlice` combines with the `&`/`&mut` exclusion
+rules of the Rust language to provide additional information about the state of
+the program that allows a given bit-slice to exist.
+
+Specifically, any well-formed Rust program knows that *if* a bit-slice is able
+to produce a `Region.body` segment, *then* that body is not aliased by `bitvec`,
+and can safely transition to the `T::Unalias` state. Alias-permitting types like
+`Cell` and the atomics will never change their types (because `bitvec` cannot
+know that there are no views to a region other than what it has been given), but
+a tainted `BitSlice<u8::Alias, O>` bit-slice can revert its interior body back
+to `u8` and no longer require the alias tainting.
+
+The head and tail segments do not retain their history, and cannot tell whether
+they have been created by splitting or by shrinking, so they do not change their
+types at all.
+
+## Raw Memory Access
+
+The [`BitDomain`] enum only splits a bit-slice along these boundaries, and
+allows a bit-slice view to safely shed aliasing protection added to it by
+[`.split_at_mut()`].
+
+The [`Domain`] enum completely sheds its bit-precision views, and reverts to
+ordinary element accesses. The body segment is an ordinary Rust slice with no
+additional information or restriction; it can be freely used without regard for
+any of `bitvec`’s constraints.
+
+In order to preserve the rules that any given bit-slice can never be used to
+affect bits outside of its own view of memory, the underlying memory of the head
+and tail segments is only made accessible through a [`PartialElement`] reference
+guard. This guard is an opaque proxy to the memory location, and holds both a
+reference and the bit-mask required to prevent reading from or writing to the
+bits outside the scope of the originating bit-slice.
+
+## Generics
+
+This module, and the contents of [`ptr`], make extensive use of a trait-level
+mutability and reference tracking system in order to reduce code duplication and
+provide a more powerful development environment than would be achieved with
+macros.
+
+As such, the trait bounds on types in this module are more intense than the
+standard `<T, O>` fare in the crate’s main data structures. However, they are
+only ever instantiated with shared or exclusive references, and all of the
+bounds are a much more verbose way of saying “a reference, that is maybe-mut and
+maybe-slice, of `T`”.
+
+User code does not need to be aware of any of this: the `BitSlice` APIs that
+call into this module always result in structures where the complex bounds are
+reduced to ordinary slice references.
+
+[`BitDomain`]: BitDomain
+[`BitSlice`]: crate::slice::BitSlice
+[`Domain`]: Domain
+[`PartialElement`]: PartialElement
+[`ptr`]: crate::ptr
+[`.split_at_mut()`]: crate::slice::BitSlice::split_at_mut
diff --git a/doc/domain/BitDomain.md b/doc/domain/BitDomain.md
new file mode 100644
index 0000000..2ddb19e
--- /dev/null
+++ b/doc/domain/BitDomain.md
@@ -0,0 +1,30 @@
+# Bit-Slice Partitioning
+
+This enum partitions a bit-slice into its head- and tail- edge bit-slices, and
+its interior body bit-slice, according to the definitions laid out in the module
+documentation.
+
+It fragments a [`BitSlice`] into smaller `BitSlice`s, and allows the interior
+bit-slice to become `::Unalias`ed. This is useful when you need to retain a
+bit-slice view of memory, but wish to remove synchronization costs imposed by a
+prior call to [`.split_at_mut()`] for as much of the bit-slice as possible.
+
+## Why Not `Option`?
+
+The `Enclave` variant always contains as its single field the exact bit-slice
+that created the `Enclave`. As such, this type is easily replaceäble with an
+`Option` of the `Region` variant, which when `None` is understood to be the
+original.
+
+This exists as a dedicated enum, even with a technically useless variant, in
+order to mirror the shape of the element-domain enum. This type should be
+understood as a shortcut to the end result of splitting by element-domain, then
+mapping each `PartialElement` and slice back into `BitSlice`s, rather than
+testing whether a bit-slice can be split on alias boundaries.
+
+You can get the alternate behavior, of testing whether or not a bit-slice can be
+split into a `Region` or is unsplittable, by calling `.bit_domain().region()`
+to produce exactly such an `Option`.
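+
+A small sketch of that alternate usage (relying on the `Option`-producing
+`.region()` accessor mentioned above):
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [0u8; 2];
+
+// Wholly interior to one element: it cannot be split, so no `Region` exists.
+let inner = &raw.view_bits::<Lsb0>()[2 .. 6];
+assert!(inner.bit_domain().region().is_none());
+
+// Touches the element boundary: it splits into edge and body bit-slices.
+let outer = &raw.view_bits::<Lsb0>()[2 .. 14];
+assert!(outer.bit_domain().region().is_some());
+```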
+
+[`BitSlice`]: crate::slice::BitSlice
+[`.split_at_mut()`]: crate::slice::BitSlice::split_at_mut
diff --git a/doc/domain/Domain.md b/doc/domain/Domain.md
new file mode 100644
index 0000000..c75413c
--- /dev/null
+++ b/doc/domain/Domain.md
@@ -0,0 +1,63 @@
+# Bit-Slice Element Partitioning
+
+This structure provides the bridge between bit-precision memory modeling and
+element-precision memory manipulation. It allows a bit-slice to provide a safe
+and correct view of the underlying memory elements, without exposing the values,
+or permitting mutation, of bits outside a bit-slice’s control but within the
+elements the bit-slice uses.
+
+Nearly all memory access that is not related to single-bit access goes through
+this structure, and it is highly likely to be in your hot path. Its code is a
+perpetual topic of optimization, and improvements are always welcome.
+
+This is essentially a fully-decoded `BitSpan` handle, in that it addresses
+memory elements directly and contains the bit-masks needed to selectively
+interact with them. It is therefore by necessity a large structure, and is
+usually only alive for a short time. It has a minimal API, as most of its
+logical operations are attached to `BitSlice`, and merely route through it.
+
+If your application cannot afford the cost of repeated `Domain` construction,
+please [file an issue][0].
+
+## Memory Model and Variants
+
+A given `BitSlice` has essentially two possibilities for where it resides in
+real memory:
+
+- it can reside entirely in the interior of exactly one memory element,
+ touching neither edge bit, or
+- it can touch at least one edge bit of zero or more elements.
+
+These states correspond to the `Enclave` and `Region` variants, respectively.
+
+When a `BitSlice` has only partial control of a given memory element, that
+element can only be accessed through the bit-slice’s provenance by a
+[`PartialElement`] handle. This handle is an appropriately-guarded reference to
+the underlying element, as well as mask information needed to interact with the
+raw bits and to manipulate the numerical contents. Each `PartialElement` guard
+carries permissions for *its own bits* within the guarded element, independently
+of any other handle that may access the element, and all handles are
+appropriately synchronized with each other to prevent race conditions.
+
+The `Enclave` variant is a single `PartialElement`. The `Region` variant is more
+complex. It has:
+
+1. an optional `PartialElement` for the case where the bit-slice only partially
+ occupies the lowest-addressed memory element it governs, starting after
+ bit-index 0 and extending up to the maximal bit-index,
+1. a slice of zero or more fully-occupied memory elements,
+1. an optional `PartialElement` for the case where it only partially occupies
+ the highest-addressed memory element it governs, starting at bit-index 0 and
+ ending before the maximal.
+
+## Usage
+
+Once created, match upon a `Domain` to access its fields. Each `PartialElement`
+has a [`.load_value()`][`PartialElement::load_value`] method that produces its
+stored value (with all ungoverned bits cleared to 0), and a `.store_value()`
+that writes into its governed bits. If present, the fully-occupied slice can be
+used as normal.
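+
+A brief sketch of that usage, assuming the field names described above:
+
+```rust
+use bitvec::prelude::*;
+use bitvec::domain::Domain;
+
+let raw = [0x3Cu8, 0xA5, 0x69];
+let bits = &raw.view_bits::<Lsb0>()[4 .. 20];
+
+// This span touches element edges, so it produces the `Region` variant.
+if let Domain::Region { head, body, tail } = bits.domain() {
+ // The fully-occupied interior is an ordinary `&[u8]` slice.
+ assert_eq!(body, &[0xA5]);
+ // Edge elements are masked: ungoverned bits read back as zero.
+ assert_eq!(head.unwrap().load_value(), 0x30);
+ assert_eq!(tail.unwrap().load_value(), 0x09);
+}
+```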
+
+[0]: https://github.com/bitvecto-rs/bitvec/issues/new
+[`PartialElement`]: crate::domain::PartialElement
+[`PartialElement::load_value`]: crate::domain::PartialElement::load_value
diff --git a/doc/domain/PartialElement.md b/doc/domain/PartialElement.md
new file mode 100644
index 0000000..d50b89e
--- /dev/null
+++ b/doc/domain/PartialElement.md
@@ -0,0 +1,30 @@
+# Partially-Owned Memory Element
+
+This type is a guarded reference to memory that permits interacting with it as
+an integer, but only allows views to the section of the integer that the
+producing handle has permission to observe. Unlike the `BitSafe` type family in
+the [`access`] module, it is not a transparent wrapper that can be used for
+reference conversion; it is a “heavy reference” that carries the mask and
+
+## Type Parameters
+
+- `T`: The type, including register width and alias information, of the
+ bit-slice handle that created it.
+- `O`: This propagates the bit-ordering type used by the [`BitSlice`] handle
+ that created it.
+
+## Lifetime
+
+This carries the lifetime of the bit-slice handle that created it.
+
+## Usage
+
+This structure is only created as part of the [`Domain`] region descriptions,
+and refers to partially-occupied edge elements. The underlying referent memory
+can be read with `.load_value()` or written with `.store_value()`, and the
+appropriate masking will be applied in order to restrict access to only the
+permitted bits.
+
+[`access`]: crate::access
+[`BitSlice`]: crate::slice::BitSlice
+[`Domain`]: Domain
diff --git a/doc/field.md b/doc/field.md
new file mode 100644
index 0000000..85ce935
--- /dev/null
+++ b/doc/field.md
@@ -0,0 +1,133 @@
+# Bit-Field Memory Slots
+
+This module implements a load/store protocol for [`BitSlice`] regions that
+enables them to act as if they were a storage slot for integers. Implementations
+of the [`BitField`] trait provide behavior similar to C and C++ language
+bit-fields. While any `BitSlice<T, O>` instantiation is able to provide this
+behavior, the lack of specialization in the language means that it is instead
+only implemented for `BitSlice<T, Lsb0>` and `BitSlice<T, Msb0>` in order to
+gain a performance advantage.
+
+## Batched Behavior
+
+Bit-field behavior can be simulated using `BitSlice`’s existing APIs; however,
+the inherent methods are all required to operate on each bit individually in
+sequence. In addition to the semantic load/store behavior this module describes,
+it also implements it in a way that takes advantage of the contiguity properties
+of the `Lsb0` and `Msb0` orderings in order to maximize how many bits are
+transferred in each cycle of the overall operation.
+
+This is most efficient when using `BitSlice<usize, O>` as the storage bit-slice,
+or using `.load::<usize>()` or `.store::<usize>()` as the transfer type.
+
+## Bit-Slice Storage and Integer Value Relationships
+
+`BitField` permits any type of integer, *including signed integers*, to be
+stored into or loaded out of a `BitSlice<T, _>` with any storage type `T`. While
+the examples in this module will largely use `u8`, just to keep the text
+concise, `BitField` is tested, and will work correctly, for any combination of
+types.
+
+`BitField` implementations use the processor’s own concept of integer registers
+to operate. As such, the byte-wise memory access patterns for types wider than
+`u8` depends on your processor’s byte endianness, as well as which `BitField`
+method, and which [`BitOrder`] type parameter, you are using.
+
+`BitField` only operates within processor registers; traffic of `T` elements
+between the memory bank and the processor register is controlled entirely by the
+processor.
+
+If you do not want to introduce the processor’s byte endianness as a variable
+that affects the in-memory representation of stored integers, use
+`BitSlice<u8, _>` as the bit-field storage type. In particular,
+`BitSlice<u8, Msb0>` will fill memory in a way that intuitively matches what
+most debuggers show when inspecting memory.
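+
+For instance (a short sketch of that layout; `store_be` keeps the
+more-significant bytes at the lower addresses):
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 2];
+raw.view_bits_mut::<Msb0>().store_be(0xABCDu16);
+// Reads top-to-bottom in a hex dump, most significant byte first.
+assert_eq!(raw, [0xAB, 0xCD]);
+```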
+
+On the other hand, if you do not care about memory representation and just need
+fast storage of less than an entire integer, `BitSlice<usize, Lsb0>` is likely
+your best bet. As always, the choice of type parameters is a trade-off with
+different advantages for each combination, which is why `bitvec` refuses to make
+the choice for you.
+
+### Signed Behavior
+
+The length of the `BitSlice` that stores a value is considered to be the width
+of that value when it is loaded back out. As such, storing an `i16` into a
+bit-slice of length `12` means that the stored value has type `i12`.
+
+When calling `.load::<i16>()` on a 12-bit slice, the load will detect the sign
+bit of the `i12` value and sign-extend it to `i16`. This means that storing
+`2048i16` into a 12-bit slice and then loading it back out into an `i16` will
+produce `-2048i16` (negative), not `2048i16` (positive), because `1 << 11` is
+the sign bit.
+
+`BitField` **does not** record the true sign bit of an integer being stored, and
+will not attempt to set the sign bit of the narrowed value in storage. Storing
+`-127i8` (`0b1000_0001`) into a 7-bit slice will load `1i8`.
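+
+Both effects can be seen in a short sketch (shown here with the `Lsb0` ordering
+and the `_le` methods; the other combinations behave analogously):
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 2];
+let bits = &mut raw.view_bits_mut::<Lsb0>()[0 .. 12];
+bits.store_le(2048i16);
+// Bit 11, the `i12` sign bit, is set, so the load sign-extends it.
+assert_eq!(bits.load_le::<i16>(), -2048i16);
+
+let mut raw = 0u8;
+let bits = &mut raw.view_bits_mut::<Lsb0>()[0 .. 7];
+// The true sign bit of `-127i8` (`0b1000_0001`) is truncated away on store.
+bits.store_le(-127i8);
+assert_eq!(bits.load_le::<i8>(), 1i8);
+```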
+
+## Register Bit Order Preservation
+
+The implementations in this module assume that the bits within a *value* being
+transferred into or out of a bit-slice should not be re-ordered. While the
+implementations will segment a value in order to make it fit into bit-slice
+storage, and will order those *segments* in memory according to their type
+parameter and specific trait method called, each segment will remain
+individually unmodified.
+
+If we consider the value `0b100_1011`, segmented at the underscore, then the
+segments `0b100` and `0b1011` will be present somewhere in the bit-slice that
+stores them. They may be shifted within an element or re-ordered across
+elements, but each segment will not be changed.
+
+## Endianness
+
+`bitvec` uses the `BitOrder` trait to describe the order of bits within a single
+memory element. This ordering is independent of, and does not consider, the
+ordering of memory elements in a sequence; `bitvec` is always “little-endian” in
+this regard: lower indices are in lower memory addresses, higher indices are in
+higher memory addresses.
+
+However, `BitField` is *explicitly* aware of multiple storage elements in
+sequence. It is by design able to allow combinations such as
+`<BitSlice<u8, Lsb0> as BitField>::store_be::<u32>`. Even where the storage and
+value types are the same, or the value is narrower, the bit-slice may be spread
+across multiple elements and must segment the value across them.
+
+The `_be` and `_le` orderings on `BitField` method names refer to the numeric
+significance of *bit-slice storage elements*.
+
+In `_be` methods, lower-address storage elements will hold more-significant
+segments of the value, and higher-address storage will hold less-significant.
+
+In `_le` methods, lower-address storage elements will hold *less*-significant
+segments of the value, and higher-address storage will hold *more*-significant.
+
+Consider again the value `0b100_1011`, segmented at the underscore. When used
+with `.store_be()`, it will be placed into memory as `[0b…100…, 0b…1011…]`; when
+used with `.store_le()`, it will be placed into memory as `[0b…1011…, 0b…100…]`.
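+
+The following sketch shows that placement over a seven-bit span straddling two
+`u8` elements (three live bits in the first, four in the second), using the
+`Lsb0` ordering:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 2];
+
+// Big-endian: the first element receives the more-significant segment, 0b100.
+raw.view_bits_mut::<Lsb0>()[5 .. 12].store_be(0b100_1011u8);
+assert_eq!(raw, [0b1000_0000, 0b0000_1011]);
+
+// Little-endian: the first element receives the less-significant segment, 0b011.
+raw.view_bits_mut::<Lsb0>()[5 .. 12].store_le(0b100_1011u8);
+assert_eq!(raw, [0b0110_0000, 0b0000_1001]);
+```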
+
+## Bit-Ordering Behaviors
+
+The `_be` and `_le` suffixes select the ordering of storage elements in memory.
+The other critical aspect of the `BitField` memory behavior is selecting
+*which bits* in a storage element are used when a bit-slice has partial
+elements.
+
+When `BitSlice<_, Lsb0>` produces a [`Domain::Region`], its `head` is in the
+most-significant bits of its element and its `tail` is in the least-significant
+bits. When `BitSlice<_, Msb0>` produces a `Region`, its `head` is in the
+*least*-significant bits, and its `tail` is in the *most*-significant bits.
+
+You can therefore use these combinations of `BitOrder` type parameter and
+`BitField` method suffix to select exactly the memory behavior you want for a
+storage region.
+
+Each implementation of `BitField` has documentation showing exactly what its
+memory layout looks like, with code examples and visual inspections of memory.
+This documentation is likely collapsed by default when viewing the trait docs;
+be sure to use the `[+]` button to expand it!
+
+[`BitField`]: self::BitField
+[`BitOrder`]: crate::order::BitOrder
+[`BitSlice`]: crate::slice::BitSlice
+[`Domain::Region`]: crate::domain::Domain::Region
diff --git a/doc/field/BitField.md b/doc/field/BitField.md
new file mode 100644
index 0000000..f77ac12
--- /dev/null
+++ b/doc/field/BitField.md
@@ -0,0 +1,96 @@
+# C-Style Bit-Field Access
+
+This trait describes data transfer between a [`BitSlice`] region and an ordinary
+integer. It is not intended for use by any other types than the data structures
+in this crate.
+
+The methods in this trait always operate on the `bitslice.len()` least
+significant bits of an integer, and ignore any remaining high bits. When
+loading, any excess high bits not copied out of a bit-slice are cleared to zero.
+
+## Usage
+
+The trait methods all panic if called on a bit-slice that is wider than the
+integer type being transferred. As such, the first step is generally to subslice
+a larger data structure into exactly the region used for storage, with
+`bits[start .. end]`. Then, call the desired method on the narrowed bit-slice.
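+
+For instance (a sketch using the unadorned methods, so the exact bytes in
+memory depend on the build target, but the round-trip does not):
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 2];
+let bits = raw.view_bits_mut::<Msb0>();
+
+// Pack a 4-bit field and a 9-bit field into 16 bits of storage.
+bits[0 .. 4].store(0b1010u8);
+bits[4 .. 13].store(311u16);
+
+assert_eq!(bits[0 .. 4].load::<u8>(), 0b1010);
+assert_eq!(bits[4 .. 13].load::<u16>(), 311);
+```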
+
+## Target-Specific Behavior
+
+If you do not care about the details of the memory layout of stored values, you
+can use the [`.load()`] and [`.store()`] unadorned methods. These each forward
+to their `_le` variant on little-endian targets, and their `_be` variant on
+big-endian. These will provide a reasonable default behavior, but do not
+guarantee a stable memory layout, and their buffers are not suitable for
+de/serialization.
+
+If you require a stable memory layout, you will need to choose a `BitSlice`
+with a fixed `O: BitOrder` type parameter (not `LocalBits`), and use a fixed
+method suffix (`_le` or `_be`). You should *probably* also use `u8` as your
+`T: BitStore` parameter, in order to avoid any byte-ordering issues. `bitvec`
+never interferes with processor concepts of wide-integer layout, and always
+relies on the target machine’s behavior for this work.
+
+## Element- and Bit- Ordering Combinations
+
+Remember: the `_le` and `_be` method suffixes are completely independent of the
+`Lsb0` and `Msb0` types! `_le` and `_be` refer to the order in which successive
+memory elements are considered to gain numerical significance, while `BitOrder`
+refers only to the order of successive bits in *one* memory element.
+
+The `BitField` and `BitOrder` traits are ***not*** related.
+
+When a load or store operation is contained in only one memory element, then the
+`_le` and `_be` methods have the same behavior: they exchange an integer value
+with the segment of the element that its governing `BitSlice` considers live.
+Only when a `BitSlice` covers multiple elements does the distinction come into
+play.
+
+The `_le` methods consider numerical significance to start low and increase with
+increasing memory address, while the `_be` methods consider numerical
+significance to start high and *decrease* with increasing memory address. This
+distinction affects the order in which memory elements are used to load or store
+segments of the exchanged integer value.
+
+Each trait method has detailed visual diagrams in its documentation.
+Additionally, each *implementation*’s documentation has diagrams that show what
+the governed bit-sections of elements are! Be sure to check each, or to run the
+demonstration with `cargo run --example bitfield`.
+
+## Bitfield Value Types
+
+When interacting with a bit-slice as a C-style bitfield, it can *only* store the
+signed or unsigned integer types. No other type is permitted, as the
+implementation relies on the 2’s-complement significance behavior of processor
+integers. Record types and floating-point numbers do not have this property, and
+thus have no sensible default protocol for truncation and un/marshalling that
+`bitvec` can use.
+
+If you have such a protocol, you may implement it yourself by providing a
+de/serialization transform between your type and the integers. For instance, a
+numerically-correct protocol to store floating-point numbers in bitfields might
+look like this:
+
+```rust
+use bitvec::mem::bits_of;
+use funty::Floating;
+
+fn to_storage<F>(num: F, width: usize) -> F::Raw
+where F: Floating {
+ num.to_bits() >> (bits_of::<F>() - width)
+}
+
+fn from_storage<F>(val: F::Raw, width: usize) -> F
+where F: Floating {
+ F::from_bits(val << (bits_of::<F>() - width))
+}
+```
+
+This implements truncation in the least-significant bits, where floating-point
+numbers store disposable bits in the mantissa, rather than in the
+most-significant bits which contain the sign, exponent, and most significant
+portion of the mantissa.
+
+[`BitSlice`]: crate::slice::BitSlice
+[`.load()`]: Self::load
+[`.store()`]: Self::store
diff --git a/doc/field/BitField_Lsb0.md b/doc/field/BitField_Lsb0.md
new file mode 100644
index 0000000..feaf019
--- /dev/null
+++ b/doc/field/BitField_Lsb0.md
@@ -0,0 +1,15 @@
+# `Lsb0` Bit-Field Behavior
+
+`BitField` has no requirements about the in-memory representation or layout of
+stored integers within a bit-slice, only that round-tripping an integer through
+a store and a load of the same element suffix on the same bit-slice is
+idempotent (with respect to sign truncation).
+
+`Lsb0` provides a contiguous translation from bit-index to real memory: for any
+given bit index `n` and its position `P(n)`, `P(n + 1)` is `P(n) + 1`. This
+allows it to provide batched behavior: since the section of contiguous indices
+used within an element translates to a section of contiguous bits in real
+memory, the transaction is always a single shift/mask operation.
+
+Each implemented method contains documentation and examples showing exactly how
+the abstract integer space is mapped to real memory.
diff --git a/doc/field/BitField_Lsb0_load_be.md b/doc/field/BitField_Lsb0_load_be.md
new file mode 100644
index 0000000..c378e69
--- /dev/null
+++ b/doc/field/BitField_Lsb0_load_be.md
@@ -0,0 +1,77 @@
+# `Lsb0` Big-Endian Integer Loading
+
+This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element contain the contents of an integer to be
+loaded, using big-endian element ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Signed-Integer Loading
+
+As described in the trait definition, when loading as a signed integer, the most
+significant bit *loaded* from memory is sign-extended to the full width of the
+returned type. In this method, that means the most-significant loaded bit of
+the first element.
+
+## Examples
+
+In each memory element, the `Lsb0` ordering counts indices leftward from the
+right edge:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = 0b00_10110_0u8;
+// 76 54321 0
+// ^ sign bit
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [1 .. 6]
+ .load_be::<u8>(),
+ 0b000_10110,
+);
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [1 .. 6]
+ .load_be::<i8>(),
+ 0b111_10110u8 as i8,
+);
+```
+
+In bit-slices that span multiple elements, the big-endian element ordering means
+that the slice index increases while numeric significance decreases:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+ 0b0010_1111u8,
+// ^ sign bit
+// 7 0
+ 0x0_1u8,
+// 15 8
+ 0xF_8u8,
+// 23 16
+];
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [4 .. 20]
+ .load_be::<u16>(),
+ 0x2018u16,
+);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and load functions.
+
+[orig]: crate::field::BitField::load_be
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Lsb0_load_le.md b/doc/field/BitField_Lsb0_load_le.md
new file mode 100644
index 0000000..77e4902
--- /dev/null
+++ b/doc/field/BitField_Lsb0_load_le.md
@@ -0,0 +1,77 @@
+# `Lsb0` Little-Endian Integer Loading
+
+This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element contain the contents of an integer to be
+loaded, using little-endian element ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Signed-Integer Loading
+
+As described in the trait definition, when loading as a signed integer, the most
+significant bit *loaded* from memory is sign-extended to the full width of the
+returned type. In this method, that means the most-significant loaded bit of the
+final element.
+
+## Examples
+
+In each memory element, the `Lsb0` ordering counts indices leftward from the
+right edge:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = 0b00_10110_0u8;
+// 76 54321 0
+// ^ sign bit
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [1 .. 6]
+ .load_le::<u8>(),
+ 0b000_10110,
+);
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [1 .. 6]
+ .load_le::<i8>(),
+ 0b111_10110u8 as i8,
+);
+```
+
+In bit-slices that span multiple elements, the little-endian element ordering
+means that the slice index increases with numerical significance:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+ 0x8_Fu8,
+// 7 0
+ 0x0_1u8,
+// 15 8
+ 0b1111_0010u8,
+// ^ sign bit
+// 23 16
+];
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [4 .. 20]
+ .load_le::<u16>(),
+ 0x2018u16,
+);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and load functions.
+
+[orig]: crate::field::BitField::load_le
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Lsb0_store_be.md b/doc/field/BitField_Lsb0_store_be.md
new file mode 100644
index 0000000..64582e7
--- /dev/null
+++ b/doc/field/BitField_Lsb0_store_be.md
@@ -0,0 +1,69 @@
+# `Lsb0` Big-Endian Integer Storing
+
+This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element are used for storage, using big-endian element
+ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Narrowing Behavior
+
+Integers are truncated from the high end. When storing into a bit-slice of
+length `n`, the `n` least numerically significant bits are stored, and any
+remaining high bits are ignored.
+
+Be aware of this behavior if you are storing signed integers! The signed integer
+`-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back
+from a 4-bit slice, become the value `2i8`.
+
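+As a small sketch of this narrowing (an illustrative round trip through
+`store_be`/`load_be`, not part of the trait contract), the high bits of `-14i8`
+are discarded on the way in:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = 0u8;
+// Only the low four bits of `-14i8` (`0b0010`) survive the store.
+raw.view_bits_mut::<Lsb0>()
+  [0 .. 4]
+  .store_be(-14i8);
+assert_eq!(
+  raw.view_bits::<Lsb0>()
+    [0 .. 4]
+    .load_be::<i8>(),
+  2,
+);
+```
+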
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = 0u8;
+raw.view_bits_mut::<Lsb0>()
+ [1 .. 6]
+ .store_be(22u8);
+assert_eq!(raw, 0b00_10110_0);
+// 76 54321 0
+raw.view_bits_mut::<Lsb0>()
+ [1 .. 6]
+ .store_be(-10i8);
+assert_eq!(raw, 0b00_10110_0);
+```
+
+In bit-slices that span multiple elements, the big-endian element ordering means
+that the slice index increases while numerical significance decreases:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [!0u8; 3];
+raw.view_bits_mut::<Lsb0>()
+ [4 .. 20]
+ .store_be(0x2018u16);
+assert_eq!(raw, [
+ 0x2_F,
+// 7 0
+ 0x0_1,
+// 15 8
+ 0xF_8,
+// 23 16
+]);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and store functions.
+
+[orig]: crate::field::BitField::store_be
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Lsb0_store_le.md b/doc/field/BitField_Lsb0_store_le.md
new file mode 100644
index 0000000..eb3dbed
--- /dev/null
+++ b/doc/field/BitField_Lsb0_store_le.md
@@ -0,0 +1,69 @@
+# `Lsb0` Little-Endian Integer Storing
+
+This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element are used for storage, using little-endian
+element ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Narrowing Behavior
+
+Integers are truncated from the high end. When storing into a bit-slice of
+length `n`, the `n` least numerically significant bits are stored, and any
+remaining high bits are ignored.
+
+Be aware of this behavior if you are storing signed integers! The signed integer
+`-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back
+from a 4-bit slice, become the value `2i8`.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = 0u8;
+raw.view_bits_mut::<Lsb0>()
+ [1 .. 6]
+ .store_le(22u8);
+assert_eq!(raw, 0b00_10110_0);
+// 76 54321 0
+raw.view_bits_mut::<Lsb0>()
+ [1 .. 6]
+ .store_le(-10i8);
+assert_eq!(raw, 0b00_10110_0);
+```
+
+In bit-slices that span multiple elements, the little-endian element ordering
+means that the slice index increases with numerical significance:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [!0u8; 3];
+raw.view_bits_mut::<Lsb0>()
+ [4 .. 20]
+ .store_le(0x2018u16);
+assert_eq!(raw, [
+ 0x8_F,
+// 7 0
+ 0x0_1,
+// 15 8
+ 0xF_2,
+// 23 16
+]);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and store functions.
+
+[orig]: crate::field::BitField::store_le
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Msb0.md b/doc/field/BitField_Msb0.md
new file mode 100644
index 0000000..4758ae4
--- /dev/null
+++ b/doc/field/BitField_Msb0.md
@@ -0,0 +1,23 @@
+# `Msb0` Bit-Field Behavior
+
+`BitField` has no requirements about the in-memory representation or layout of
+stored integers within a bit-slice, only that storing an integer and then
+loading it back with the same method suffix on the same bit-slice reproduces
+the original value (up to truncation and sign-extension).
+
+`Msb0` provides a contiguous translation from bit-index to real memory: for any
+given bit index `n` and its position `P(n)`, `P(n + 1)` is `P(n) - 1`. This
+allows it to provide batched behavior: since the section of contiguous indices
+used within an element translates to a section of contiguous bits in real
+memory, the transaction is always a single shift-mask operation.
+
+Each implemented method contains documentation and examples showing exactly how
+the abstract integer space is mapped to real memory.
+
+## Notes
+
+In particular, note that while `Msb0` indexes bits from the most significant
+down to the least, and integers index from the least up to the most, this
+**does not** reörder any bits of the integer value! This ordering only finds a
+region in real memory; it does *not* affect the partial-integer contents stored
+in that region.
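+
+A small sketch of this point (illustrative only; it uses the `store_be` method
+documented later in this trait) is that whole-element stores produce identical
+bytes under either ordering:
+
+```rust
+use bitvec::prelude::*;
+
+let mut a = [0u8; 2];
+let mut b = [0u8; 2];
+a.view_bits_mut::<Msb0>().store_be(0xABCD_u16);
+b.view_bits_mut::<Lsb0>().store_be(0xABCD_u16);
+// The ordering parameter only chooses *which* bits of a partial element are
+// live; when whole elements are used, both orderings yield the same bytes.
+assert_eq!(a, b);
+assert_eq!(a, [0xAB, 0xCD]);
+```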
diff --git a/doc/field/BitField_Msb0_load_be.md b/doc/field/BitField_Msb0_load_be.md
new file mode 100644
index 0000000..eef136c
--- /dev/null
+++ b/doc/field/BitField_Msb0_load_be.md
@@ -0,0 +1,77 @@
+# `Msb0` Big-Endian Integer Loading
+
+This implementation uses the `Msb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element contain the contents of an integer to be
+loaded, using big-endian element ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Signed-Integer Loading
+
+As described in the trait definition, when loading as a signed integer, the most
+significant bit *loaded* from memory is sign-extended to the full width of the
+returned type. In this method, that means the most-significant loaded bit of the
+first element.
+
+## Examples
+
+In each memory element, the `Msb0` ordering counts indices rightward from the
+left edge:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = 0b00_10110_0u8;
+// 01 23456 7
+// ^ sign bit
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [2 .. 7]
+ .load_be::<u8>(),
+ 0b000_10110,
+);
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [2 .. 7]
+ .load_be::<i8>(),
+ 0b111_10110u8 as i8,
+);
+```
+
+In bit-slices that span multiple elements, the big-endian element ordering means
+that the slice index increases while numerical significance decreases:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+ 0b1111_0010u8,
+// ^ sign bit
+// 0 7
+ 0x0_1u8,
+// 8 15
+ 0x8_Fu8,
+// 16 23
+];
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [4 .. 20]
+ .load_be::<u16>(),
+ 0x2018u16,
+);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and load functions.
+
+[orig]: crate::field::BitField::load_be
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Msb0_load_le.md b/doc/field/BitField_Msb0_load_le.md
new file mode 100644
index 0000000..7a88759
--- /dev/null
+++ b/doc/field/BitField_Msb0_load_le.md
@@ -0,0 +1,77 @@
+# `Msb0` Little-Endian Integer Loading
+
+This implementation uses the `Msb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element contain the contents of an integer to be
+loaded, using little-endian element ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Signed-Integer Loading
+
+As described in the trait definition, when loading as a signed integer, the most
+significant bit *loaded* from memory is sign-extended to the full width of the
+returned type. In this method, that means the most-significant loaded bit of the
+final element.
+
+## Examples
+
+In each memory element, the `Msb0` ordering counts indices rightward from the
+left edge:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = 0b00_10110_0u8;
+// 01 23456 7
+// ^ sign bit
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [2 .. 7]
+ .load_le::<u8>(),
+ 0b000_10110,
+);
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [2 .. 7]
+ .load_le::<i8>(),
+ 0b111_10110u8 as i8,
+);
+```
+
+In bit-slices that span multiple elements, the little-endian element ordering
+means that the slice index increases with numerical significance:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+ 0xF_8u8,
+// 0 7
+ 0x0_1u8,
+// 8 15
+ 0b0010_1111u8,
+// ^ sign bit
+// 16 23
+];
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [4 .. 20]
+ .load_le::<u16>(),
+ 0x2018u16,
+);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and load functions.
+
+[orig]: crate::field::BitField::load_le
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Msb0_store_be.md b/doc/field/BitField_Msb0_store_be.md
new file mode 100644
index 0000000..7af81dc
--- /dev/null
+++ b/doc/field/BitField_Msb0_store_be.md
@@ -0,0 +1,69 @@
+# `Msb0` Big-Endian Integer Storing
+
+This implementation uses the `Msb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element are used for storage, using big-endian element
+ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Narrowing Behavior
+
+Integers are truncated from the high end. When storing into a bit-slice of
+length `n`, the `n` least numerically significant bits are stored, and any
+remaining high bits are ignored.
+
+Be aware of this behavior if you are storing signed integers! The signed integer
+`-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back
+from a 4-bit slice, become the value `2i8`.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = 0u8;
+raw.view_bits_mut::<Msb0>()
+ [2 .. 7]
+ .store_be(22u8);
+assert_eq!(raw, 0b00_10110_0);
+// 01 23456 7
+raw.view_bits_mut::<Msb0>()
+ [2 .. 7]
+ .store_be(-10i8);
+assert_eq!(raw, 0b00_10110_0);
+```
+
+In bit-slices that span multiple elements, the big-endian element ordering means
+that the slice index increases while numerical significance decreases:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [!0u8; 3];
+raw.view_bits_mut::<Msb0>()
+ [4 .. 20]
+ .store_be(0x2018u16);
+assert_eq!(raw, [
+ 0xF_2,
+// 0 7
+ 0x0_1,
+// 8 15
+ 0x8_F,
+// 16 23
+]);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and store functions.
+
+[orig]: crate::field::BitField::store_be
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_Msb0_store_le.md b/doc/field/BitField_Msb0_store_le.md
new file mode 100644
index 0000000..fb27993
--- /dev/null
+++ b/doc/field/BitField_Msb0_store_le.md
@@ -0,0 +1,69 @@
+# `Msb0` Little-Endian Integer Storing
+
+This implementation uses the `Msb0` bit-ordering to determine *which* bits in a
+partially-occupied memory element are used for storage, using little-endian
+element ordering.
+
+See the [trait method definition][orig] for an overview of what element ordering
+means.
+
+## Narrowing Behavior
+
+Integers are truncated from the high end. When storing into a bit-slice of
+length `n`, the `n` least numerically significant bits are stored, and any
+remaining high bits are ignored.
+
+Be aware of this behavior if you are storing signed integers! The signed integer
+`-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back
+from a 4-bit slice, become the value `2i8`.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = 0u8;
+raw.view_bits_mut::<Msb0>()
+ [2 .. 7]
+ .store_le(22u8);
+assert_eq!(raw, 0b00_10110_0);
+// 01 23456 7
+raw.view_bits_mut::<Msb0>()
+ [2 .. 7]
+ .store_le(-10i8);
+assert_eq!(raw, 0b00_10110_0);
+```
+
+In bit-slices that span multiple elements, the little-endian element ordering
+means that the slice index increases with numerical significance:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [!0u8; 3];
+raw.view_bits_mut::<Msb0>()
+ [4 .. 20]
+ .store_le(0x2018u16);
+assert_eq!(raw, [
+ 0xF_8,
+// 0 7
+ 0x0_1,
+// 8 15
+ 0x2_F,
+// 16 23
+]);
+```
+
+Note that while these examples use `u8` storage for convenience in displaying
+the literals, `BitField` operates identically with *any* storage type. As most
+machines use little-endian *byte ordering* within wider element types, and
+`bitvec` exclusively operates on *elements*, the actual bytes of memory may
+rapidly start to behave oddly when translating between numeric literals and
+in-memory representation.
+
+The [user guide] has a chapter that translates bit indices into memory positions
+for each combination of `<T: BitStore, O: BitOrder>`, and may be of additional
+use when choosing a combination of type parameters and store functions.
+
+[orig]: crate::field::BitField::store_le
+[user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout
diff --git a/doc/field/BitField_load.md b/doc/field/BitField_load.md
new file mode 100644
index 0000000..fb92086
--- /dev/null
+++ b/doc/field/BitField_load.md
@@ -0,0 +1,58 @@
+# Integer Loading
+
+This method reads the contents of a bit-slice region as an integer. The region
+may be shorter than the destination integer type, in which case the loaded value
+will be zero-extended (when `I: Unsigned`) or sign-extended from the most
+significant loaded bit (when `I: Signed`).
+
+The region may not be zero bits, nor wider than the destination type. Attempting
+to load a `u32` from a bit-slice of length 33 will panic the program.
+
+## Operation and Endianness Handling
+
+Each element in the bit-slice contains a segment of the value to be loaded. If
+the bit-slice contains more than one element, then the numerical significance of
+each loaded segment is interpreted according to the target’s endianness:
+
+- little-endian targets consider each *`T` element* to have increasing numerical
+ significance, starting with the least-significant segment at the low address
+ and ending with the most-significant segment at the high address.
+- big-endian targets consider each *`T` element* to have decreasing numerical
+ significance, starting with the most-significant segment at the low address
+ and ending with the least-significant segment at the high address.
+
+See the documentation for [`.load_le()`] and [`.load_be()`] for more detail on
+what this means for how the in-memory representation of bit-slices translates to
+loaded values.
+
+You must always use the loading method that exactly corresponds to the storing
+method previously used to insert data into the bit-slice: same suffix on the
+method name (none, `_le`, `_be`) and same integer type. `bitvec` is not required
+to, and will not, guarantee round-trip consistency if you change any of these
+parameters.
+
+## Type Parameters
+
+- `I`: The integer type being loaded. This can be any of the signed or unsigned
+ integers.
+
+## Parameters
+
+- `&self`: A bit-slice region whose length is in the range `1 ..= I::BITS`.
+
+## Returns
+
+The contents of the bit-slice, interpreted as an integer.
+
+## Panics
+
+This panics if `self.len()` is 0, or greater than `I::BITS`.
+
+## Examples
+
+This method is inherently non-portable, and changes behavior depending on the
+target characteristics. If your target is little-endian, see [`.load_le()`]; if
+your target is big-endian, see [`.load_be()`].
+
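+The one property that holds on every target is the round trip through matching
+methods. A minimal sketch (illustrative only, here with the `Lsb0` ordering):
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u16; 2];
+let bits = raw.view_bits_mut::<Lsb0>();
+// A 20-bit value spanning both `u16` elements.
+bits[3 .. 23].store::<u32>(0xA_BCDE);
+// Loading with the *same* method recovers it regardless of target endianness.
+assert_eq!(bits[3 .. 23].load::<u32>(), 0xA_BCDE);
+```
+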
+[`.load_be()`]: Self::load_be
+[`.load_le()`]: Self::load_le
diff --git a/doc/field/BitField_load_be.md b/doc/field/BitField_load_be.md
new file mode 100644
index 0000000..4174b2f
--- /dev/null
+++ b/doc/field/BitField_load_be.md
@@ -0,0 +1,145 @@
+# Big-Endian Integer Loading
+
+This method loads an integer value from a bit-slice, using big-endian
+significance ordering when the bit-slice spans more than one `T` element in
+memory.
+
+Big-endian significance ordering means that if a bit-slice occupies an array
+`[A, B, C]`, then the bits stored in `A` are considered to be the most
+significant segment of the loaded integer, then `B` contains the middle segment,
+then `C` contains the least significant segment.
+
+The segments are combined in order, that is, as the raw bit-pattern
+`0b<padding><A><B><C>`. If the destination type is signed, the loaded value is
+sign-extended according to the most-significant bit in the `A` segment.
+
+It is important to note that the `O: BitOrder` parameter of the bit-slice from
+which the value is loaded **does not** affect the bit-pattern of the stored
+segments. They are always stored exactly as they exist in an ordinary integer.
+The ordering parameter only affects *which* bits in an element are available for
+storage.
+
+## Type Parameters
+
+- `I`: The integer type being loaded. This can be any of the signed or unsigned
+ integers.
+
+## Parameters
+
+- `&self`: A bit-slice region whose length is in the range `1 ..= I::BITS`.
+
+## Returns
+
+The contents of the bit-slice, interpreted as an integer.
+
+## Panics
+
+This panics if `self.len()` is 0, or greater than `I::BITS`.
+
+## Examples
+
+Let us consider an `i32` value stored in 24 bits of a `BitSlice<u8, Lsb0>`:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 4];
+let bits = raw.view_bits_mut::<Lsb0>();
+
+let integer = 0x00__B4_96_3Cu32 as i32;
+bits[4 .. 28].store_be::<i32>(integer);
+let loaded = bits[4 .. 28].load_be::<i32>();
+assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32);
+```
+
+Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the
+value was considered to be negative when interpreted as an `i24` and was
+sign-extended through the highest byte.
+
+Let us now look at the memory representation of this value:
+
+```rust
+# use bitvec::prelude::*;
+# let mut raw = [0u8; 4];
+# let bits = raw.view_bits_mut::<Lsb0>();
+# bits[4 .. 28].store_be::<u32>(0x00B4963Cu32);
+assert_eq!(raw, [
+ 0b1011_0000,
+// 0xB dead
+ 0b0100_1001,
+// 0x4 0x9
+ 0b0110_0011,
+// 0x6 0x3
+ 0b0000_1100,
+// dead 0xC
+]);
+```
+
+Notice how while the `Lsb0` bit-ordering means that indexing within the
+bit-slice proceeds right-to-left in each element, the actual bit-patterns stored
+in memory are not affected. Element `[0]` is more numerically significant than
+element `[1]`, but bit `[4]` is not more numerically significant than bit `[5]`.
+
+In the sequence `B4963C`, `B` is the most significant nibble, and so it is
+placed lowest in memory. `49` and `63` each fill a whole byte, and are stored
+directly as written. Lastly, `C` is the least significant nibble, and is placed
+highest in memory.
+
+Now let’s look at the way different `BitOrder` parameters interpret the
+placement of bit indices within memory:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+// Bit index 14 ←
+// Lsb0: ─┤
+ 0b0100_0000_0000_0011u16,
+// Msb0: ├─
+// → 14
+
+// Bit index ← 19 16
+// Lsb0: ├──┤
+ 0b0001_0000_0000_1110u16,
+// Msb0: ├──┤
+// 16 19 →
+];
+
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [14 .. 20]
+ .load_be::<u8>(),
+ 0b00_01_1110,
+);
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [14 .. 20]
+ .load_be::<u8>(),
+ 0b00_11_0001,
+);
+```
+
+Notice how the bit-orderings change which *parts* of the memory are loaded, but
+in both cases the segment in `raw[0]` is more significant than the segment in
+`raw[1]`, and the ordering of bits *within* each segment is unaffected by the
+bit-ordering.
+
+## Notes
+
+Be sure to see the documentation for
+[`<BitSlice<_, Lsb0> as BitField>::load_be`][llb] and
+[`<BitSlice<_, Msb0> as BitField>::load_be`][mlb] for more detailed information
+on the memory views!
+
+You can view the mask of all *storage regions* of a bit-slice by using its
+[`.domain()`] method to view the breakdown of its memory region, then print the
+[`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are
+always used in their entirety. You should use the `domain` module’s types
+whenever you are uncertain of the exact locations in memory that a particular
+bit-slice governs.
+
+[llb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_be-3
+[mlb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_be-4
+[`PartialElement`]: crate::domain::PartialElement
+[`.domain()`]: crate::slice::BitSlice::domain
+[`.mask()`]: crate::domain::PartialElement::mask
diff --git a/doc/field/BitField_load_le.md b/doc/field/BitField_load_le.md
new file mode 100644
index 0000000..1cef06a
--- /dev/null
+++ b/doc/field/BitField_load_le.md
@@ -0,0 +1,145 @@
+# Little-Endian Integer Loading
+
+This method loads an integer value from a bit-slice, using little-endian
+significance ordering when the bit-slice spans more than one `T` element in
+memory.
+
+Little-endian significance ordering means that if a bit-slice occupies an array
+`[A, B, C]`, then the bits stored in `A` are considered to contain the least
+significant segment of the loaded integer, then `B` contains the middle segment,
+and then `C` contains the most significant segment.
+
+The segments are combined in order, that is, as the raw bit-pattern
+`0b<padding><C><B><A>`. If the destination type is signed, the loaded value is
+sign-extended according to the most-significant bit in the `C` segment.
+
+It is important to note that the `O: BitOrder` parameter of the bit-slice from
+which the value is loaded **does not** affect the bit-pattern of the stored
+segments. They are always stored exactly as they exist in an ordinary integer.
+The ordering parameter only affects *which* bits in an element are available for
+storage.
+
+## Type Parameters
+
+- `I`: The integer type being loaded. This can be any of the signed or unsigned
+ integers.
+
+## Parameters
+
+- `&self`: A bit-slice region whose length is in the range `1 ..= I::BITS`.
+
+## Returns
+
+The contents of the bit-slice, interpreted as an integer.
+
+## Panics
+
+This panics if `self.len()` is 0, or greater than `I::BITS`.
+
+## Examples
+
+Let us consider an `i32` value stored in 24 bits of a `BitSlice<u8, Msb0>`:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 4];
+let bits = raw.view_bits_mut::<Msb0>();
+
+let integer = 0x00__B4_96_3Cu32 as i32;
+bits[4 .. 28].store_le::<i32>(integer);
+let loaded = bits[4 .. 28].load_le::<i32>();
+assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32);
+```
+
+Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the
+value was considered to be negative when interpreted as an `i24` and was
+sign-extended through the highest byte.
+
+Let us now look at the memory representation of this value:
+
+```rust
+# use bitvec::prelude::*;
+# let mut raw = [0u8; 4];
+# let bits = raw.view_bits_mut::<Msb0>();
+# bits[4 .. 28].store_le::<u32>(0x00B4963Cu32);
+assert_eq!(raw, [
+ 0b0000_1100,
+// dead 0xC
+ 0b0110_0011,
+// 0x6 0x3
+ 0b0100_1001,
+// 0x4 0x9
+ 0b1011_0000,
+// 0xB dead
+]);
+```
+
+Notice how while the `Msb0` bit-ordering means that indexing within the
+bit-slice proceeds left-to-right in each element, and the bit-patterns in each
+element proceed left-to-right in the aggregate and the decomposed literals, the
+ordering of the elements is reversed from how the literal was written.
+
+In the sequence `B4963C`, `B` is the most significant nibble, and so it is
+placed highest in memory. `49` and `63` each fill a whole byte, and are stored
+directly as written. Lastly, `C` is the least significant nibble, and is placed
+lowest in memory.
+
+Now let’s look at the way different `BitOrder` parameters interpret the
+placement of bit indices within memory:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+// Bit index 14 ←
+// Lsb0: ─┤
+ 0b0100_0000_0000_0011u16,
+// Msb0: ├─
+// → 14
+
+// Bit index ← 19 16
+// Lsb0: ├──┤
+ 0b0001_0000_0000_1110u16,
+// Msb0: ├──┤
+// 16 19 →
+];
+
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [14 .. 20]
+ .load_le::<u8>(),
+ 0b00_1110_01,
+);
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [14 .. 20]
+ .load_le::<u8>(),
+ 0b00_0001_11,
+);
+```
+
+Notice how the bit-orderings change which *parts* of the memory are loaded, but
+in both cases the segment in `raw[0]` is less significant than the segment in
+`raw[1]`, and the ordering of bits *within* each segment is unaffected by the
+bit-ordering.
+
+## Notes
+
+Be sure to see the documentation for
+[`<BitSlice<_, Lsb0> as BitField>::load_le`][lll] and
+[`<BitSlice<_, Msb0> as BitField>::load_le`][mll] for more detailed information
+on the memory views!
+
+You can view the mask of all *storage regions* of a bit-slice by using its
+[`.domain()`] method to view the breakdown of its memory region, then print the
+[`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are
+always used in their entirety. You should use the `domain` module’s types
+whenever you are uncertain of the exact locations in memory that a particular
+bit-slice governs.
+
+[lll]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_le-3
+[mll]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_le-4
+[`PartialElement`]: crate::domain::PartialElement
+[`.domain()`]: crate::slice::BitSlice::domain
+[`.mask()`]: crate::domain::PartialElement::mask
diff --git a/doc/field/BitField_store.md b/doc/field/BitField_store.md
new file mode 100644
index 0000000..cff46e7
--- /dev/null
+++ b/doc/field/BitField_store.md
@@ -0,0 +1,57 @@
+# Integer Storing
+
+This method writes an integer into the contents of a bit-slice region. The
+region may be shorter than the source integer type, in which case the stored
+value will be truncated. On load, it may be zero-extended (unsigned destination)
+or sign-extended from the most significant **stored** bit (signed destination).
+
+The region may not be zero bits, nor wider than the source type. Attempting
+to store a `u32` into a bit-slice of length 33 will panic the program.
+
+## Operation and Endianness Handling
+
+The value to be stored is broken into segments according to the elements of the
+bit-slice receiving it. If the bit-slice contains more than one element, then
+each segment is routed to a storage element according to its numerical
+significance and the target’s endianness:
+
+- little-endian targets consider each *`T` element* to have increasing numerical
+ significance, starting with the least-significant segment at the low address
+ and ending with the most-significant segment at the high address.
+- big-endian targets consider each *`T` element* to have decreasing numerical
+ significance, starting with the most-significant segment at the low address
+ and ending with the least-significant segment at the high address.
+
+See the documentation for [`.store_le()`] and [`.store_be()`] for more detail on
+what this means for how the in-memory representation of bit-slices translates to
+stored values.
+
+You must always use the loading method that exactly corresponds to the storing
+method previously used to insert data into the bit-slice: same suffix on the
+method name (none, `_le`, `_be`) and same integer type. `bitvec` is not required
+to, and will not, guarantee round-trip consistency if you change any of these
+parameters.
+
+## Type Parameters
+
+- `I`: The integer type being stored. This can be any of the signed or unsigned
+ integers.
+
+## Parameters
+
+- `&mut self`: A bit-slice region whose length is in the range `1 ..= I::BITS`.
+- `value`: An integer value whose `self.len()` least numerically significant
+ bits will be written into `self`.
+
+## Panics
+
+This panics if `self.len()` is 0, or greater than `I::BITS`.
+
+## Examples
+
+This method is inherently non-portable, and changes behavior depending on the
+target characteristics. If your target is little-endian, see [`.store_le()`]; if
+your target is big-endian, see [`.store_be()`].
+
+[`.store_be()`]: Self::store_be
+[`.store_le()`]: Self::store_le
diff --git a/doc/field/BitField_store_be.md b/doc/field/BitField_store_be.md
new file mode 100644
index 0000000..e289272
--- /dev/null
+++ b/doc/field/BitField_store_be.md
@@ -0,0 +1,143 @@
+# Big-Endian Integer Storing
+
+This method stores an integer value into a bit-slice, using big-endian
+significance ordering when the bit-slice spans more than one `T` element in
+memory.
+
+Big-endian significance ordering means that if a bit-slice occupies an array
+`[A, B, C]`, then the bits stored in `A` are considered to contain the most
+significant segment of the stored integer, then `B` contains the middle segment,
+and then `C` contains the least significant segment.
+
+An integer is broken into segments in order, that is, the raw bit-pattern is
+fractured into `0b<padding><A><B><C>`. High bits beyond the length of the
+bit-slice into which the integer is stored are truncated.
+
+It is important to note that the `O: BitOrder` parameter of the bit-slice into
+which the value is stored **does not** affect the bit-pattern of the stored
+segments. They are always stored exactly as they exist in an ordinary integer.
+The ordering parameter only affects *which* bits in an element are available for
+storage.
+
+## Type Parameters
+
+- `I`: The integer type being stored. This can be any of the signed or unsigned
+ integers.
+
+## Parameters
+
+- `&mut self`: A bit-slice region whose length is in the range `1 ..= I::BITS`.
+- `value`: An integer value whose `self.len()` least numerically significant
+ bits will be written into `self`.
+
+## Panics
+
+This panics if `self.len()` is 0, or greater than `I::BITS`.
+
+## Examples
+
+Let us consider an `i32` value stored in 24 bits of a `BitSlice<u8, Lsb0>`:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 4];
+let bits = raw.view_bits_mut::<Lsb0>();
+
+let integer = 0x00__B4_96_3Cu32 as i32;
+bits[4 .. 28].store_be::<i32>(integer);
+let loaded = bits[4 .. 28].load_be::<i32>();
+assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32);
+```
+
+Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the
+value was considered to be negative when interpreted as an `i24` and was
+sign-extended through the highest byte.
+
+Let us now look at the memory representation of this value:
+
+```rust
+# use bitvec::prelude::*;
+# let mut raw = [0u8; 4];
+# let bits = raw.view_bits_mut::<Lsb0>();
+# bits[4 .. 28].store_be::<u32>(0x00B4963Cu32);
+assert_eq!(raw, [
+ 0b1011_0000,
+// 0xB dead
+ 0b0100_1001,
+// 0x4 0x9
+ 0b0110_0011,
+// 0x6 0x3
+ 0b0000_1100,
+// dead 0xC
+]);
+```
+
+Notice how while the `Lsb0` bit-ordering means that indexing within the
+bit-slice proceeds right-to-left in each element, the actual bit-patterns stored
+in memory are not affected. Element `[0]` is more numerically significant than
+element `[1]`, but bit `[4]` is not more numerically significant than bit `[5]`.
+
+In the sequence `B4963C`, `B` is the most significant nibble, and so it is
+placed lowest in memory. `49` and `63` each fill a whole byte, and are stored
+directly as written. Lastly, `C` is the least significant nibble, and is placed
+highest in memory.
+
+Now let’s look at the way different `BitOrder` parameters interpret the
+placement of bit indices within memory:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+// Bit index 14 ←
+// Lsb0: ─┤
+ 0b0100_0000_0000_0011u16,
+// Msb0: ├─
+// → 14
+
+// Bit index ← 19 16
+// Lsb0: ├──┤
+ 0b0001_0000_0000_1110u16,
+// Msb0: ├──┤
+// 16 19 →
+];
+
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [14 .. 20]
+ .load_be::<u8>(),
+ 0b00_01_1110,
+);
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [14 .. 20]
+ .load_be::<u8>(),
+ 0b00_11_0001,
+);
+```
+
+Notice how the bit-orderings change which *parts* of the memory are loaded, but
+in both cases the segment in `raw[0]` is more significant than the segment in
+`raw[1]`, and the ordering of bits *within* each segment is unaffected by the
+bit-ordering.
+
+## Notes
+
+Be sure to see the documentation for
+[`<BitSlice<_, Lsb0> as BitField>::store_be`][lsb] and
+[`<BitSlice<_, Msb0> as BitField>::store_be`][msb] for more detailed information
+on the memory views!
+
+You can view the mask of all *storage regions* of a bit-slice by using its
+[`.domain()`] method to view the breakdown of its memory region, then print the
+[`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are
+always used in their entirety. You should use the `domain` module’s types
+whenever you are uncertain of the exact locations in memory that a particular
+bit-slice governs.
+
+[lsb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_be-3
+[msb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_be-4
+[`PartialElement`]: crate::domain::PartialElement
+[`.domain()`]: crate::slice::BitSlice::domain
+[`.mask()`]: crate::domain::PartialElement::mask
diff --git a/doc/field/BitField_store_le.md b/doc/field/BitField_store_le.md
new file mode 100644
index 0000000..783c4b1
--- /dev/null
+++ b/doc/field/BitField_store_le.md
@@ -0,0 +1,143 @@
+# Little-Endian Integer Storing
+
+This method stores an integer value into a bit-slice, using little-endian
+significance ordering when the bit-slice spans more than one `T` element in
+memory.
+
+Little-endian significance ordering means that if a bit-slice occupies an array
+`[A, B, C]`, then the bits stored in `A` are considered to contain the least
+significant segment of the stored integer, then `B` contains the middle segment,
+and then `C` contains the most significant segment.
+
+An integer is broken into segments in order, that is, the raw bit-pattern is
+fractured into `0b<padding><C><B><A>`. High bits beyond the length of the
+bit-slice into which the integer is stored are truncated.
+
+It is important to note that the `O: BitOrder` parameter of the bit-slice into
+which the value is stored **does not** affect the bit-pattern of the stored
+segments. They are always stored exactly as they exist in an ordinary integer.
+The ordering parameter only affects *which* bits in an element are available for
+storage.
+
+## Type Parameters
+
+- `I`: The integer type being stored. This can be any of the signed or unsigned
+ integers.
+
+## Parameters
+
+- `&mut self`: A bit-slice region whose length is in the range `1 ..= I::BITS`.
+- `value`: An integer value whose `self.len()` least numerically significant
+ bits will be written into `self`.
+
+## Panics
+
+This panics if `self.len()` is 0, or greater than `I::BITS`.
+
+## Examples
+
+Let us consider an `i32` value stored in 24 bits of a `BitSlice<u8, Msb0>`:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 4];
+let bits = raw.view_bits_mut::<Msb0>();
+
+let integer = 0x00__B4_96_3Cu32 as i32;
+bits[4 .. 28].store_le::<i32>(integer);
+let loaded = bits[4 .. 28].load_le::<i32>();
+assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32);
+```
+
+Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the
+value was considered to be negative when interpreted as an `i24` and was
+sign-extended through the highest byte.
+
+Let us now look at the memory representation of this value:
+
+```rust
+# use bitvec::prelude::*;
+# let mut raw = [0u8; 4];
+# let bits = raw.view_bits_mut::<Msb0>();
+# bits[4 .. 28].store_le::<u32>(0x00B4963Cu32);
+assert_eq!(raw, [
+ 0b0000_1100,
+// dead 0xC
+ 0b0110_0011,
+// 0x6 0x3
+ 0b0100_1001,
+// 0x4 0x9
+ 0b1011_0000,
+// 0xB dead
+]);
+```
+
+Notice how while the `Msb0` bit-ordering means that indexing within the
+bit-slice proceeds left-to-right in each element, and the bit-patterns in each
+element proceed left-to-right in the aggregate and the decomposed literals, the
+ordering of the elements is reversed from how the literal was written.
+
+In the sequence `B4963C`, `B` is the most significant nibble, and so it is
+placed highest in memory. `49` and `63` each fill a whole byte, and are stored
+directly as written. Lastly, `C` is the least significant nibble, and is placed
+lowest in memory.
+
+Now let’s look at the way different `BitOrder` parameters interpret the
+placement of bit indices within memory:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [
+// Bit index 14 ←
+// Lsb0: ─┤
+ 0b0100_0000_0000_0011u16,
+// Msb0: ├─
+// → 14
+
+// Bit index ← 19 16
+// Lsb0: ├──┤
+ 0b0001_0000_0000_1110u16,
+// Msb0: ├──┤
+// 16 19 →
+];
+
+assert_eq!(
+ raw.view_bits::<Lsb0>()
+ [14 .. 20]
+ .load_le::<u8>(),
+ 0b00_1110_01,
+);
+assert_eq!(
+ raw.view_bits::<Msb0>()
+ [14 .. 20]
+ .load_le::<u8>(),
+ 0b00_0001_11,
+);
+```
+
+Notice how the bit-orderings change which *parts* of the memory are loaded, but
+in both cases the segment in `raw[0]` is less significant than the segment in
+`raw[1]`, and the ordering of bits *within* each segment is unaffected by the
+bit-ordering.
+
+## Notes
+
+Be sure to see the documentation for
+[`<BitSlice<_, Lsb0> as BitField>::store_le`][lsl] and
+[`<BitSlice<_, Msb0> as BitField>::store_le`][msl] for more detailed information
+on the memory views!
+
+You can view the mask of all *storage regions* of a bit-slice by using its
+[`.domain()`] method to view the breakdown of its memory region, then print the
+[`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are
+always used in their entirety. You should use the `domain` module’s types
+whenever you are uncertain of the exact locations in memory that a particular
+bit-slice governs.
+
+[lsl]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_le-3
+[msl]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_le-4
+[`PartialElement`]: crate::domain::PartialElement
+[`.domain()`]: crate::slice::BitSlice::domain
+[`.mask()`]: crate::domain::PartialElement::mask
diff --git a/doc/field/get.md b/doc/field/get.md
new file mode 100644
index 0000000..7d75eb5
--- /dev/null
+++ b/doc/field/get.md
@@ -0,0 +1,26 @@
+# Partial-Element Getter
+
+This function extracts a portion of an integer value from a [`PartialElement`].
+The `BitField` implementations call it as they assemble a complete integer. It
+performs the following steps:
+
+1. the `PartialElement` is loaded (and masked to discard unused bits),
+1. the loaded value is then shifted to abut the LSedge of the stack local,
+1. and then `resize`d into a `U` value.
+
+## Type Parameters
+
+- `O` and `T` are the type parameters of the `PartialElement` argument.
+- `U` is the destination integer type.
+
+## Parameters
+
+- `elem`: A `PartialElement` containing a value segment.
+- `shamt`: The distance by which to right-shift the value loaded from `elem` so
+ that it abuts the LSedge.
+
+## Returns
+
+The segment of an integer stored in `elem`.
+
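+As a rough standalone illustration of these steps (a hypothetical helper on
+plain integers, not the crate’s internal signature):
+
+```rust
+// Mask away unused bits, shift down to the LSedge, then zero-extend.
+fn get_segment(elem: u8, mask: u8, shamt: u32) -> u16 {
+  ((elem & mask) >> shamt) as u16
+}
+
+assert_eq!(get_segment(0b0011_0100, 0b0011_1100, 2), 0b1101);
+```
+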
+[`PartialElement`]: crate::domain::PartialElement
diff --git a/doc/field/impl_BitArray.md b/doc/field/impl_BitArray.md
new file mode 100644
index 0000000..213eac7
--- /dev/null
+++ b/doc/field/impl_BitArray.md
@@ -0,0 +1,9 @@
+# Bit-Array Implementation of `BitField`
+
+The `BitArray` implementation is only ever called when the entire bit-array is
+available for use, which means it can skip the bit-slice memory detection and
+instead use the underlying storage elements directly.
+
+The implementation still performs the segmentation for each element contained in
+the array, in order to maintain value consistency so that viewing the array as a
+bit-slice is still able to correctly interact with data contained in it.
diff --git a/doc/field/io.md b/doc/field/io.md
new file mode 100644
index 0000000..1617dba
--- /dev/null
+++ b/doc/field/io.md
@@ -0,0 +1,10 @@
+# Bit-Field I/O Protocols
+
+This module defines the standard-library `io::{Read, Write}` byte-oriented
+protocols on `bitvec` structures that are capable of operating on bytes through
+the `BitField` trait.
+
+Note that calling [`BitField`] methods in a loop imposes a non-trivial, and
+irremovable, performance penalty on each invocation. The `.read()` and
+`.write()` methods implemented in this module are going to suffer this cost, and
+you should prefer to operate directly on the underlying buffer if possible.
diff --git a/doc/field/io/Read_BitSlice.md b/doc/field/io/Read_BitSlice.md
new file mode 100644
index 0000000..92f96dc
--- /dev/null
+++ b/doc/field/io/Read_BitSlice.md
@@ -0,0 +1,20 @@
+# Reading From a Bit-Slice
+
+The implementation loads bytes out of the referenced bit-slice until either the
+destination buffer is filled or the source has no more bytes to provide. When
+`.read()` returns, the provided bit-slice handle will have been updated to no
+longer include the leading segment copied out as bytes into `buf`.
+
+Note that the return value of `.read()` is always the number of *bytes* of `buf`
+filled!
+
+The implementation uses [`BitField::load_be`] to collect bytes. Note that unlike
+the standard library, it is implemented on bit-slices of *any* underlying
+element type. However, using a `BitSlice<u8, _>` is still likely to be fastest.
+
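+A brief sketch of this behavior (illustrative only; it assumes a `std` build,
+since the `Read` trait comes from the standard library):
+
+```rust
+use bitvec::prelude::*;
+use std::io::Read;
+
+let raw = [0x12u8, 0x34, 0x56];
+// Skip the first nibble; 20 bits remain, of which 16 can fill whole bytes.
+let mut bits = &raw.view_bits::<Msb0>()[4 ..];
+let mut buf = [0u8; 4];
+let n = bits.read(&mut buf).unwrap();
+assert_eq!(n, 2);
+assert_eq!(buf, [0x23, 0x45, 0, 0]);
+// The handle has been advanced past the bytes that were read out.
+assert_eq!(bits.len(), 4);
+```
+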
+## Original
+
+[`impl Read for [u8]`][orig]
+
+[orig]: https://doc.rust-lang.org/std/primitive.slice.html#impl-Read
+[`BitField::load_be`]: crate::field::BitField::load_be
diff --git a/doc/field/io/Read_BitVec.md b/doc/field/io/Read_BitVec.md
new file mode 100644
index 0000000..febeed7
--- /dev/null
+++ b/doc/field/io/Read_BitVec.md
@@ -0,0 +1,14 @@
+# Reading From a Bit-Vector
+
+The implementation loads bytes out of the referenced bit-vector until either the
+destination buffer is filled or the source has no more bytes to provide. When
+`.read()` returns, the provided bit-vector will have its contents shifted down
+so that it begins at the first bit *after* the last byte copied out into `buf`.
+
+Note that the return value of `.read()` is always the number of *bytes* of `buf`
+filled!
+
+## API Differences
+
+The standard library does not `impl Read for Vec<u8>`. It is provided here as a
+courtesy.
diff --git a/doc/field/io/Write_BitSlice.md b/doc/field/io/Write_BitSlice.md
new file mode 100644
index 0000000..023676d
--- /dev/null
+++ b/doc/field/io/Write_BitSlice.md
@@ -0,0 +1,20 @@
+# Writing Into a Bit-Slice
+
+The implementation stores bytes into the referenced bit-slice until either the
+source buffer is exhausted or the destination has no more slots to fill. When
+`.write()` returns, the provided bit-slice handle will have been updated to no
+longer include the leading segment filled with bytes from `buf`.
+
+Note that the return value of `.write()` is always the number of *bytes* of
+`buf` consumed!
+
+The implementation uses [`BitField::store_be`] to fill bytes. Note that unlike
+the standard library, it is implemented on bit-slices of *any* underlying
+element type. However, using a `BitSlice<u8, _>` is still likely to be fastest.
+
+## Original
+
+[`impl Write for [u8]`][orig]
+
+[orig]: https://doc.rust-lang.org/std/primitive.slice.html#impl-Write
+[`BitField::store_be`]: crate::field::BitField::store_be
diff --git a/doc/field/io/Write_BitVec.md b/doc/field/io/Write_BitVec.md
new file mode 100644
index 0000000..d3887bd
--- /dev/null
+++ b/doc/field/io/Write_BitVec.md
@@ -0,0 +1,18 @@
+# Writing Into a Bit-Vector
+
+The implementation appends bytes to the referenced bit-vector until the source
+buffer is exhausted.
+
+Note that the return value of `.write()` is always the number of *bytes* of
+`buf` consumed!
+
+The implementation uses [`BitField::store_be`] to fill bytes. Note that unlike
+the standard library, it is implemented on bit-vectors of *any* underlying
+element type. However, using a `BitVec<u8, _>` is still likely to be fastest.
+
+## Original
+
+[`impl Write for Vec<u8>`][orig]
+
+[orig]: https://doc.rust-lang.org/std/vec/struct.Vec.html#impl-Write
+[`BitField::store_be`]: crate::field::BitField::store_be
diff --git a/doc/field/resize.md b/doc/field/resize.md
new file mode 100644
index 0000000..f41f412
--- /dev/null
+++ b/doc/field/resize.md
@@ -0,0 +1,18 @@
+# Value Resizing
+
+This zero-extends or truncates a source value to fit into a target type.
+
+## Type Parameters
+
+- `T`: The initial integer type of the value being resized.
+- `U`: The destination type of the value after resizing.
+
+## Parameters
+
+- `value`: Any (unsigned) integer.
+
+## Returns
+
+`value`, either zero-extended in the most-significant bits (if `U` is wider than
+`T`) or truncated retaining the least-significant bits (if `U` is narrower than
+`T`).
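+
+As a standalone illustration (plain `as` casts, not the crate’s internal
+function), the described behavior matches ordinary unsigned integer casting:
+
+```rust
+// Widening zero-extends in the most-significant bits.
+assert_eq!(0x1234_u16 as u32, 0x0000_1234_u32);
+// Narrowing keeps only the least-significant bits.
+assert_eq!(0x1234_u16 as u8, 0x34_u8);
+```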
diff --git a/doc/field/set.md b/doc/field/set.md
new file mode 100644
index 0000000..a82b9bf
--- /dev/null
+++ b/doc/field/set.md
@@ -0,0 +1,25 @@
+# Partial-Element Setter
+
+This function inserts a portion of an integer value into a [`PartialElement`].
+The `BitField` implementations call it as they disassemble a complete integer.
+It performs the following steps:
+
+1. the value is `resize`d into a `T::Mem`,
+1. shifted up from LSedge as needed to fit in the governed region of the partial
+ element,
+1. and then stored (after masking away excess bits) through the `PartialElement`
+ into memory.
+
+## Type Parameters
+
+- `O` and `T` are the type parameters of the `PartialElement` argument.
+- `U` is the source integer type.
+
+## Parameters
+
+- `elem`: A `PartialElement` into which a value segment will be written.
+- `value`: A value, whose least-significant bits will be written into `elem`.
+- `shamt`: The shift distance from the storage location’s LSedge to its live
+ bits.
+
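+As a rough standalone illustration of these steps (a hypothetical helper on
+plain integers, not the crate’s internal signature):
+
+```rust
+// Truncate the value, shift it up to the governed region, mask, and merge.
+fn set_segment(elem: &mut u8, mask: u8, shamt: u32, value: u16) {
+  *elem = (*elem & !mask) | (((value as u8) << shamt) & mask);
+}
+
+let mut elem = 0b1100_0011u8;
+set_segment(&mut elem, 0b0011_1100, 2, 0b1101);
+assert_eq!(elem, 0b1111_0111);
+```
+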
+[`PartialElement`]: crate::domain::PartialElement
diff --git a/doc/field/sign.md b/doc/field/sign.md
new file mode 100644
index 0000000..0c5593d
--- /dev/null
+++ b/doc/field/sign.md
@@ -0,0 +1,30 @@
+# Sign Extension
+
+When a bit-slice loads a value whose destination type is wider than the
+bit-slice itself, and the destination type is a signed integer, the loaded value
+must be sign-extended. The load accumulator always begins as the zero pattern,
+and the loaders do not attempt to detect a sign bit before they begin.
+
+As such, this function takes a value loaded out of a bit-slice, which has been
+zero-extended from the storage length to the destination type, and the length of
+the bit-slice that contained it. If the destination type is unsigned, then the
+value is returned as-is; if the destination type is signed, then the value is
+sign-extended according to the bit at `1 << (width - 1)`.
+
+## Type Parameters
+
+- `I`: The integer type of the loaded element. When this is one of
+ `u{8,16,32,64,size}`, no sign extension takes place.
+
+## Parameters
+
+- `elem`: The value loaded out of a bit-slice.
+- `width`: The width in bits of the source bit-slice. This is always known to be
+ in the domain `1 ..= I::BITS`.
+
+## Returns
+
+A correctly-signed copy of `elem`. Unsigned integers, and signed integers whose
+most significant loaded bit was `0`, are untouched. Signed integers whose most
+significant loaded bit was `1` have their remaining high bits set to `1` for
+sign extension.
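+
+A standalone sketch of this rule for a 16-bit destination (a hypothetical
+helper, not the crate’s internal function) uses the shift-up, arithmetic
+shift-down idiom:
+
+```rust
+// Replicate the most significant *loaded* bit through the high bits.
+fn sign_extend_i16(elem: u16, width: u32) -> i16 {
+  let shamt = 16 - width;
+  ((elem << shamt) as i16) >> shamt
+}
+
+// `0b10110` loaded from a 5-bit slice becomes -10 as an `i16`.
+assert_eq!(sign_extend_i16(0b10110, 5), -10);
+// Values whose most significant loaded bit is `0` are unchanged.
+assert_eq!(sign_extend_i16(0b01011, 5), 11);
+```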
diff --git a/doc/index.md b/doc/index.md
new file mode 100644
index 0000000..9ee7603
--- /dev/null
+++ b/doc/index.md
@@ -0,0 +1,45 @@
+# Bit Indices
+
+This module provides well-typed counters for working with bit-storage registers.
+The session types encode a strict chain of custody for translating semantic
+indices within [`BitSlice`] regions into real effects in memory.
+
+The main advantage of types within this module is that they provide
+register-dependent range requirements for counter values, making it impossible
+to have an index out of bounds for a register. They also create a sequence of
+type transformations that assure the library about the continued validity of
+each value in its surrounding context.
+
+By eliminating public constructors from arbitrary integers, `bitvec` can
+guarantee that only it can produce initial values, and only trusted functions
+can transform their numeric values or types, until the program reaches the
+property that it requires. This chain of assurance means that memory operations
+can be confident in the correctness of their actions and effects.
+
+## Type Sequence
+
+The library produces [`BitIdx`] values from region computation. These types
+cannot be publicly constructed, and are only ever the result of pointer
+analysis. As such, they rely on the correctness of the memory regions provided
+to library entry points, and those entry points can leverage the Rust type
+system to ensure safety there.
+
+`BitIdx` is transformed to [`BitPos`] through the [`BitOrder`] trait. The
+[`order`] module provides verification functions that implementors can use to
+demonstrate correctness. `BitPos` is the basis type that describes memory
+operations, and is used to create the selection masks [`BitSel`] and
+[`BitMask`].
+
+## Usage
+
+The types in this module should only be used by client crates in their test
+suites. They have no other purpose, and conjuring values for them is potentially
+memory-unsafe.
+
+[`BitIdx`]: self::BitIdx
+[`BitMask`]: self::BitMask
+[`BitOrder`]: crate::order::BitOrder
+[`BitPos`]: self::BitPos
+[`BitSel`]: self::BitSel
+[`BitSlice`]: crate::slice::BitSlice
+[`order`]: crate::order
diff --git a/doc/index/BitEnd.md b/doc/index/BitEnd.md
new file mode 100644
index 0000000..3fd94f9
--- /dev/null
+++ b/doc/index/BitEnd.md
@@ -0,0 +1,35 @@
+# One-Bit-After Tail Index
+
+This is a semantic bit-index within *or one bit after* an `R` register. It is
+the index of the first “dead” bit after a “live” region, and corresponds to the
+half-open range concept in the Rust `Range` type and in the LLVM memory model,
+where a pointer value may be the address one object past the end of a region.
+
+It is a counter in the ring `0 ..= R::BITS` (note the inclusive high end). Like
+[`BitIdx`], this is a virtual semantic index with no bearing on real memory
+effects; unlike `BitIdx`, it can never be translated to real memory because it
+does not describe real memory.
+
+This type is necessary in order to preserve the distinction between a dead
+memory address that is *not* part of a region and a live memory address that
+*is* within a region. Additionally, it makes computation of region extension or
+offsets easy. `BitIdx` is insufficient to this task, and produces off-by-one
+errors when used in its stead.
+
+## Type Parameters
+
+- `R`: The register element that this dead-bit index governs.
+
+## Validity
+
+Values of this type are **required** to be in the range `0 ..= R::BITS`. Any
+value greater than [`R::BITS`] makes the program invalid and will likely cause
+either a crash or incorrect memory access.
+
+## Construction
+
+This type cannot be publicly constructed except by using the iterators provided
+for testing.
+
+[`BitIdx`]: crate::index::BitIdx
+[`R::BITS`]: funty::Integral::BITS
diff --git a/doc/index/BitIdx.md b/doc/index/BitIdx.md
new file mode 100644
index 0000000..c5954e9
--- /dev/null
+++ b/doc/index/BitIdx.md
@@ -0,0 +1,31 @@
+# Semantic Bit Index
+
+This type is a counter in the ring `0 .. R::BITS` and serves to mark a semantic
+index within some register element. It is a virtual index, and is the stored
+value used in pointer encodings to track region start information.
+
+It is translated to a real index through the [`BitOrder`] trait. This virtual
+index is the only counter that can be used for address computation, and once
+lowered to an electrical index through [`BitOrder::at`], the electrical address
+can only be used for setting up machine instructions.
+
+## Type Parameters
+
+- `R`: The register element that this index governs.
+
+## Validity
+
+Values of this type are **required** to be in the range `0 .. R::BITS`. Any
+value not less than [`R::BITS`] makes the program invalid, and will likely cause
+either a crash or incorrect memory access.
+
+## Construction
+
+This type can never be constructed outside of the `bitvec` crate. It is passed
+in to [`BitOrder`] implementations, which may use it to construct electrical
+position values from it. All values of this type constructed by `bitvec` are
+known to be correct in their region; no other construction site can be trusted.
+
+[`BitOrder`]: crate::order::BitOrder
+[`BitOrder::at`]: crate::order::BitOrder::at
+[`R::BITS`]: funty::Integral::BITS
diff --git a/doc/index/BitIdxError.md b/doc/index/BitIdxError.md
new file mode 100644
index 0000000..852285d
--- /dev/null
+++ b/doc/index/BitIdxError.md
@@ -0,0 +1,6 @@
+# Bit Index Error
+
+This type marks that a value is out of range to be used as an index within an
+`R` element. It is likely never produced, as `bitvec` does not construct invalid
+indices, but is provided for completeness and to ensure that in the event of
+this error occurring, the diagnostic information is useful.
diff --git a/doc/index/BitMask.md b/doc/index/BitMask.md
new file mode 100644
index 0000000..e9a49f7
--- /dev/null
+++ b/doc/index/BitMask.md
@@ -0,0 +1,19 @@
+# Multi-Bit Selection Mask
+
+Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this type
+permits any number of bits to be set or cleared. This is used to accumulate
+selections for batched operations on a register in real memory.
+
+## Type Parameters
+
+- `R`: The register element that this mask governs.
+
+## Construction
+
+This must only be constructed by combining `BitSel` selection masks produced
+through the accepted chains of custody beginning with [`BitIdx`] values.
+Bit-masks not constructed in this manner are not guaranteed to be correct in the
+caller’s context and may lead to incorrect memory behaviors.
+
+[`BitIdx`]: crate::index::BitIdx
+[`BitSel`]: crate::index::BitSel
diff --git a/doc/index/BitPos.md b/doc/index/BitPos.md
new file mode 100644
index 0000000..4b437d7
--- /dev/null
+++ b/doc/index/BitPos.md
@@ -0,0 +1,29 @@
+# Bit Position
+
+This is a position counter of a real bit in an `R` memory element.
+
+Like [`BitIdx`], it is a counter in the ring `0 .. R::BITS`. It marks a real bit
+in memory, and is the shift distance in the expression `1 << n`. It can only be
+produced by applying [`BitOrder::at`] to an existing `BitIdx` produced by
+`bitvec`.
+
+## Type Parameters
+
+- `R`: The register element that this position governs.
+
+## Validity
+
+Values of this type are **required** to be in the range `0 .. R::BITS`. Any
+value not less than [`R::BITS`] makes the program invalid, and will likely cause
+either a crash or incorrect memory access.
+
+## Construction
+
+This type is publicly constructible, but is only correct to do so within an
+implementation of `BitOrder::at`. `bitvec` will only request its creation
+through that trait implementation, and has no sites that can publicly accept
+untrusted values.
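+
+## Examples
+
+As an illustrative sketch, the crate-provided orderings show how indices
+translate into positions. This assumes the public `BitIdx::new` constructor and
+the `into_inner` accessors used elsewhere in this documentation:
+
+```rust
+use bitvec::{
+	index::BitIdx,
+	order::{BitOrder, Lsb0, Msb0},
+};
+
+let idx = BitIdx::<u8>::new(0).unwrap();
+// `Lsb0` maps semantic index 0 to electrical position 0 …
+assert_eq!(Lsb0::at::<u8>(idx).into_inner(), 0);
+// … while `Msb0` maps it to the most significant position.
+assert_eq!(Msb0::at::<u8>(idx).into_inner(), 7);
+```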
+
+[`BitIdx`]: crate::index::BitIdx
+[`BitOrder::at`]: crate::order::BitOrder::at
+[`R::BITS`]: funty::Integral::BITS
diff --git a/doc/index/BitSel.md b/doc/index/BitSel.md
new file mode 100644
index 0000000..940f314
--- /dev/null
+++ b/doc/index/BitSel.md
@@ -0,0 +1,28 @@
+# One-Hot Bit Selection Mask
+
+This type selects exactly one bit in a register. It is a [`BitPos`] shifted from
+a counter to a selector, and is used to apply test and write operations to real
+memory.
+
+## Type Parameters
+
+- `R`: The register element this selector governs.
+
+## Validity
+
+Values of this type are **required** to have exactly one bit set and all others
+cleared. Any other value makes the program incorrect, and will cause memory
+corruption.
+
+## Construction
+
+This type is only constructed from `BitPos`, and is always equivalent to
+`1 << BitPos`.
+
+The chain of custody from known-good [`BitIdx`] values, through proven-good
+[`BitOrder`] implementations, into `BitPos` and then `BitSel` proves that values
+of this type are always correct to apply to real memory.
+
+[`BitIdx`]: crate::index::BitIdx
+[`BitOrder`]: crate::order::BitOrder
+[`BitPos`]: crate::index::BitPos
diff --git a/doc/macros.md b/doc/macros.md
new file mode 100644
index 0000000..e18553b
--- /dev/null
+++ b/doc/macros.md
@@ -0,0 +1,31 @@
+# Constructor Macros
+
+This module provides macros that can be used to create `bitvec` data buffers at
+compile time. Each data structure has a corresponding macro:
+
+- `BitSlice` has [`bits!`]
+- `BitArray` has [`bitarr!`] (and [`BitArr!`] to produce type expressions)
+- `BitBox` has [`bitbox!`]
+- `BitVec` has [`bitvec!`]
+
+These macros take a sequence of bit literals, as well as some optional control
+prefixes, and expand to code that is generally solvable at compile-time. The
+provided bit-orderings `Lsb0` and `Msb0` have implementations that can be used
+in `const` contexts, while third-party user-provided orderings cannot be used in
+`const` contexts but almost certainly *can* be const-folded by LLVM.
+
+The sequences are encoded into element literals during compilation and are
+emitted correctly into the target binary, even for targets whose
+byte-endianness differs from that of the host compiler.
+
+See each macro for documentation on its invocation syntax. The general pattern
+is `[modifier] [T, O;] bits…`. The modifiers influence the nature of the
+produced binding, the `[T, O;]` pair provides type parameters when the default
+is undesirable, and the `bits…` provides the actual contents of the data
+buffer.
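+
+As a quick sketch of that shared grammar (using only crate-provided orderings
+and the default, allocating, feature set):
+
+```rust
+use bitvec::prelude::*;
+
+// Value constructors, with and without explicit type parameters.
+let arr = bitarr![u8, Msb0; 1, 0, 1, 0];
+let boxed = bitbox![0; 16];
+let vec = bitvec![u16, Lsb0; 0, 1, 1];
+// A modifier prefix producing a mutable bit-slice view.
+let slice = bits![mut 0; 4];
+slice.set(0, true);
+
+assert_eq!(arr.count_ones(), 2);
+assert_eq!(boxed.len(), 16);
+assert_eq!(vec.len(), 3);
+assert!(slice[0]);
+```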
+
+[`BitArr!`]: macro@crate::BitArr
+[`bitarr!`]: macro@crate::bitarr
+[`bitbox!`]: macro@crate::bitbox
+[`bits!`]: macro@crate::bits
+[`bitvec!`]: macro@crate::bitvec
diff --git a/doc/macros/BitArr_type.md b/doc/macros/BitArr_type.md
new file mode 100644
index 0000000..b21ec39
--- /dev/null
+++ b/doc/macros/BitArr_type.md
@@ -0,0 +1,35 @@
+# Bit-Array Type Definition
+
+Because `BitArray<T, O, const BITS: usize>` is not expressible in stable Rust,
+this macro serves the purpose of creating a type definition that expands to a
+suitable `BitArray`. It creates the correct, rounded-up, `BitArray` to hold a
+requested number of bits in a requested set of ordering/storage parameters.
+
+The macro takes a minimum number of bits to store, and an optional set of
+bit-order and bit-store type names, and creates a `BitArray` that satisfies the
+request. As this macro is only usable in type position, it is named with
+`PascalCase` rather than `snake_case`.
+
+## Examples
+
+You must provide a bit-count; you may optionally provide a storage type, or a
+storage type *and* a bit-ordering, as subsequent arguments. When elided, the
+type parameters are set to the crate default type parameters of `usize` and
+`Lsb0`.
+
+```rust
+use bitvec::prelude::*;
+use core::cell::Cell;
+
+let a: BitArr!(for 100) = BitArray::ZERO;
+let b: BitArr!(for 100, in u32) = BitArray::<_>::ZERO;
+let c: BitArr!(for 100, in Cell<u16>, Msb0) = BitArray::<_, _>::ZERO;
+```
+
+The length expression must be `const`. It may be a literal, a named `const`
+item, or a `const` expression, as long as it evaluates to a `usize`. The type
+arguments have no restrictions, as long as they are in-scope at the invocation
+site and are implementors of [`BitOrder`] and [`BitStore`].
+
+[`BitOrder`]: crate::order::BitOrder
+[`BitStore`]: crate::store::BitStore
diff --git a/doc/macros/bitarr_value.md b/doc/macros/bitarr_value.md
new file mode 100644
index 0000000..ee14869
--- /dev/null
+++ b/doc/macros/bitarr_value.md
@@ -0,0 +1,61 @@
+# Bit-Array Value Constructor
+
+This macro provides a bit-initializer syntax for [`BitArray`] values. It takes a
+superset of the [`vec!`] arguments, and is capable of producing bit-arrays in
+`const` contexts (for known type parameters).
+
+Like `vec!`, it can accept a sequence of comma-separated bit values, or a
+semicolon-separated pair of a bit value and a repetition counter. Bit values may
+be any integer or name of a `const` integer, but *should* only be `0` or `1`.
+
+## Argument Syntax
+
+It accepts zero, one, or three prefix arguments:
+
+- `const`: If the first argument to the macro is the keyword `const`, separated
+ from remaining arguments by a space, then the macro expands to a
+ `const`-expression that can be used in any appropriate context (initializing
+ a `static`, a `const`, or passed to a `const fn`). This only works when the
+ bit-ordering argument is either implicit, or one of the three tokens that
+ `bitvec` can recognize.
+- `$order ,`: When this is one of the three literal tokens `LocalBits`, `Lsb0`,
+ or `Msb0`, then the macro is able to compute the encoded bit-array contents at
+ compile time, including in `const` contexts. When it is anything else, the
+ encoding must take place at runtime. The name or path chosen must be in scope
+ at the macro invocation site.
+
+  When not provided, this defaults to `Lsb0`.
+- `$store ;`: This must be one of `uTYPE`, `Cell<uTYPE>`, `AtomicUTYPE`, or
+ `RadiumUTYPE` where `TYPE` is one of `8`, `16`, `32`, `64`, or `size`. The
+ macro recognizes this token textually, and does not have access to the type
+ system resolver, so it will not accept aliases or qualified paths.
+
+ When not provided, this defaults to `usize`.
+
+The `const` argument can be present or absent independently of the
+type-parameter pair. The pair must be either both absent or both present
+together.
+
+> Previous versions of `bitvec` supported `$order`-only arguments. This has been
+> removed for clarity of use and ease of implementation.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use core::{cell::Cell, mem};
+use radium::types::*;
+
+let a: BitArray = bitarr![0, 1, 0, 0, 1];
+
+let b: BitArray = bitarr![1; 5];
+assert_eq!(b.len(), mem::size_of::<usize>() * 8);
+
+let c = bitarr![u16, Lsb0; 0, 1, 0, 0, 1];
+let d = bitarr![Cell<u16>, Msb0; 1; 10];
+const E: BitArray<[u32; 1], LocalBits> = bitarr![u32, LocalBits; 1; 15];
+let f = bitarr![RadiumU32, Msb0; 1; 20];
+```
+
+[`BitArray`]: crate::array::BitArray
+[`vec!`]: macro@alloc::vec
diff --git a/doc/macros/bitbox.md b/doc/macros/bitbox.md
new file mode 100644
index 0000000..1673a8f
--- /dev/null
+++ b/doc/macros/bitbox.md
@@ -0,0 +1,10 @@
+# Boxed Bit-Slice Constructor
+
+This macro creates encoded `BitSlice` buffers at compile-time, and at run-time
+copies them directly into a new heap allocation.
+
+It forwards all of its arguments to [`bitvec!`], and calls
+[`BitVec::into_boxed_bitslice`] on the produced `BitVec`.
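+
+## Examples
+
+A brief sketch, assuming the default (allocating) feature set:
+
+```rust
+use bitvec::prelude::*;
+
+let boxed: BitBox = bitbox![0, 1, 0, 1];
+assert_eq!(boxed.len(), 4);
+assert_eq!(boxed.count_ones(), 2);
+
+// The same `[T, O;]` and repetition arguments as `bitvec!` are accepted.
+let wide = bitbox![u16, Msb0; 1; 20];
+assert_eq!(wide.len(), 20);
+```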
+
+[`BitVec::into_boxed_bitslice`]: crate::vec::BitVec::into_boxed_bitslice
+[`bitvec!`]: macro@crate::bitvec
diff --git a/doc/macros/bits.md b/doc/macros/bits.md
new file mode 100644
index 0000000..c639434
--- /dev/null
+++ b/doc/macros/bits.md
@@ -0,0 +1,102 @@
+# Bit-Slice Region Constructor
+
+This macro provides a bit-initializer syntax for [`BitSlice`] reference values.
+It takes a superset of the [`vec!`] arguments, and is capable of producing
+bit-slices in `const` contexts (for known type parameters).
+
+Like `vec!`, it can accept a sequence of comma-separated bit values, or a
+semicolon-separated pair of a bit value and a repetition counter. Bit values may
+be any integer or name of a `const` integer, but *should* only be `0` or `1`.
+
+## Argument Syntax
+
+It accepts two modifier prefixes, zero or two type parameters, and the bit
+expressions described above.
+
+The modifier prefixes are separated from the remaining arguments by clearspace.
+
+- `static`: If the first argument is the keyword `static`, then this produces a
+ `&'static BitSlice` reference bound into a (hidden, unnameable)
+ `static BitArray` item. If not, then it produces a stack temporary that the
+ Rust compiler automatically extends to have the lifetime of the returned
+ reference. Note that non-`static` invocations rely on the compiler’s escape
+ analysis, and you should typically not try to move them up the call stack.
+- `mut`: If the first argument is the keyword `mut`, then this produces a `&mut`
+ writable `BitSlice`.
+- `static mut`: These can be combined to create a `&'static mut BitSlice`. It is
+ always safe to use this reference, because the `static mut BitArray` it
+ creates is concealed and unreachable by any other codepath, and so the
+ produced reference is always the sole handle that can reach it.
+
+The next possible arguments are a pair of `BitOrder`/`BitStore` type parameters.
+
+- `$order ,`: When this is one of the three literal tokens `LocalBits`, `Lsb0`,
+ or `Msb0`, then the macro is able to compute the encoded bit-array contents at
+ compile time, including in `const` contexts. When it is anything else, the
+ encoding must take place at runtime. The name or path chosen must be in scope
+ at the macro invocation site.
+
+ When not provided, this defaults to `Lsb0`.
+- `$store ;`: This must be one of `uTYPE`, `Cell<uTYPE>`, `AtomicUTYPE`, or
+ `RadiumUTYPE` where `TYPE` is one of `8`, `16`, `32`, `64`, or `size`. The
+ macro recognizes this token textually, and does not have access to the type
+ system resolver, so it will not accept aliases or qualified paths.
+
+ When not provided, this defaults to `usize`.
+
+The `static`/`mut` modifiers may be individually present or absent independently
+of the type-parameter pair. The pair must be either both absent or both present
+together.
+
+> Previous versions of `bitvec` supported `$order`-only arguments. This has been
+> removed for clarity of use and ease of implementation.
+
+## Safety
+
+Rust considers all `static mut` bindings to be `unsafe` to use. While `bits!`
+can prevent *some* of this unsafety by preventing direct access to the created
+`static mut` buffer, there are still ways to create multiple names referring to
+the same underlying buffer.
+
+```rust,ignore
+use bitvec::prelude::*;
+
+fn unsound() -> &'static mut BitSlice<usize, Lsb0> {
+ unsafe { bits![static mut 0; 64] }
+}
+
+let a = unsound();
+let b = unsound();
+```
+
+The two names `a` and `b` can be used to produce aliasing `&mut [usize]`
+references.
+
+**You must not invoke `bits![static mut …]` in a context where it can be used**
+**to create multiple escaping names**. This, and only this, argument combination
+of the macro produces a value that requires a call-site `unsafe` block to use.
+
+If you do not use this behavior to create multiple names over the same
+underlying buffer, then the macro’s expansion is safe to use, as `bitvec`’s
+existing alias-protection behavior suffices.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use core::cell::Cell;
+use radium::types::*;
+
+let a: &BitSlice = bits![0, 1, 0, 0, 1];
+
+let b: &BitSlice = bits![1; 5];
+assert_eq!(b.len(), 5);
+
+let c = bits![u16, Lsb0; 0, 1, 0, 0, 1];
+let d = bits![static Cell<u16>, Msb0; 1; 10];
+let e = unsafe { bits![static mut u32, LocalBits; 0; 15] };
+let f = bits![RadiumU32, Msb0; 1; 20];
+```
+
+[`BitSlice`]: crate::slice::BitSlice
+[`vec!`]: macro@alloc::vec
diff --git a/doc/macros/bitvec.md b/doc/macros/bitvec.md
new file mode 100644
index 0000000..6b52cd5
--- /dev/null
+++ b/doc/macros/bitvec.md
@@ -0,0 +1,12 @@
+# Bit-Vector Constructor
+
+This macro creates encoded `BitSlice` buffers at compile-time, and at run-time
+copies them directly into a new heap allocation.
+
+It forwards all of its arguments to [`bits!`], and calls
+[`BitVec::from_bitslice`] on the produced `&BitSlice` expression. While you can
+use the `bits!` modifiers, there is no point, as the produced bit-slice is lost
+before the macro exits.
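+
+## Examples
+
+A brief sketch, assuming the default (allocating) feature set:
+
+```rust
+use bitvec::prelude::*;
+
+let v: BitVec = bitvec![0, 1, 1, 0];
+assert_eq!(v.len(), 4);
+assert_eq!(v.count_ones(), 2);
+
+// Repetition and explicit type parameters work as in `bits!`.
+let w = bitvec![u8, Msb0; 1; 10];
+assert_eq!(w.len(), 10);
+assert!(w.all());
+```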
+
+[`BitVec::from_bitslice`]: crate::vec::BitVec::from_bitslice
+[`bits!`]: macro@crate::bits
diff --git a/doc/macros/encode_bits.md b/doc/macros/encode_bits.md
new file mode 100644
index 0000000..a88e3d0
--- /dev/null
+++ b/doc/macros/encode_bits.md
@@ -0,0 +1,62 @@
+# Bit-Sequence Buffer Encoding
+
+This macro accepts a sequence of bit expressions from the public macros and
+creates encoded `[T; N]` arrays from them. The public macros can then use these
+encoded arrays as the basis of the requested data structure.
+
+This is a complex macro that uses recursion to modify and inspect its input
+tokens. It is divided into three major sections.
+
+## Entry Points
+
+The first section provides a series of entry points that the public macros
+invoke. Each arm matches the syntax provided by public macros, and detects a
+specific `BitStore` implementor name: `uN`, `Cell<uN>`, `AtomicUN`, or
+`RadiumUN`, for each `N` in `8`, `16`, `32`, `64`, and `size`.
+
+These arms then recurse, adding a token for the raw unsigned integer used as the
+basis of the encoding. The `usize` arms take an additional recursion that routes
+to the 32-bit or 64-bit encoding, depending on the target.
+
+## Zero Extension
+
+The next two arms handle extending the list of bit-expressions with 64 `0,`s.
+The first arm captures initial reëntry and appends the zero-comma tokens, then
+recurses to enter the chunking group. The second arm traps when recursion has
+chunked all user-provided tokens, and only the literal `0,` tokens appended by
+the first arm remain.
+
+The second arm dispatches the chunked bit-expressions into the element encoder,
+and is the exit point of the macro. Its output is an array of encoded memory
+elements, typed as the initially-requested `BitStore` name.
+
+The `0,` tokens remain matchable as text literals because they never depart
+this macro: recursion within the same macro does not change the types in the
+AST, while invoking a new macro causes already-known tokens to become opacified
+into `:tt` whose contents cannot be matched. This is the reason that the macro
+is recursive rather than dispatching.
+
+## Chunking
+
+The stream of user-provided bit-expressions, followed by the appended zero-comma
+tokens, is divided into chunks by the width of the storage type.
+
+Each width (8, 16, 32, 64) has an arm that munches from the token stream and
+grows an opaque token-list containing munched groups. In syntax, this is
+represented by the `[$([$($bit:tt,)+],)*];` cluster:
+
+- it is an array
+ - of zero or more arrays
+ - of one or more bit expressions
+ - each followed by a comma
+ - each followed by a comma
+- followed by a semicolon
+
+By placing this array ahead of the bit-expression stream, we can use the array
+as an append-only list (matched as `[$($elem:tt)*]`, emitted as
+`[$($elem)* [new]]`) grown by munching from the token stream of unknown length
+at the end of the argument set.
+
+On each recursion, the second arm in zero-extension attempts to trap the input.
+If it fails, then user-provided tokens remain; if it succeeds, then it discards
+any remaining macro-appended zeros and terminates.
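+
+The following stand-alone sketch is a hypothetical miniature, not the real
+macro: it shows the same munch-and-append accumulator shape on a much simpler
+task, chunking a bit list into pairs.
+
+```rust
+macro_rules! pair_up {
+	// Entry: seed an empty accumulator ahead of the bit stream.
+	($($bit:tt),* $(,)?) => {
+		pair_up!([]; $($bit,)*)
+	};
+	// Munch two tokens at a time, appending a new chunk to the accumulator.
+	([$($chunk:tt)*]; $a:tt, $b:tt, $($rest:tt)*) => {
+		pair_up!([$($chunk)* [$a, $b]]; $($rest)*)
+	};
+	// Exit: nothing left to munch; emit the accumulated chunks.
+	([$([$a:tt, $b:tt])*];) => {
+		[$(($a, $b)),*]
+	};
+}
+
+let pairs = pair_up![0, 1, 1, 0];
+assert_eq!(pairs, [(0, 1), (1, 0)]);
+```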
diff --git a/doc/macros/internal.md b/doc/macros/internal.md
new file mode 100644
index 0000000..93213d2
--- /dev/null
+++ b/doc/macros/internal.md
@@ -0,0 +1,6 @@
+# Internal Macro Implementations
+
+The contents of this module are required to be publicly reachable from external
+crates, because that is the context in which the public macros expand; however,
+the contents of this module are **not** public API and `bitvec` does not support
+any use of it other than within the public macros.
diff --git a/doc/macros/make_elem.md b/doc/macros/make_elem.md
new file mode 100644
index 0000000..6684600
--- /dev/null
+++ b/doc/macros/make_elem.md
@@ -0,0 +1,21 @@
+# Element Encoder Macro
+
+This macro is invoked by `__encode_bits!` with a set of bits that exactly fills
+some `BitStore` element type. It is responsible for encoding those bits into the
+raw memory bytes and assembling them into a whole integer.
+
+It works by inspecting the `$order` argument. If it is one of `LocalBits`,
+`Lsb0`, or `Msb0`, then it can do the construction in-place, and get solved
+during `const` evaluation. If it is any other ordering, then it emits runtime
+code to do the translation and defers to the optimizer for evaluation.
+
+It divides the input into clusters of eight bit expressions, then uses the
+`$order` argument to choose whether the bits are accumulated into a `u8` using
+`Lsb0`, `Msb0`, or `LocalBits` ordering. The accumulated byte array is then
+converted into an integer using the corresponding `uN::from_{b,l,n}e_bytes`
+function in `__ty_from_bytes!`.
+
+Once assembled, the raw integer is changed into the requested final type. This
+currently routes through a helper type that unifies `const fn` constructors for
+each of the raw integer fundamentals, cells, and atomics in order to avoid
+transmutes.
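+
+As a rough, stand-alone sketch of the accumulation step (ordinary code, not the
+macro itself), eight bit expressions fold into one byte differently under each
+ordering:
+
+```rust
+let bits = [1u8, 0, 1, 1, 0, 0, 1, 0];
+
+// Lsb0: the first bit expression becomes the least significant bit.
+let lsb0 = bits
+	.iter()
+	.enumerate()
+	.fold(0u8, |acc, (i, &bit)| acc | (bit << i));
+// Msb0: the first bit expression becomes the most significant bit.
+let msb0 = bits
+	.iter()
+	.enumerate()
+	.fold(0u8, |acc, (i, &bit)| acc | (bit << (7 - i)));
+
+assert_eq!(lsb0, 0b0100_1101);
+assert_eq!(msb0, 0b1011_0010);
+```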
diff --git a/doc/mem.md b/doc/mem.md
new file mode 100644
index 0000000..0514b73
--- /dev/null
+++ b/doc/mem.md
@@ -0,0 +1,11 @@
+# Memory Element Descriptions
+
+This module describes the memory integers and processor registers used to hold
+and manipulate `bitvec` data buffers.
+
+The [`BitRegister`] trait marks the unsigned integers that correspond to
+processor registers, and can therefore be used for buffer control. The integers
+that are not `BitRegister` can be composed from register values, but are not
+able to be used in buffer type parameters.
+
+[`BitRegister`]: self::BitRegister
diff --git a/doc/mem/BitElement.md b/doc/mem/BitElement.md
new file mode 100644
index 0000000..a4dac86
--- /dev/null
+++ b/doc/mem/BitElement.md
@@ -0,0 +1,17 @@
+# Unified Element Constructor
+
+This type is a hack around the fact that `Cell` and `AtomicUN` all have
+`const fn new(val: Inner) -> Self;` constructors, but the numeric fundamentals
+do not. As such, the standard library does not provide a unified construction
+syntax to turn an integer fundamental into the final type.
+
+This provides a `const fn BitElement::<_>::new(R) -> Self;` function,
+implemented only for the `BitStore` implementors that the crate provides, that
+the constructor macros can use to turn integers into final values without using
+[`mem::transmute`][0]. While `transmute` is acceptable in this case (the types
+are all `#[repr(transparent)]`), it is still better avoided where possible.
+
+As this is a macro assistant, it is publicly exposed, but is not public API. It
+has no purpose outside of the crate’s macros.
+
+[0]: core::mem::transmute
diff --git a/doc/mem/BitRegister.md b/doc/mem/BitRegister.md
new file mode 100644
index 0000000..c4f02a1
--- /dev/null
+++ b/doc/mem/BitRegister.md
@@ -0,0 +1,6 @@
+# Register Descriptions
+
+This trait describes the unsigned integer types that can be manipulated in a
+target processor’s general-purpose registers. It has no bearing on the processor
+instructions or registers used to interact with the memory bus, and solely
+exists to describe integers that can exist on a system.
diff --git a/doc/mem/elts.md b/doc/mem/elts.md
new file mode 100644
index 0000000..3a6c204
--- /dev/null
+++ b/doc/mem/elts.md
@@ -0,0 +1,28 @@
+# Bit Storage Calculator
+
+Computes the number of `T` elements required to store some number of bits. `T`
+must be an unsigned integer type and cannot have padding bits, but this
+restriction cannot be placed on `const fn`s yet.
+
+## Parameters
+
+- `bits`: The number of bits being stored in a `[T]` array.
+
+## Returns
+
+The minimal `N` such that `[T; N]` holds at least `bits` bits.
+
+As this is a `const` function, when `bits` is also a `const` expression, it can
+be used to compute the size of an array type, such as
+`[u32; elts::<u32>(BITS)]`.
+
+## Examples
+
+```rust
+use bitvec::mem as bv_mem;
+
+assert_eq!(bv_mem::elts::<u8>(10), 2);
+assert_eq!(bv_mem::elts::<u8>(16), 2);
+
+let arr: [u16; bv_mem::elts::<u16>(20)] = [0; 2];
+```
diff --git a/doc/order.md b/doc/order.md
new file mode 100644
index 0000000..35771f8
--- /dev/null
+++ b/doc/order.md
@@ -0,0 +1,24 @@
+# In-Element Bit Ordering
+
+The `bitvec` memory model is designed to separate the semantic ordering of bits
+in an abstract memory space from the electrical ordering of latches in real
+memory. This module provides the bridge between the two domains with the
+[`BitOrder`] trait and implementations of it.
+
+The `BitOrder` trait bridges semantic indices (marked by the [`BitIdx`] type) to
+electrical position counters (marked by the [`BitPos`] type) or selection masks
+(marked by the [`BitSel`] and [`BitMask`] types).
+
+Because `BitOrder` is open for client crates to implement, this module also
+provides verification functions for the test suite that ensure a given
+`BitOrder` implementation is correct for all the register types that it will
+govern. See the [`verify_for_type`] or [`verify`] functions for more
+information.
+
+[`BitIdx`]: crate::index::BitIdx
+[`BitMask`]: crate::index::BitMask
+[`BitOrder`]: self::BitOrder
+[`BitPos`]: crate::index::BitPos
+[`BitSel`]: crate::index::BitSel
+[`verify`]: self::verify
+[`verify_for_type`]: self::verify_for_type
diff --git a/doc/order/BitOrder.md b/doc/order/BitOrder.md
new file mode 100644
index 0000000..7650402
--- /dev/null
+++ b/doc/order/BitOrder.md
@@ -0,0 +1,92 @@
+# In-Element Bit Ordering
+
+This trait manages the translation of semantic bit indices into electrical
+positions within storage elements of a memory region.
+
+## Usage
+
+`bitvec` APIs operate on semantic index counters that exist in an abstract
+memory space independently of the real memory that underlies them. In order to
+affect real memory, `bitvec` must translate these indices into real values. The
+[`at`] function maps abstract index values into their corresponding real
+positions that can then be used to access memory.
+
+You will likely never call any of the trait functions yourself. They are used by
+`bitvec` internals to operate on memory regions; all you need to do is provide
+an implementation of this trait as a type parameter to `bitvec` data structures.
+
+## Safety
+
+`BitOrder` is unsafe to implement because its translation of index to position
+cannot be forcibly checked by `bitvec` itself, and an improper implementation
+will lead to memory unsafety errors and unexpected collisions. The trait has
+strict requirements for each function. If these are not upheld, then the
+implementation is considered undefined at the library level and its use may
+produce incorrect or undefined behavior during compilation.
+
+You are responsible for running [`verify_for_type`] or [`verify`] in your test
+suite if you implement `BitOrder`.
+
+## Implementation Rules
+
+Values of this type are never constructed or passed to `bitvec` functions. Your
+implementation does not need to be zero-sized, but it will never have access to
+an instance to view its state. It *may* refer to other global state, but per the
+rules of `at`, that state may not change while any `bitvec` data structures are
+alive.
+
+The only function you *need* to provide is `at`. Its requirements are listed in
+its trait documentation.
+
+You *may* also choose to provide implementations of `select` and `mask`. These
+have a default implementation that is correct, but may be unoptimized for your
+implementation. As such, you may replace them with a better version, but your
+implementation of these functions must be exactly equal to the default
+implementation for all possible inputs.
+
+This requirement is checked by the `verify_for_type` function.
+
+## Verification
+
+The `verify_for_type` function verifies that a `BitOrder` implementation is
+correct for a single `BitStore` implementor, and the `verify` function runs
+`verify_for_type` on all unsigned integers that implement `BitStore` on a
+target. If you run these functions in your test suite, they will provide
+detailed information if your implementation is incorrect.
+
+## Examples
+
+Implementations are not required to remain contiguous over a register, and may
+have any mapping they wish as long as it is total and bijective. This example
+swizzles the high and low halves of each byte.
+
+```rust
+use bitvec::{
+ order::BitOrder,
+ index::{BitIdx, BitPos},
+ mem::BitRegister,
+};
+
+pub struct HiLo;
+
+unsafe impl BitOrder for HiLo {
+ fn at<R>(index: BitIdx<R>) -> BitPos<R>
+ where R: BitRegister {
+ unsafe { BitPos::new_unchecked(index.into_inner() ^ 4) }
+ }
+}
+
+#[test]
+#[cfg(test)]
+fn prove_hilo() {
+	bitvec::order::verify::<HiLo>(false);
+}
+```
+
+Once a `BitOrder` implementation passes the test suite, it can be freely used as
+a type parameter in `bitvec` data structures. The translation takes place
+automatically, and you never need to look at this trait again.
+
+[`at`]: Self::at
+[`verify`]: crate::order::verify
+[`verify_for_type`]: crate::order::verify_for_type
diff --git a/doc/order/LocalBits.md b/doc/order/LocalBits.md
new file mode 100644
index 0000000..0341a7a
--- /dev/null
+++ b/doc/order/LocalBits.md
@@ -0,0 +1,23 @@
+# C-Compatible Bit Ordering
+
+This type alias attempts to match the bitfield ordering used by GCC on your
+target. The C standard permits ordering of single-bit bitfields in a structure
+to be implementation-defined, and GCC has been observed to use Lsb0-ordering on
+little-endian processors and Msb0-ordering on big-endian processors.
+
+This has two important caveats:
+
+- ordering of bits in an element is **completely** independent of the ordering
+ of constituent bytes in memory. These have nothing to do with each other in
+ any way. See [the user guide][0] for more information on memory
+ representation.
+- GCC wide bitfields on big-endian targets behave as `<T, Lsb0>` bit-slices
+ using the `_be` variants of `BitField` accessors. They do not match `Msb0`
+ bit-wise ordering.
+
+This type is provided solely as a convenience for narrow use cases that *may*
+match GCC’s `std::bitset<N>`. It makes no guarantee about what C compilers for
+your target actually do, and you will need to do your own investigation if you
+are exchanging a single buffer across FFI in this manner.
+
+[0]: https://bitvecto-rs.github.io/bitvec/memory-representation
diff --git a/doc/order/Lsb0.md b/doc/order/Lsb0.md
new file mode 100644
index 0000000..6b14943
--- /dev/null
+++ b/doc/order/Lsb0.md
@@ -0,0 +1,13 @@
+# Least-Significant-First Bit Traversal
+
+This type orders the bits in an element with the least significant bit first and
+the most significant bit last, in contiguous order across the element.
+
+The guide has [a chapter][0] with more detailed information on the memory
+representation this produces.
+
+This is the default type parameter used throughout the crate. If you do not have
+a desired memory representation, you should continue to use it, as it provides
+the best codegen for bit manipulation.
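+
+## Examples
+
+A short sketch of how the ordering shows up in raw memory, contrasted with
+`Msb0` (this assumes the `BitArray::into_inner` accessor):
+
+```rust
+use bitvec::prelude::*;
+
+// The same bit sequence produces different raw bytes under each ordering.
+let lsb0 = bitarr![u8, Lsb0; 1, 1, 0, 0, 0, 0, 0, 0];
+let msb0 = bitarr![u8, Msb0; 1, 1, 0, 0, 0, 0, 0, 0];
+
+assert_eq!(lsb0.into_inner(), [0b0000_0011u8]);
+assert_eq!(msb0.into_inner(), [0b1100_0000u8]);
+```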
+
+[0]: https://bitvecto-rs.github.io/bitvec/memory-representation
diff --git a/doc/order/Msb0.md b/doc/order/Msb0.md
new file mode 100644
index 0000000..6aece3c
--- /dev/null
+++ b/doc/order/Msb0.md
@@ -0,0 +1,13 @@
+# Most-Significant-First Bit Traversal
+
+This type orders the bits in an element with the most significant bit first and
+the least significant bit last, in contiguous order across the element.
+
+The guide has [a chapter][0] with more detailed information on the memory
+representation this produces.
+
+This type likely matches the ordering of bits you would expect to see in a
+debugger, but has worse codegen than `Lsb0`, and is not encouraged if you are
+not doing direct memory inspection.
+
+[0]: https://bitvecto-rs.github.io/bitvec/memory-representation
diff --git a/doc/order/verify.md b/doc/order/verify.md
new file mode 100644
index 0000000..ec03ed5
--- /dev/null
+++ b/doc/order/verify.md
@@ -0,0 +1,23 @@
+# Complete `BitOrder` Verification
+
+This function checks some [`BitOrder`] implementation’s behavior on each of the
+[`BitRegister`] types present on the target, and reports any violation of the
+rules that it detects.
+
+## Type Parameters
+
+- `O`: The `BitOrder` implementation being tested.
+
+## Parameters
+
+- `verbose`: Controls whether the test should print diagnostic information to
+ standard output. If this is false, then the test only prints a message on
+ failure; if it is true, it emits a message for every test it executes.
+
+## Panics
+
+This panics when it detects a violation of the `BitOrder` rules. If it returns
+normally, then the implementation is correct.
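+
+## Examples
+
+A sketch of running the verifier inside a test, here over the crate-provided
+`Lsb0` ordering; a custom implementation is checked the same way:
+
+```rust
+use bitvec::order::{verify, Lsb0};
+
+// Check against every register width, printing only on failure.
+verify::<Lsb0>(false);
+```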
+
+[`BitOrder`]: crate::order::BitOrder
+[`BitRegister`]: crate::mem::BitRegister
diff --git a/doc/order/verify_for_type.md b/doc/order/verify_for_type.md
new file mode 100644
index 0000000..072dc2e
--- /dev/null
+++ b/doc/order/verify_for_type.md
@@ -0,0 +1,29 @@
+# Single-Type `BitOrder` Verification
+
+This function checks some [`BitOrder`] implementation’s behavior on only one
+[`BitRegister`] type. It can be used when a program knows that it will only use
+a limited set of storage types and does not need to check against all of them.
+
+You should prefer to use [`verify`], as `bitvec` has no means of preventing the
+use of a `BitRegister` storage type that your `BitOrder` implementation does not
+satisfy.
+
+## Type Parameters
+
+- `O`: The `BitOrder` implementation being tested.
+- `R`: The `BitRegister` type for which `O` is being tested.
+
+## Parameters
+
+- `verbose`: Controls whether the test should print diagnostic information to
+ standard output. If this is false, then the test only prints a message on
+ failure; if it is true, then it emits a message for every test it executes.
+
+## Panics
+
+This panics when it detects a violation of the `BitOrder` rules. If it returns
+normally, then the implementation is correct for the given `R` type.
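+
+## Examples
+
+A sketch, following the type-parameter order listed above:
+
+```rust
+use bitvec::order::{verify_for_type, Lsb0};
+
+// Check the crate-provided `Lsb0` ordering against only the `u8` register.
+verify_for_type::<Lsb0, u8>(false);
+```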
+
+[`BitOrder`]: crate::order::BitOrder
+[`BitRegister`]: crate::mem::BitRegister
+[`verify`]: crate::order::verify
diff --git a/doc/prelude.md b/doc/prelude.md
new file mode 100644
index 0000000..f0b4d27
--- /dev/null
+++ b/doc/prelude.md
@@ -0,0 +1,9 @@
+# Symbol Export
+
+This module collects the general public API into a single place for bulk import,
+as `use bitvec::prelude::*;`, without polluting the root namespace of the crate.
+
+This provides all the data structure types and macros, as well as the two traits
+needed to operate them as type parameters, by name. It also imports extension
+traits without naming them, so that their methods are available but their trait
+names are not.
diff --git a/doc/ptr.md b/doc/ptr.md
new file mode 100644
index 0000000..018eee9
--- /dev/null
+++ b/doc/ptr.md
@@ -0,0 +1,22 @@
+# Raw Pointer Implementation
+
+This provides `bitvec`-internal pointer types and a mirror of the [`core::ptr`]
+module.
+
+It contains the following types:
+
+- [`BitPtr`] is a raw-pointer to exactly one bit.
+- [`BitRef`] is a proxy reference to exactly one bit.
+- `BitSpan` is the encoded form of the `*BitSlice` pointer and `&BitSlice`
+ reference. It is not publicly exposed, but it serves as the foundation of
+ `bitvec`’s ability to describe memory regions.
+
+It also provides ports of the free functions available in `core::ptr`, as well
+as some utilities for bridging ordinary Rust pointers into `bitvec`.
+
+You should generally not use the contents of this module; `BitSlice` provides
+more convenience and has stronger abilities to optimize performance.
+
+[`BitPtr`]: self::BitPtr
+[`BitRef`]: self::BitRef
+[`core::ptr`]: core::ptr
diff --git a/doc/ptr/BitPtr.md b/doc/ptr/BitPtr.md
new file mode 100644
index 0000000..4c2c441
--- /dev/null
+++ b/doc/ptr/BitPtr.md
@@ -0,0 +1,61 @@
+# Single-Bit Pointer
+
+This structure defines a pointer to exactly one bit in a memory element. It is a
+structure, rather than an encoding of a `*Bit` raw pointer, because it contains
+more information than can be packed into such a pointer. Furthermore, it can
+uphold the same requirements and guarantees that the rest of the crate demands,
+whereäs a raw pointer cannot.
+
+## Original
+
+[`*bool`](https://doc.rust-lang.org/std/primitive.pointer.html) and
+[`NonNull<bool>`](core::ptr::NonNull)
+
+## API Differences
+
+Since raw pointers are not sufficient in space or guarantees, and are limited by
+not being marked `#[fundamental]`, this is an ordinary `struct`. Because it
+cannot use the `*const`/`*mut` distinction that raw pointers and references can,
+this encodes mutability in a type parameter instead.
+
+In order to be consistent with the rest of the crate, particularly the
+`*BitSlice` encoding, this enforces that all `T` element addresses are
+well-aligned to `T` and non-null. While this type is used in the API as an
+analogue of raw pointers, it is restricted in value to only contain the values
+of valid *references* to memory, not arbitrary pointers.
+
+## ABI Differences
+
+This is aligned to `1`, rather than the processor word, in order to enable some
+crate-internal space optimizations.
+
+## Type Parameters
+
+- `M`: Marks whether the pointer has mutability permissions to the referent
+ memory. Only `Mut` pointers can be used to create `&mut` references.
+- `T`: A memory type used to select both the register width and the bus behavior
+ when performing memory accesses.
+- `O`: The ordering of bits within a memory element.
+
+## Usage
+
+This structure is used as the `bitvec` equivalent to `*bool`. It is used in all
+raw-pointer APIs and provides behavior to emulate raw pointers. It cannot be
+directly dereferenced, as it is not a pointer; it can only be transformed back
+into higher referential types, or used in functions that accept it.
+
+These pointers can never be null or misaligned.
+
+## Safety
+
+Rust and LLVM **do not** have a concept of bit-level initialization yet.
+Furthermore, the underlying foundational code that this type uses to manipulate
+individual bits in memory relies on construction of **shared references** to
+memory, which means that unlike standard pointers, the `T` element to which
+`BitPtr` values point must always be **already initialized** in your program
+context.
+
+`bitvec` is not able to detect or enforce this requirement, and is currently not
+able to avoid it. See [`BitAccess`] for more information.
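+
+## Examples
+
+A short sketch of ordinary use, stepping a bit-pointer through an element with
+the raw-pointer APIs from this module:
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let data = 2u8;
+let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+
+// Step to the second `Lsb0` bit and read it.
+let bit = unsafe { bv_ptr::read(ptr.add(1)) };
+assert!(bit);
+```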
+
+[`BitAccess`]: crate::access::BitAccess
diff --git a/doc/ptr/BitPtrRange.md b/doc/ptr/BitPtrRange.md
new file mode 100644
index 0000000..8d84ad1
--- /dev/null
+++ b/doc/ptr/BitPtrRange.md
@@ -0,0 +1,36 @@
+# Bit-Pointer Range
+
+This type is equivalent in purpose, but superior in functionality, to
+`Range<BitPtr<M, T, O>>`. If the standard library stabilizes [`Step`], the trait
+used to drive `Range` operations, then this type will likely be destroyed in
+favor of an `impl Step for BitPtr` block and use of standard ranges.
+
+Like [`Range`], this is a half-open set where the low bit-pointer selects the
+first live bit in a span and the high bit-pointer selects the first dead bit
+*after* the span.
+
+This type is not capable of inspecting provenance, and has no requirement of its
+own that both bit-pointers be derived from the same provenance region. It is
+safe to construct and use with any pair of bit-pointers; however, the
+bit-pointers it *produces* are, necessarily, `unsafe` to use.
+
+## Original
+
+[`Range<*bool>`][`Range`]
+
+## Memory Representation
+
+[`BitPtr`] is required to be `repr(packed)` in order to satisfy the [`BitRef`]
+size optimizations. In order to stay minimally sized itself, this type has no
+alignment requirement, and reading either bit-pointer *may* incur a misalignment
+penalty. Reads are always safe and valid; they may merely be slow.
+
+## Type Parameters
+
+This takes the same type parameters as `BitPtr`, as it is simply a pair of
+bit-pointers with range semantics.
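+
+## Examples
+
+A sketch of the most common source of these ranges, a `BitSlice` view (this
+assumes the `BitSlice::as_bitptr_range` accessor):
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let bits = bits![0, 1, 0, 1];
+
+// The range yields one bit-pointer per live bit in the slice.
+let ones = bits
+	.as_bitptr_range()
+	.map(|bp| unsafe { bv_ptr::read(bp) })
+	.filter(|&bit| bit)
+	.count();
+assert_eq!(ones, 2);
+```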
+
+[`BitPtr`]: crate::ptr::BitPtr
+[`BitRef`]: crate::ptr::BitRef
+[`Range`]: core::ops::Range
+[`Step`]: core::iter::Step
diff --git a/doc/ptr/BitRef.md b/doc/ptr/BitRef.md
new file mode 100644
index 0000000..798cf3c
--- /dev/null
+++ b/doc/ptr/BitRef.md
@@ -0,0 +1,49 @@
+# Proxy Bit-Reference
+
+This structure simulates `&/mut bool` within `BitSlice` regions. It is analogous
+to the C++ type [`std::bitset<N>::reference`][0].
+
+This type wraps a [`BitPtr`] and caches a `bool` in one of the remaining padding
+bytes. It is then able to freely give out references to its cached `bool`, and
+commits the cached value back to the proxied location when dropped.
+
+## Original
+
+This is semantically equivalent to `&'a bool` or `&'a mut bool`.
+
+## Quirks
+
+Because this type has both a lifetime and a destructor, it can introduce an
+uncommon syntax error condition in Rust. When an expression that produces this
+type is in the final expression of a block, including if that expression is used
+as a condition in a `match`, `if let`, or `if`, then the compiler will attempt
+to extend the drop scope of this type to the outside of the block. This causes a
+lifetime mismatch error if the source region from which this proxy is produced
+begins its lifetime inside the block.
+
+If you get a compiler error that this type causes something to be dropped while
+borrowed, you can end the borrow by putting any expression-ending syntax element
+after the offending expression that produces this type, including a semicolon or
+an item definition.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0; 2];
+
+let (left, right) = bits.split_at_mut(1);
+let mut first = left.get_mut(0).unwrap();
+let second = right.get_mut(0).unwrap();
+
+// Writing through a dereference requires a `mut` binding.
+*first = true;
+// Writing through the explicit method call does not.
+second.commit(true);
+
+drop(first); // It’s not a reference, so NLL does not apply!
+assert_eq!(bits, bits![1; 2]);
+```
+
+[0]: https://en.cppreference.com/w/cpp/utility/bitset/reference
diff --git a/doc/ptr/BitSpan.md b/doc/ptr/BitSpan.md
new file mode 100644
index 0000000..c54c473
--- /dev/null
+++ b/doc/ptr/BitSpan.md
@@ -0,0 +1,134 @@
+# Encoded Bit-Span Descriptor
+
+This structure is used as the actual in-memory value of `BitSlice` pointers
+(including both `*{const,mut} BitSlice` and `&/mut BitSlice`). It is **not**
+public API, and the encoding scheme does not support external modification.
+
+Rust slices encode a base element address and an element count into a single
+`&[T]` two-word value. `BitSpan` encodes a third value, the index of the base
+bit within the base element, into unused bits of the address and length counter.
+
+The slice reference has the ABI `(*T, usize)`, which is exactly two processor
+words in size. `BitSpan` matches this ABI so that it can be cast into
+`&/mut BitSlice` and used in reference-demanding APIs.
+
+## Layout
+
+This structure is a more complex version of the `(*const T, usize)` tuple that
+Rust uses to represent slices throughout the language. It breaks the pointer and
+counter fundamentals into sub-field components. Rust does not have bitfield
+syntax, so the below description of the structure layout is in C++.
+
+```cpp
+template <typename T>
+struct BitSpan {
+ uintptr_t ptr_head : __builtin_ctzll(alignof(T));
+	uintptr_t ptr_addr : sizeof(uintptr_t) * 8 - __builtin_ctzll(alignof(T));
+
+ size_t len_head : 3;
+ size_t len_bits : sizeof(size_t) * 8 - 3;
+};
+```
+
+This means that the `BitSpan<O, T>` has three *logical* fields, stored in four
+segments, across the two *structural* fields of the type. The widths and
+placements of each segment are functions of the size of `*const T`, `usize`, and
+of the alignment of the `T` referent buffer element type.
+
+## Fields
+
+### Base Address
+
+The address of the base element in a memory region is stored in all but the
+lowest bits of the `ptr` field. An aligned pointer to `T` will always have its
+lowest log<sub>2</sub>(byte width) bits zeroed, so those bits can be used to
+store other information, as long as they are erased before dereferencing the
+address as a pointer to `T`.
+
+### Head Bit Index
+
+For any referent element type `T`, the selection of a single bit within the
+element requires log<sub>2</sub>(byte width) bits to select a byte within the
+element `T`, and another three bits to select a bit within the selected byte.
+
+|Type |Alignment|Trailing Zeros|Count Bits|
+|:----|--------:|-------------:|---------:|
+|`u8` | 1| 0| 3|
+|`u16`| 2| 1| 4|
+|`u32`| 4| 2| 5|
+|`u64`| 8| 3| 6|
+
+The index of the first live bit in the base element is split to have its three
+least significant bits stored in the least significant edge of the `len` field,
+and its remaining bits stored in the least significant edge of the `ptr` field.
+
+### Length Counter
+
+All but the lowest three bits of the `len` field are used to store a counter of
+live bits in the referent region. When this is zero, the region is empty.
+Because it is missing three bits, a `BitSpan` has only ⅛ of the index space of
+a `usize` value.
+
+## Significant Values
+
+The following values represent significant instances of the `BitSpan` type.
+
+### Null Slice
+
+The fully-zeroed slot is not a valid member of the `BitSpan<O, T>` type; it is
+reserved instead as the sentinel value for `Option::<BitSpan<O, T>>::None`.
+
+### Canonical Empty Slice
+
+All pointers with a `bits: 0` logical field are empty. Pointers that are used to
+maintain ownership of heap buffers are not permitted to erase their `addr`
+field. The canonical form of the empty slice has an `addr` value of
+[`NonNull::<T>::dangling()`], but all pointers to an empty region are equivalent
+regardless of address.
+
+#### Uninhabited Slices
+
+Any empty pointer with a non-[`dangling()`] base address is considered to be an
+uninhabited region. `BitSpan` never discards its address information, even as
+operations may alter or erase its head-index or length values.
+
+## Type Parameters
+
+- `T`: The memory type of the referent region. `BitSpan<O, T>` is a specialized
+ `*[T]` slice pointer, and operates on memory in terms of the `T` type for
+ access instructions and pointer calculation.
+- `O`: The ordering within the register type. The bit-ordering used within a
+ region colors all pointers to the region, and orderings can never mix.
+
+## Safety
+
+`BitSpan` values may only be constructed from pointers provided by the
+surrounding program.
+
+## Undefined Behavior
+
+Values of this type are binary-incompatible with slice pointers. Transmutation
+of these values into any other type will result in an incorrect program, and
+permit the program to begin illegal or undefined behaviors. This type may never
+be manipulated in any way by user code outside of the APIs it offers to the
+rest of `bitvec`; it certainly may not be seen or observed by other crates.
+
+## Design Notes
+
+Accessing the `.head` logical field would be faster if it inhabited the least
+significant byte of `.len`, and was not partitioned into `.ptr` as well.
+This implementation was chosen against in order to minimize the loss of bits in
+the length counter; if user studies indicate that bit-slices do not **ever**
+require more than 2<sup>24</sup> bits on 32-bit systems, this may be revisited.
+
+The `ptr_metadata` feature, tracked in [Issue #81513], defines a trait `Pointee`
+that regions such as `BitSlice` can implement and define a `Metadata` type that
+carries all information other than a dereferenceable memory address. For regular
+slices, this would be `impl<T> Pointee for [T] { type Metadata = usize; }`. For
+`BitSlice`, it would be `(usize, BitIdx<T::Mem>)` and obviate this module
+entirely. But until it stabilizes, this remains.
+
+[Issue #81513]: https://github.com/rust-lang/rust/issues/81513
+
+[`NonNull::<T>::dangling()`]: core::ptr::NonNull::dangling
+[`dangling()`]: core::ptr::NonNull::dangling
diff --git a/doc/ptr/addr.md b/doc/ptr/addr.md
new file mode 100644
index 0000000..36a137e
--- /dev/null
+++ b/doc/ptr/addr.md
@@ -0,0 +1,5 @@
+# Address Value Management
+
+This module provides utilities for working with `T: BitStore` addresses so that
+the other `ptr` submodules can rely on the correctness of their values when
+doing pointer encoding.
diff --git a/doc/ptr/bitslice_from_raw_parts.md b/doc/ptr/bitslice_from_raw_parts.md
new file mode 100644
index 0000000..d6bbfca
--- /dev/null
+++ b/doc/ptr/bitslice_from_raw_parts.md
@@ -0,0 +1,30 @@
+# Bit-Slice Pointer Construction
+
+This forms a raw [`BitSlice`] pointer from a bit-pointer and a length.
+
+## Original
+
+[`ptr::slice_from_raw_parts`](core::ptr::slice_from_raw_parts)
+
+## Examples
+
+You will need to construct a `BitPtr` first; these are typically produced by
+existing `BitSlice` views, or you can do so manually.
+
+```rust
+use bitvec::{
+ prelude::*,
+ index::BitIdx,
+ ptr as bv_ptr,
+};
+
+let data = 6u16;
+let head = BitIdx::new(1).unwrap();
+let ptr = BitPtr::<_, _, Lsb0>::new((&data).into(), head).unwrap();
+let slice = bv_ptr::bitslice_from_raw_parts(ptr, 10);
+let slice_ref = unsafe { &*slice };
+assert_eq!(slice_ref.len(), 10);
+assert_eq!(slice_ref, bits![1, 1, 0, 0, 0, 0, 0, 0, 0, 0]);
+```
+
+[`BitSlice`]: crate::slice::BitSlice
diff --git a/doc/ptr/bitslice_from_raw_parts_mut.md b/doc/ptr/bitslice_from_raw_parts_mut.md
new file mode 100644
index 0000000..1070029
--- /dev/null
+++ b/doc/ptr/bitslice_from_raw_parts_mut.md
@@ -0,0 +1,31 @@
+# Bit-Slice Pointer Construction
+
+This forms a raw [`BitSlice`] pointer from a bit-pointer and a length.
+
+## Original
+
+[`ptr::slice_from_raw_parts`](core::ptr::slice_from_raw_parts)
+
+## Examples
+
+You will need to construct a `BitPtr` first; these are typically produced by
+existing `BitSlice` views, or you can do so manually.
+
+```rust
+use bitvec::{
+ prelude::*,
+ index::BitIdx,
+ ptr as bv_ptr,
+};
+
+let mut data = 6u16;
+let head = BitIdx::new(1).unwrap();
+let ptr = BitPtr::<_, _, Lsb0>::new((&mut data).into(), head).unwrap();
+let slice = bv_ptr::bitslice_from_raw_parts_mut(ptr, 10);
+let slice_ref = unsafe { &mut *slice };
+assert_eq!(slice_ref.len(), 10);
+slice_ref.set(2, true);
+assert_eq!(slice_ref, bits![1, 1, 1, 0, 0, 0, 0, 0, 0, 0]);
+```
+
+[`BitSlice`]: crate::slice::BitSlice
diff --git a/doc/ptr/copy.md b/doc/ptr/copy.md
new file mode 100644
index 0000000..4b461d6
--- /dev/null
+++ b/doc/ptr/copy.md
@@ -0,0 +1,87 @@
+# Bit-wise `memcpy`
+
+This copies bits from a region beginning at `src` into a region beginning at
+`dst`, each extending upwards in the address space for `count` bits.
+
+The two regions may overlap.
+
+If the two regions are known to *never* overlap, then [`copy_nonoverlapping`][0]
+can be used instead.
+
+## Original
+
+[`ptr::copy`](core::ptr::copy)
+
+## Overlap Definition
+
+`bitvec` defines region overlap only when the bit-pointers used to access them
+have the same `O: BitOrder` type parameter. When this parameter differs, the
+regions are always assumed to not overlap in real memory, because `bitvec` does
+not define the effects of different orderings mapping to the same locations.
+
+## Safety
+
+In addition to the bit-ordering constraints, this inherits the restrictions of
+the original `ptr::copy`:
+
+- `src` must be valid to read the next `count` bits out of memory.
+- `dst` must be valid to write into the next `count` bits.
+- Both `src` and `dst` must satisfy [`BitPtr`]’s non-null, well-aligned,
+ requirements.
+
+## Behavior
+
+This reads and writes each bit individually. It is incapable of optimizing its
+behavior to perform batched memory accesses that have better awareness of the
+underlying memory.
+
+The [`BitSlice::copy_from_bitslice`][1] method *is* able to perform this
+optimization. You should always prefer to use `BitSlice` if you are sensitive to
+performance.
+
+## Examples
+
+This example performs a simple copy across independent regions. You can see that
+it follows the ordering parameter for the source and destination regions as it
+walks each bit individually.
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let start = 0b1011u8;
+let mut end = 0u16;
+
+let src = BitPtr::<_, _, Lsb0>::from_ref(&start);
+let dst = BitPtr::<_, _, Msb0>::from_mut(&mut end);
+
+unsafe {
+ bv_ptr::copy(src, dst, 4);
+}
+assert_eq!(end, 0b1101_0000_0000_0000);
+```
+
+This can detect overlapping regions. Note again that overlap only exists when
+the ordering parameter is the same! Using bit-pointers that overlap in real
+memory with different ordering is not defined, and `bitvec` does not specify any
+result.
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut x = 0b1111_0010u8;
+let src = BitPtr::<_, _, Lsb0>::from_mut(&mut x);
+let dst = unsafe { src.add(2) };
+
+unsafe {
+ bv_ptr::copy(src.to_const(), dst, 4);
+}
+
+assert_eq!(x, 0b1100_1010);
+// bottom nibble ^^ ^^ moved here
+```
+
+[`BitPtr`]: crate::ptr::BitPtr
+[0]: crate::ptr::copy_nonoverlapping
+[1]: crate::slice::BitSlice::copy_from_bitslice
diff --git a/doc/ptr/copy_nonoverlapping.md b/doc/ptr/copy_nonoverlapping.md
new file mode 100644
index 0000000..6145438
--- /dev/null
+++ b/doc/ptr/copy_nonoverlapping.md
@@ -0,0 +1,59 @@
+# Bit-wise `memcpy`
+
+This copies bits from a region beginning at `src` into a region beginning at
+`dst`, each extending upwards in the address space for `count` bits.
+
+The two regions *may not* overlap.
+
+## Original
+
+[`ptr::copy_nonoverlapping`](core::ptr::copy_nonoverlapping)
+
+## Overlap Definition
+
+The two regions may be in the same provenance as long as they have no common
+bits. `bitvec` only defines the possibility of overlap when the `O1` and `O2`
+bit-ordering parameters are the same; if they are different, then it considers
+the regions to not overlap, and does not attempt to detect real-memory
+collisions.
+
+## Safety
+
+In addition to the bit-ordering constraints, this inherits the restrictions of
+the original `ptr::copy_nonoverlapping`:
+
+- `src` must be valid to read the next `count` bits out of memory.
+- `dst` must be valid to write into the next `count` bits.
+- Both `src` and `dst` must satisfy [`BitPtr`]’s non-null, well-aligned,
+ requirements.
+
+## Behavior
+
+This reads and writes each bit individually. It is incapable of optimizing its
+behavior to perform batched memory accesses that have better awareness of the
+underlying memory.
+
+The [`BitSlice::copy_from_bitslice`][1] method *is* able to perform this
+optimization, and tolerates overlap. You should always prefer to use `BitSlice`
+if you are sensitive to performance.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let start = 0b1011u8;
+let mut end = 0u16;
+
+let src = BitPtr::<_, _, Lsb0>::from_ref(&start);
+let dst = BitPtr::<_, _, Msb0>::from_mut(&mut end);
+
+unsafe {
+ bv_ptr::copy_nonoverlapping(src, dst, 4);
+}
+assert_eq!(end, 0b1101_0000_0000_0000);
+```
+
+[1]: crate::slice::BitSlice::copy_from_bitslice
+[`BitPtr`]: crate::ptr::BitPtr
diff --git a/doc/ptr/drop_in_place.md b/doc/ptr/drop_in_place.md
new file mode 100644
index 0000000..78bf9fa
--- /dev/null
+++ b/doc/ptr/drop_in_place.md
@@ -0,0 +1,9 @@
+# Remote Destructor
+
+`BitPtr` only points to indestructible types. This has no effect, and is only
+present for symbol compatibility. You should not have been calling it on your
+integers or `bool`s anyway!
+
+## Original
+
+[`ptr::drop_in_place`](core::ptr::drop_in_place)
diff --git a/doc/ptr/eq.md b/doc/ptr/eq.md
new file mode 100644
index 0000000..b12b8eb
--- /dev/null
+++ b/doc/ptr/eq.md
@@ -0,0 +1,38 @@
+# Bit-Pointer Equality
+
+This compares two bit-pointers for equality by their address value, not by the
+value of their referent bit. This does not dereference either.
+
+## Original
+
+[`ptr::eq`](core::ptr::eq)
+
+## API Differences
+
+The two bit-pointers can differ in their storage type parameters. `bitvec`
+defines pointer equality only between pointers with the same underlying
+[`BitStore::Mem`][0] element type. Numerically-equal bit-pointers with different
+integer types *will not* compare equal, though this function will compile and
+accept them.
+
+This cannot compare encoded span pointers. `*const BitSlice` can be used in the
+standard-library `ptr::eq`, and does not need an override.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+use core::cell::Cell;
+
+let data = 0u16;
+let bare_ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+let cell_ptr = bare_ptr.cast::<Cell<u16>>();
+
+assert!(bv_ptr::eq(bare_ptr, cell_ptr));
+
+let byte_ptr = bare_ptr.cast::<u8>();
+assert!(!bv_ptr::eq(bare_ptr, byte_ptr));
+```
+
+[0]: crate::store::BitStore::Mem
diff --git a/doc/ptr/hash.md b/doc/ptr/hash.md
new file mode 100644
index 0000000..77cc091
--- /dev/null
+++ b/doc/ptr/hash.md
@@ -0,0 +1,11 @@
+# Bit-Pointer Hashing
+
+This hashes a bit-pointer by the value of its components, rather than its
+referent bit. It does not dereference the pointer.
+
+This can be used to ensure that you are hashing the bit-pointer’s address value,
+though, as always, hashing an address rather than a data value is likely unwise.
+
+## Original
+
+[`ptr::hash`](core::ptr::hash)
diff --git a/doc/ptr/null.md b/doc/ptr/null.md
new file mode 100644
index 0000000..71f2b38
--- /dev/null
+++ b/doc/ptr/null.md
@@ -0,0 +1,10 @@
+# Bit-Pointer Sentinel Value
+
+`BitPtr` does not permit actual null pointers. Instead, it uses the canonical
+dangling address as a sentinel for uninitialized, useless, locations.
+
+You should use `Option<BitPtr>` if you need to track nullability.
+
+## Original
+
+[`ptr::null`](core::ptr::null)
diff --git a/doc/ptr/null_mut.md b/doc/ptr/null_mut.md
new file mode 100644
index 0000000..f95bf72
--- /dev/null
+++ b/doc/ptr/null_mut.md
@@ -0,0 +1,10 @@
+# Bit-Pointer Sentinel Value
+
+`BitPtr` does not permit actual null pointers. Instead, it uses the canonical
+dangling address as a sentinel for uninitialized, useless, locations.
+
+You should use `Option<BitPtr>` if you need to track nullability.
+
+## Original
+
+[`ptr::null_mut`](core::ptr::null_mut)
diff --git a/doc/ptr/proxy.md b/doc/ptr/proxy.md
new file mode 100644
index 0000000..6b619cc
--- /dev/null
+++ b/doc/ptr/proxy.md
@@ -0,0 +1,9 @@
+# Proxy Bit-References
+
+Rust does not permit the use of custom proxy structures in place of true
+reference primitives, so APIs that specify references (like `IndexMut` or
+`DerefMut`) cannot be implemented by types that cannot manifest `&mut`
+references directly. Since `bitvec` cannot produce an `&mut bool` reference
+within a `BitSlice`, it instead uses the `BitRef` proxy type defined in this
+module to provide reference-like work generally, and simply does not define
+`IndexMut<usize>`.
diff --git a/doc/ptr/range.md b/doc/ptr/range.md
new file mode 100644
index 0000000..8ff3d54
--- /dev/null
+++ b/doc/ptr/range.md
@@ -0,0 +1,20 @@
+# Bit-Pointer Ranges
+
+This module defines ports of the `Range` type family to work with `BitPtr`s.
+Rust’s own ranges have unstable internal details that make them awkward to use
+within the standard library, and essentially impossible outside it, with
+anything other than the numeric fundamentals.
+
+In particular, `bitvec` uses a half-open range of `BitPtr`s to represent
+C++-style dual-pointer memory regions (such as `BitSlice` iterators). Rust’s own
+slice iterators also do this, but because `*T` does not implement the [`Step`]
+trait, the standard library duplicates some work done by `Range` types in the
+slice iterators just to be able to alter the views.
+
+As such, `Range<BitPtr<_, _, _>>` has the same functionality as
+`Range<*const _>`: almost none. As this is undesirable, this module defines
+equivalent types that implement the full desired behavior of a pointer range.
+These are primarily used as crate internals, but may also be of interest to
+users.
+
+[`Step`]: core::iter::Step
diff --git a/doc/ptr/read.md b/doc/ptr/read.md
new file mode 100644
index 0000000..6ae9ad6
--- /dev/null
+++ b/doc/ptr/read.md
@@ -0,0 +1,28 @@
+# Single-Bit Read
+
+This reads the bit out of `src` directly.
+
+## Original
+
+[`ptr::read`](core::ptr::read)
+
+## Safety
+
+Because this performs a dereference of memory, it inherits the original
+`ptr::read`’s requirements:
+
+- `src` must be valid to read.
+- `src` must be properly aligned. This is an invariant of the `BitPtr` type as
+ well as of the memory access.
+- `src` must point to an initialized value of `T`.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let data = 128u8;
+let ptr = BitPtr::<_, _, Msb0>::from_ref(&data);
+assert!(unsafe { bv_ptr::read(ptr) });
+```
diff --git a/doc/ptr/read_unaligned.md b/doc/ptr/read_unaligned.md
new file mode 100644
index 0000000..a539e1e
--- /dev/null
+++ b/doc/ptr/read_unaligned.md
@@ -0,0 +1,30 @@
+# Single-Bit Unaligned Read
+
+This reads the bit out of `src` directly. It uses compiler intrinsics to
+tolerate an unaligned `T` address. However, because `BitPtr` has a type
+invariant that addresses are always well-aligned (and non-null), this has no
+benefit or purpose.
+
+## Original
+
+[`ptr::read_unaligned`](core::ptr::read_unaligned)
+
+## Safety
+
+Because this performs a dereference of memory, it inherits the original
+`ptr::read_unaligned`’s requirements:
+
+- `src` must be valid to read.
+- `src` must point to an initialized value of `T`.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let data = 128u8;
+let ptr = BitPtr::<_, _, Msb0>::from_ref(&data);
+assert!(unsafe { bv_ptr::read_unaligned(ptr) });
+```
diff --git a/doc/ptr/read_volatile.md b/doc/ptr/read_volatile.md
new file mode 100644
index 0000000..71b0b1f
--- /dev/null
+++ b/doc/ptr/read_volatile.md
@@ -0,0 +1,39 @@
+# Single-Bit Volatile Read
+
+This reads the bit out of `src` directly, using a volatile I/O intrinsic to
+prevent compiler reördering or removal.
+
+You should not use `bitvec` to perform any volatile I/O operations. You should
+instead do volatile I/O work on integer values directly, or use a crate like
+[`voladdress`][0] to perform I/O transactions, and use `bitvec` only on stack
+locals that have no additional memory semantics.
+
+## Original
+
+[`ptr::read_volatile`](core::ptr::read_volatile)
+
+## Safety
+
+Because this performs a dereference of memory, it inherits the original
+`ptr::read_volatile`’s requirements:
+
+- `src` must be valid to read.
+- `src` must be properly aligned. This is an invariant of the `BitPtr` type as
+ well as of the memory access.
+- `src` must point to an initialized value of `T`.
+
+Remember that volatile accesses are ordinary loads that the compiler cannot
+remove or reörder! They are *not* an atomic synchronizer.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let data = 128u8;
+let ptr = BitPtr::<_, _, Msb0>::from_ref(&data);
+assert!(unsafe { bv_ptr::read_volatile(ptr) });
+```
+
+[0]: https://docs.rs/voladdress/latest/voladdress
diff --git a/doc/ptr/replace.md b/doc/ptr/replace.md
new file mode 100644
index 0000000..b3240db
--- /dev/null
+++ b/doc/ptr/replace.md
@@ -0,0 +1,35 @@
+# Single-Bit Replacement
+
+This writes a new value into a location, and returns the bit-value previously
+stored there. It is semantically and behaviorally equivalent to
+[`BitRef::replace`][0], except that it works on bit-pointer structures rather
+than proxy references. Prefer to use a proxy reference or
+[`BitSlice::replace`][1] instead.
+
+## Original
+
+[`ptr::replace`](core::ptr::replace)
+
+## Safety
+
+This has the same safety requirements as [`ptr::read`][2] and [`ptr::write`][3],
+as it is required to use them in its implementation.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut data = 4u8;
+let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+assert!(unsafe {
+ bv_ptr::replace(ptr.add(2), false)
+});
+assert_eq!(data, 0);
+```
+
+[0]: crate::ptr::BitRef::replace
+[1]: crate::slice::BitSlice::replace
+[2]: crate::ptr::read
+[3]: crate::ptr::write
diff --git a/doc/ptr/single.md b/doc/ptr/single.md
new file mode 100644
index 0000000..9606df6
--- /dev/null
+++ b/doc/ptr/single.md
@@ -0,0 +1,10 @@
+# Single-Bit Pointers
+
+This module defines single-bit pointers, which are required to be structures in
+their own right and do not have an encoded form.
+
+These pointers should generally not be used; [`BitSlice`] is more likely to be
+correct and have better performance. They are provided for consistency, not for
+hidden optimizations.
+
+[`BitSlice`]: crate::slice::BitSlice
diff --git a/doc/ptr/slice_from_raw_parts.md b/doc/ptr/slice_from_raw_parts.md
new file mode 100644
index 0000000..a7b2f3d
--- /dev/null
+++ b/doc/ptr/slice_from_raw_parts.md
@@ -0,0 +1,10 @@
+# Raw Bit-Slice Pointer Construction
+
+This is an alias for [`bitslice_from_raw_parts`][0], renamed for symbol
+compatibility. See its documentation instead.
+
+## Original
+
+[`ptr::slice_from_raw_parts`](core::ptr::slice_from_raw_parts)
+
+[0]: crate::ptr::bitslice_from_raw_parts
diff --git a/doc/ptr/slice_from_raw_parts_mut.md b/doc/ptr/slice_from_raw_parts_mut.md
new file mode 100644
index 0000000..04c95c2
--- /dev/null
+++ b/doc/ptr/slice_from_raw_parts_mut.md
@@ -0,0 +1,10 @@
+# Raw Bit-Slice Pointer Construction
+
+This is an alias for [`bitslice_from_raw_parts_mut`][0], renamed for symbol
+compatibility. See its documentation instead.
+
+## Original
+
+[`ptr::slice_from_raw_parts_mut`](core::ptr::slice_from_raw_parts_mut)
+
+[0]: crate::ptr::bitslice_from_raw_parts_mut
diff --git a/doc/ptr/span.md b/doc/ptr/span.md
new file mode 100644
index 0000000..aba37d1
--- /dev/null
+++ b/doc/ptr/span.md
@@ -0,0 +1,33 @@
+# Encoded Bit-Span Pointer
+
+This module implements the logic used to encode and operate on values of
+`*BitSlice`. It is the core operational module of the library.
+
+## Theory
+
+Rust is slowly experimenting with allowing user-provided types to define
+metadata structures attached to raw-pointers and references in a structured
+manner. However, this is a fairly recent endeavour, much newer than `bitvec`’s
+work in the same area, so `bitvec` does not attempt to use it.
+
+The problem with bit-addressable memory is that it takes three more bits to
+select a *bit* than it does a *byte*. While AMD64 specifies (and AArch64 likely
+follows by fiat) that pointers are 64 bits wide but only contain 48 (or more
+recently, 57) bits of information, leaving the remainder available to store
+userspace information (as long as it is canonicalized before dereferencing),
+x86 and Arm32 have no such luxury space in their pointers.
+
+Since `bitvec` supports 32-bit targets, it instead opts to place the three
+bit-selector bits outside the pointer address. The only other space available in
+Rust pointers is in the length field of slice pointers. As such, `bitvec`
+encodes its span description information into `*BitSlice` and, by extension,
+`&/mut BitSlice`. The value underlying these language fundamentals is well-known
+(though theoretically opaque), and the standard library provides APIs that it
+promises will always be valid to manipulate them. Through careful use of these
+APIs, and following type-system rules to prevent undefined behavior, `bitvec` is
+able to define its span descriptions within the language fundamentals and appear
+fully idiomatic and compliant with existing Rust patterns.
+
+See the [`BitSpan`] type documentation for details on the encoding scheme used.
+
+[`BitSpan`]: self::BitSpan
diff --git a/doc/ptr/swap.md b/doc/ptr/swap.md
new file mode 100644
index 0000000..e7650e2
--- /dev/null
+++ b/doc/ptr/swap.md
@@ -0,0 +1,34 @@
+# Bit Swap
+
+This exchanges the bit-values in two locations. It is semantically and
+behaviorally equivalent to [`BitRef::swap`][0], except that it works on
+bit-pointer structures rather than proxy references. Prefer to use a proxy
+reference or [`BitSlice::swap`][1] instead.
+
+## Original
+
+[`ptr::swap`](core::ptr::swap)
+
+## Safety
+
+This has the same safety requirements as [`ptr::read`][2] and [`ptr::write`][3],
+as it is required to use them in its implementation.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut data = 2u8;
+let x = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+let y = unsafe { x.add(1) };
+
+unsafe { bv_ptr::swap(x, y); }
+assert_eq!(data, 1);
+```
+
+[0]: crate::ptr::BitRef::swap
+[1]: crate::slice::BitSlice::swap
+[2]: crate::ptr::read
+[3]: crate::ptr::write
diff --git a/doc/ptr/swap_nonoverlapping.md b/doc/ptr/swap_nonoverlapping.md
new file mode 100644
index 0000000..4a137a8
--- /dev/null
+++ b/doc/ptr/swap_nonoverlapping.md
@@ -0,0 +1,35 @@
+# Many-Bit Swap
+
+Exchanges the contents of two regions, which cannot overlap.
+
+## Original
+
+[`ptr::swap_nonoverlapping`](core::ptr::swap_nonoverlapping)
+
+## Safety
+
+Both `one` and `two` must be:
+
+- correct `BitPtr` instances (well-aligned, non-null)
+- valid to read and write for the next `count` bits
+
+Additionally, the ranges `one .. one + count` and `two .. two + count` must be
+entirely disjoint. They can be adjacent, but no bit can be in both.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut x = [0u8; 2];
+let mut y = !0u16;
+let x_ptr = BitPtr::<_, _, Msb0>::from_slice_mut(&mut x);
+let y_ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut y);
+
+unsafe {
+ bv_ptr::swap_nonoverlapping(x_ptr, y_ptr, 12);
+}
+assert_eq!(x, [!0, 0xF0]);
+assert_eq!(y, 0xF0_00);
+```
diff --git a/doc/ptr/write.md b/doc/ptr/write.md
new file mode 100644
index 0000000..4be6b4a
--- /dev/null
+++ b/doc/ptr/write.md
@@ -0,0 +1,37 @@
+# Single-Bit Write
+
+This writes a bit into `dst` directly.
+
+## Original
+
+[`ptr::write`](core::ptr::write)
+
+## Safety
+
+Because this performs a dereference of memory, it inherits the original
+`ptr::write`’s requirements:
+
+- `dst` must be valid to write
+- `dst` must be properly aligned. This is an invariant of the `BitPtr` type as
+ well as of the memory access.
+
+Additionally, `dst` must point to an initialized value of `T`. Integers cannot
+be initialized one bit at a time.
+
+## Behavior
+
+This is required to perform a read/modify/write cycle on the memory location.
+LLVM *may or may not* emit a bit-write instruction on targets that have them in
+the ISA, but this is not specified in any way.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut data = 0u8;
+let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+unsafe { bv_ptr::write(ptr.add(2), true); }
+assert_eq!(data, 4);
+```
diff --git a/doc/ptr/write_bits.md b/doc/ptr/write_bits.md
new file mode 100644
index 0000000..cc33276
--- /dev/null
+++ b/doc/ptr/write_bits.md
@@ -0,0 +1,42 @@
+# Bit-wise `memset`
+
+This fills a region of memory with a bit value. It is equivalent to using
+`memset` with only `!0` or `0`, masked appropriately for the region edges.
+
+## Original
+
+[`ptr::write_bytes`](core::ptr::write_bytes)
+
+## Safety
+
+Because this performs a dereference of memory, it inherits the original
+`ptr::write_bytes`’ requirements:
+
+- `dst` must be valid to write
+- `dst` must be properly aligned. This is an invariant of the `BitPtr` type as
+ well as of the memory access.
+
+Additionally, `dst` must point to an initialized value of `T`. Integers cannot
+be initialized one bit at a time.
+
+## Behavior
+
+This function does not specify an implementation. You should assume the worst
+case (`O(n)` read/modify/write of each bit). The [`BitSlice::fill`][0] method
+will have equal or better performance.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut data = 0u8;
+let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+unsafe {
+ bv_ptr::write_bits(ptr.add(1), true, 5);
+}
+assert_eq!(data, 0b0011_1110);
+```
+
+[0]: crate::slice::BitSlice::fill
diff --git a/doc/ptr/write_bytes.md b/doc/ptr/write_bytes.md
new file mode 100644
index 0000000..1d2dd7b
--- /dev/null
+++ b/doc/ptr/write_bytes.md
@@ -0,0 +1,10 @@
+# Bit-wise `memset`
+
+This is an alias for [`write_bits`][0], renamed for symbol compatibility. See
+its documentation instead.
+
+## Original
+
+[`ptr::write_bytes`](core::ptr::write_bytes)
+
+[0]: crate::ptr::write_bits
diff --git a/doc/ptr/write_unaligned.md b/doc/ptr/write_unaligned.md
new file mode 100644
index 0000000..4cbba7d
--- /dev/null
+++ b/doc/ptr/write_unaligned.md
@@ -0,0 +1,37 @@
+# Single-Bit Unaligned Write
+
+This writes a bit into `dst` directly. It uses compiler intrinsics to tolerate
+an unaligned `T` address. However, because `BitPtr` has a type invariant that
+addresses are always well-aligned (and non-null), this has no benefit or
+purpose.
+
+## Original
+
+[`ptr::write_unaligned`](core::ptr::write_unaligned)
+
+## Safety
+
+- `dst` must be valid to write
+- `dst` must be properly aligned. This is an invariant of the `BitPtr` type as
+ well as of the memory access.
+
+Additionally, `dst` must point to an initialized value of `T`. Integers cannot
+be initialized one bit at a time.
+
+## Behavior
+
+This is required to perform a read/modify/write cycle on the memory location.
+LLVM *may or may not* emit a bit-write instruction on targets that have them in
+the ISA, but this is not specified in any way.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut data = 0u8;
+let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+unsafe { bv_ptr::write_unaligned(ptr.add(2), true); }
+assert_eq!(data, 4);
+```
diff --git a/doc/ptr/write_volatile.md b/doc/ptr/write_volatile.md
new file mode 100644
index 0000000..f3fcd70
--- /dev/null
+++ b/doc/ptr/write_volatile.md
@@ -0,0 +1,45 @@
+# Single-Bit Volatile Write
+
+This writes a bit into `dst` directly, using a volatile I/O intrinsic to
+prevent compiler reördering or removal.
+
+You should not use `bitvec` to perform any volatile I/O operations. You should
+instead do volatile I/O work on integer values directly, or use a crate like
+[`voladdress`][0] to perform I/O transactions, and use `bitvec` only on stack
+locals that have no additional memory semantics.
+
+## Original
+
+[`ptr::write_volatile`](core::ptr::write_volatile)
+
+## Safety
+
+Because this performs a dereference of memory, it inherits the original
+`ptr::write_volatile`’s requirements:
+
+- `dst` must be valid to write
+- `dst` must be properly aligned. This is an invariant of the `BitPtr` type as
+ well as of the memory access.
+
+Additionally, `dst` must point to an initialized value of `T`. Integers cannot
+be initialized one bit at a time.
+
+## Behavior
+
+This is required to perform a read/modify/write cycle on the memory location.
+LLVM *may or may not* emit a bit-write instruction on targets that have them in
+the ISA, but this is not specified in any way.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+use bitvec::ptr as bv_ptr;
+
+let mut data = 0u8;
+let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+unsafe { bv_ptr::write_volatile(ptr.add(2), true); }
+assert_eq!(data, 4);
+```
+
+[0]: https://docs.rs/voladdress/latest/voladdress
diff --git a/doc/serdes.md b/doc/serdes.md
new file mode 100644
index 0000000..0bdf017
--- /dev/null
+++ b/doc/serdes.md
@@ -0,0 +1,121 @@
+# Support for `serde`
+
+`bitvec` structures are able to de/serialize their contents using `serde`.
+Because `bitvec` is itself designed to be a transport buffer and have
+memory-efficient storage properties, the implementations are somewhat strange
+and not necessarily suitable for transport across heterogeneous hosts.
+
+`bitvec` always serializes its underlying memory storage as a sequence of raw
+memory. It also includes the necessary metadata to prevent deserialization into
+an incorrect type.
+
+## Serialization
+
+All data types serialize through `BitSlice`. In version 0, `BitArray` had its
+own serialization logic, but this is no longer the case.
+
+`BitSlice` serializes the bit-width of `T::Mem` and the `any::type_name` of `O`.
+This may cause deserialization failures in the future if `any::type_name`
+changes its behavior, but as it is run while compiling `bitvec` itself, clients
+that rename `bitvec` when bringing it into their namespace should not be
+affected.
+
+Note that because `LocalBits` is a reëxport rather than a type in its own right,
+it always serializes as the real type to which it forwards. This prevents
+accidental mismatch when transporting between machines with different
+destinations for this alias.
+
+The next items serialized are the index of the starting bit within the starting
+element, and the total number of bits included in the bit-slice. After these,
+the data buffer is serialized directly.
+
+Each element in the data buffer is loaded, has any dead bits cleared to `0`, and
+is then serialized directly into the collector. In particular, no
+byte-reördering for transport is performed here, so integers wider than a byte
+must use a de/serialization format that handles this if, for example, byte-order
+endian transforms are required.
+
+## Deserialization
+
+Serde only permits no-copy slice deserialization on `&'a [u8]` slices, so
+`bitvec` in turn can only deserialize into `&'a BitSlice<u8, O>` bit-slices. It
+can deserialize into `BitArray`s of any type, relying on the serialization layer
+to reverse any byte-order transforms.
+
+`&BitSlice` will only deserialize if the transport format contains the bytes
+directly in it. If you do not have an allocator, you should always transport
+`BitArray`. If you do have an allocator, and are serializing `BitBox` or
+`BitVec`, and expect to deserialize into a `BitArray`, then you will need to use
+`.force_align()` and ensure that you fully occupy the buffer being transported.
+
+`BitArray` will fail to deserialize if the data stream does not have a head-bit
+index of `0` and a length that exactly matches its own. This limitation is a
+consequence of the implementation, and likely will not be relaxed. `BitBox` and
+`BitVec`, however, are able to deserialize any bit-sequence without issue.
+
+## Warnings
+
+`usize` *does* de/serialize! However, because it does not have a fixed width,
+`bitvec` always serializes it as the local fixed-width equivalent, and places
+the word width into the serialization stream. This will prevent roundtripping a
+`BitArray<[usize; N]>` between hosts with different `usize` widths, even though
+the types in the source code line up.
+
+This behavior was not present in version 0, and users were able to write
+programs that incorrectly handled de/serialization when used on heterogeneous
+systems.
+
+In addition, remember that `bitvec` serializes its data buffer *directly* as
+2’s-complement integers. You must ensure that your transport layer can handle
+them correctly. As an example, JSON is not required to transport 64-bit integers
+with perfect fidelity. `bitvec` has no way to detect inadequacy in the transport
+layer, and will not prevent you from using a serialization format that damages
+or alters the bit-stream you send through it.
+
+## Transport Format
+
+All `bitvec` data structures produce the same basic format: a structure (named
+`BitSeq` for `BitSlice`, `BitBox`, and `BitVec`, or `BitArr` for `BitArray`)
+with four fields:
+
+1. `order` is a string naming the `O: BitOrder` parameter. Because it uses
+ [`any::type_name`][0], its value cannot be *guaranteed* to be stable. You
+ cannot assume that transport buffers are compatible across versions of the
+ compiler used to create applications exchanging them.
+1. `head` is a `BitIdx` structure containing two fields:
+ 1. `width` is a single byte containing `8`, `16`, `32`, or `64`, describing
+ the bit-width of each element in the data buffer. `bitvec` structures will
+ refuse to deserialize if the serialized bit-width does not match their
+ `T::Mem` type.
+ 1. `index` is a single byte containing the head-bit that begins the live
+ `BitSlice` region. `BitArray` will refuse to deserialize if this is not
+ zero.
+1. `bits` is the number of live bits in the region, as a `u64`. `BitArray` fails
+ to deserialize if it does not match [`mem::bits_of::<A>()`][1].
+1. `data` is the actual data buffer containing the bits being transported. For
+ `BitSeq` serialization, it is a sequence; for `BitArr`, it is a tuple. This
+ may affect the transport representation, and so the two are not guaranteed to
+ be interchangeable over all transports.
+
+As known examples, JSON does not have a fixed-size array type, so the contents
+of all `bitvec` structures have identical rendering, while the [`bincode`] crate
+does distinguish between run-length-encoded slices and non-length-encoded
+arrays.
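+
+As a minimal round-trip sketch (this assumes the crate’s `serde` feature is
+enabled and that the `serde_json` crate is available to the build; neither is
+part of this module itself):
+
+```rust,ignore
+use bitvec::prelude::*;
+
+let bv = bitvec![u8, Msb0; 1, 0, 1, 1];
+
+// The emitted stream carries the `order`, `head`, `bits`, and `data` fields
+// described above.
+let json = serde_json::to_string(&bv).unwrap();
+let back: BitVec<u8, Msb0> = serde_json::from_str(&json).unwrap();
+assert_eq!(back, bv);
+```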
+
+## Implementation Details
+
+`bitvec` supports deserializing from both of Serde’s models for aggregate
+structures: dictionaries and sequences. It always serializes as a dictionary,
+but if your serialization layer does not want to include field names, you may
+emit only the values *in the temporal order that they are received* and `bitvec`
+will correctly deserialize from them.
+
+`BitSlice` (and `BitBox` and `BitVec`, which forward to it) transports its data
+buffer using Serde’s *sequence* model. `BitArray` uses Serde’s *tuple* model
+instead. These models might not be interchangeable in certain transport formats!
+You should always deserialize into the same container type that produced a
+serialized stream.
+
+[0]: core::any::type_name
+[1]: crate::mem::bits_of
+[`bincode`]: https://docs.rs/bincode/latest/bincode
diff --git a/doc/serdes/array.md b/doc/serdes/array.md
new file mode 100644
index 0000000..be4ecdd
--- /dev/null
+++ b/doc/serdes/array.md
@@ -0,0 +1,26 @@
+# Bit-Array De/Serialization
+
+The Serde model distinguishes between *sequences*, which have a dynamic length
+which must always be transported with the data, and *tuples*, which have a fixed
+length known at compile-time that does not require transport.
+
+Serde handles arrays using its tuple model, not its sequence model, which means
+that `BitArray` cannot use the `BitSlice` Serde implementations (which must use
+the sequence model in order to handle `&[u8]` and `Vec<T>` de/serialization).
+Instead, `BitArray` has a standalone implementation using the tuple model so
+that its wrapped array can be transported (nearly) as if it were unwrapped.
+
+For consistency, `BitArray` has the same wire format that `BitSlice` does; the
+only distinction is that the data buffer is a tuple rather than a sequence.
+
+Additionally, Serde’s support for old versions of Rust means that it only
+implements its traits on arrays `[T; 0 ..= 32]`. Since `bitvec` has a much
+higher MSRV that includes support for the const-generic `[T; N]` family, it
+reïmplements Serde’s behavior on a custom `Array<T, N>` type in order to ensure
+that all possible `BitArray` storage types are transportable. Note, however,
+that *because* each `[T; N]` combination is a new implementation, de/serializing
+`BitArray`s directly is a great way to pessimize codegen.
+
+While it would be nice if `rustc` or LLVM could collapse the implementations and
+restore `N` as a run-time argument rather than a compile-time constant, neither
+`bitvec` nor Serde attempts to promise this in any way. Use at your discretion.
diff --git a/doc/serdes/slice.md b/doc/serdes/slice.md
new file mode 100644
index 0000000..56776cc
--- /dev/null
+++ b/doc/serdes/slice.md
@@ -0,0 +1,14 @@
+# Bit-Slice De/Serialization
+
+Bit-slice references and containers serialize as sequences with additional
+metadata.
+
+Serde only provides a deserializer for `&[u8]`; wider integers and
+interior-mutability wrappers are not able to view a transport buffer without
+potentially modifying it, and the buffer is not modifiable while being used for
+deserialization. As such, only `&BitSlice<u8, O>` has a no-copy deserialization
+implementation.
+
+If you need other storage types, you will need to deserialize into a `BitBox` or
+`BitVec`. If you do not have an allocator, you must *serialize from* and
+deserialize into a `BitArray`.
diff --git a/doc/serdes/utils.md b/doc/serdes/utils.md
new file mode 100644
index 0000000..edf1851
--- /dev/null
+++ b/doc/serdes/utils.md
@@ -0,0 +1,26 @@
+# De/Serialization Assistants
+
+This module contains types and implementations that assist in the
+de/serialization of the crate’s primary data structures.
+
+## `BitIdx<R>`
+
+The `BitIdx` implementation serializes both the index value and also the
+bit-width of `T::Mem`, so that the deserializer can ensure that it only loads
+from a matching data buffer.
+
+## `Array<T, N>`
+
+Serde only provides implementations for `[T; 0 ..= 32]`, because it must support
+much older Rust versions (at time of writing, 1.15+) that do not have
+const-generics. As `bitvec` has an MSRV of 1.56, it *does* have const-generics.
+This type reïmplements Serde’s array behavior for all arrays, so that `bitvec`
+can transport any `BitArray` rather than only small bit-arrays.
+
+## `Domain<Const, T, O>`
+
+`BitSlice` serializes its data buffer by using `Domain` to produce a sequence of
+elements. While the length is always known, and is additionally carried in the
+crate metadata ahead of the data buffer, `Domain` uses Serde’s sequence model in
+order to allow the major implementations to use the provided slice or vector
+deserializers, rather than rebuilding even more logic from scratch.
diff --git a/doc/slice.md b/doc/slice.md
new file mode 100644
index 0000000..5a2f5b3
--- /dev/null
+++ b/doc/slice.md
@@ -0,0 +1,34 @@
+# Bit-Addressable Memory Regions
+
+This module defines the [`BitSlice`] region, which forms the primary export item
+of the crate. It is a region of memory that addresses each bit individually, and
+is analogous to the slice language item. See `BitSlice`’s documentation for
+information on its use.
+
+The other data structures `bitvec` offers are built atop `BitSlice`, and follow
+the development conventions outlined in this module. Because the API surface for
+`bitvec` data structures is so large, they are broken into a number of common
+submodules:
+
+- `slice` defines the `BitSlice` data structure, its inherent methods
+ that are original to `bitvec`, as well as some free functions.
+- `slice::api` defines ports of the `impl<T> [T]` inherent blocks from
+ `core::slice`.
+- `slice::iter` contains all the logic used to iterate across `BitSlice`s,
+ including ports of `core::slice` iterators.
+- `slice::ops` contains implementations of `core::ops` traits that power
+ operator sigils.
+- `slice::traits` contains all the other trait implementations.
+- `slice::tests` contains unit tests for `BitSlice` inherent methods.
+
+Additionally, `slice` has a submodule unique to it: `specialization` contains
+override functions that provide faster behavior on known `BitOrder`
+implementations. Since the other data structures `Deref` to it, they do not need
+to implement bit-order specializations of their own.
+
+All ports of language or standard-library items have an `## Original` section in
+their documentation that links to the item they are porting, and possibly an
+`## API Differences` that explains why the `bitvec` item is not a drop-in
+replacement.
+
+[`BitSlice`]: self::BitSlice
diff --git a/doc/slice/BitSlice.md b/doc/slice/BitSlice.md
new file mode 100644
index 0000000..5b01b0a
--- /dev/null
+++ b/doc/slice/BitSlice.md
@@ -0,0 +1,371 @@
+# Bit-Addressable Memory
+
+A slice of individual bits, anywhere in memory.
+
+`BitSlice<T, O>` is an unsized region type; you interact with it through
+`&BitSlice<T, O>` and `&mut BitSlice<T, O>` references, which work exactly like
+all other Rust references. As with the standard slice’s relationship to arrays
+and vectors, this is `bitvec`’s primary working type, but you will probably
+hold it through one of the provided [`BitArray`], [`BitBox`], or [`BitVec`]
+containers.
+
+`BitSlice` is conceptually a `[bool]` slice, and provides a nearly complete
+mirror of `[bool]`’s API.
+
+Every bit-vector crate can give you an opaque type that hides shift/mask
+calculations from you. `BitSlice` does far more than this: it offers you the
+full Rust guarantees about reference behavior, including lifetime tracking,
+mutability and aliasing awareness, and explicit memory control, *as well as* the
+full set of tools and APIs available to the standard `[bool]` slice type.
+`BitSlice` can arbitrarily split and subslice, just like `[bool]`. You can write
+a linear consuming function and keep the patterns you already know.
+
+For example, to trim all the bits off either edge that match a condition, you
+could write
+
+```rust
+use bitvec::prelude::*;
+
+fn trim<T: BitStore, O: BitOrder>(
+ bits: &BitSlice<T, O>,
+ to_trim: bool,
+) -> &BitSlice<T, O> {
+ let stop = |b: bool| b != to_trim;
+ let front = bits.iter()
+ .by_vals()
+ .position(stop)
+ .unwrap_or(0);
+ let back = bits.iter()
+ .by_vals()
+ .rposition(stop)
+ .map_or(0, |p| p + 1);
+ &bits[front .. back]
+}
+# assert_eq!(trim(bits![0, 0, 1, 1, 0, 1, 0], false), bits![1, 1, 0, 1]);
+```
+
+to get behavior something like
+`trim(&BitSlice[0, 0, 1, 1, 0, 1, 0], false) == &BitSlice[1, 1, 0, 1]`.
+
+## Documentation
+
+All APIs that mirror something in the standard library will have an `Original`
+section linking to the corresponding item. All APIs that have a different
+signature or behavior than the original will have an `API Differences` section
+explaining what has changed, and how to adapt your existing code to the change.
+
+These sections look like this:
+
+## Original
+
+[`[bool]`](https://doc.rust-lang.org/stable/std/primitive.slice.html)
+
+## API Differences
+
+The slice type `[bool]` has no type parameters. `BitSlice<T, O>` has two: one
+for the integer type used as backing storage, and one for the order of bits
+within that integer type.
+
+`&BitSlice<T, O>` is capable of producing `&bool` references to read bits out
+of its memory, but is not capable of producing `&mut bool` references to write
+bits *into* its memory. Any `[bool]` API that would produce a `&mut bool` will
+instead produce a [`BitRef<Mut, T, O>`] proxy reference.
+
+## Behavior
+
+`BitSlice` is a wrapper over `[T]`. It describes a region of memory, and must be
+handled indirectly. This is most commonly done through the reference types
+`&BitSlice` and `&mut BitSlice`, which borrow memory owned by some other value
+in the program. These buffers can be directly owned by the sibling types
+[`BitBox`], which behaves like [`Box<[T]>`](alloc::boxed::Box), and [`BitVec`],
+which behaves like [`Vec<T>`]. It cannot be used as the type parameter to a
+pointer type such as `Box`, `Rc`, `Arc`, or any other indirection.
+
+The `BitSlice` region provides access to each individual bit in the region, as
+if each bit had a memory address that you could use to dereference it. It packs
+each logical bit into exactly one bit of storage memory, just like
+[`std::bitset`] and [`std::vector<bool>`] in C++.
+
+## Type Parameters
+
+`BitSlice` has two type parameters which propagate through nearly every public
+API in the crate. These are very important to its operation, and your choice
+of type arguments informs nearly every part of this library’s behavior.
+
+### `T: BitStore`
+
+[`BitStore`] is the simpler of the two parameters. It refers to the integer type
+used to hold bits. It must be one of the Rust unsigned integer fundamentals:
+`u8`, `u16`, `u32`, `usize`, and on 64-bit systems only, `u64`. In addition, it
+can also be an alias-safe wrapper over them (see the [`access`] module) in
+order to permit bit-slices to share underlying memory without interfering with
+each other.
+
+`BitSlice` references can only be constructed over the integers, not over their
+aliasing wrappers. `BitSlice` will only use aliasing types in its `T` slots when
+you invoke APIs that produce them, such as [`.split_at_mut()`].
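+
+A brief sketch of that aliasing behavior:
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut u8, Lsb0; 0; 16];
+
+// Splitting a `&mut BitSlice` taints both halves with the alias-safe wrapper,
+// because the two halves may share a memory element.
+let (left, right) = bits.split_at_mut(8);
+left.set(0, true);
+right.set(0, true);
+
+assert_eq!(bits.count_ones(), 2);
+```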
+
+The default type argument is `usize`.
+
+The argument you choose is used as the basis of a `[T]` slice, over which the
+`BitSlice` view is produced. `BitSlice<T, _>` is subject to all of the rules
+about alignment that `[T]` is. If you are working with in-memory representation
+formats, chances are that you already have a `T` type with which you’ve been
+working, and should use it here.
+
+If you are only using this crate to discard the seven wasted bits per `bool`
+in a collection of `bool`s, and are not too concerned about the in-memory
+representation, then you should use the default type argument of `usize`. This
+is because most processors work best when moving an entire `usize` between
+memory and the processor itself, and using a smaller type may cause it to slow
+down. Additionally, processor instructions are typically optimized for the whole
+register, and the processor might need to do additional clearing work for
+narrower types.
+
+### `O: BitOrder`
+
+[`BitOrder`] is the more complex parameter. It has a default argument which,
+like `usize`, is a good baseline choice when you do not explicitly need to
+control the representation of bits in memory.
+
+This parameter determines how `bitvec` indexes the bits within a single `T`
+memory element. Computers all agree that in a slice of `T` elements, the element
+with the lower index has a lower memory address than the element with the higher
+index. But the individual bits within an element do not have addresses, and so
+there is no uniform standard of which bit is the zeroth, which is the first,
+which is the penultimate, and which is the last.
+
+To make matters even more confusing, there are two predominant ideas of
+in-element ordering that often *correlate* with the in-element *byte* ordering
+of integer types, but are in fact wholly unrelated! `bitvec` provides these two
+main orderings as types for you, and if you need a different one, it also
+provides the tools you need to write your own.
+
+#### Least Significant Bit Comes First
+
+This ordering, named the [`Lsb0`] type, indexes bits within an element by
+placing the `0` index at the least significant bit (numeric value `1`) and the
+final index at the most significant bit (numeric value [`T::MIN`][minval] for
+signed integers on most machines).
+
+For example, this is the ordering used by most C compilers to lay out bit-field
+struct members on little-endian **byte**-ordered machines.
+
+#### Most Significant Bit Comes First
+
+This ordering, named the [`Msb0`] type, indexes bits within an element by
+placing the `0` index at the most significant bit (numeric value
+[`T::MIN`][minval] for most signed integers) and the final index at the least
+significant bit (numeric value `1`).
+
+For example, this is the ordering used by the [TCP wire format][tcp], and by
+most C compilers to lay out bit-field struct members on big-endian
+**byte**-ordered machines.
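+
+The distinction is easiest to see by viewing the same integer through both
+orderings:
+
+```rust
+use bitvec::prelude::*;
+
+let byte = 1u8;
+
+// `Lsb0` places index 0 at the least significant bit…
+assert!(byte.view_bits::<Lsb0>()[0]);
+// …while `Msb0` places index 0 at the most significant bit.
+assert!(!byte.view_bits::<Msb0>()[0]);
+assert!(byte.view_bits::<Msb0>()[7]);
+```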
+
+#### Default Ordering
+
+The default ordering is [`Lsb0`], as it typically produces shorter object code
+than [`Msb0`] does. If you are implementing a collection, then `Lsb0` will
+likely give you better performance; if you are implementing a buffer protocol,
+then your choice of ordering is dictated by the protocol definition.
+
+## Safety
+
+`BitSlice` is designed to never introduce new memory unsafety that you did not
+provide yourself, either before or during the use of this crate. However, safety
+bugs have been identified before, and you are welcome to submit any discovered
+flaws as a defect report.
+
+The `&BitSlice` reference type uses a private encoding scheme to hold all of the
+information needed in its stack value. This encoding is **not** part of the
+public API of the library, and is not binary-compatible with `&[T]`.
+Furthermore, in order to satisfy Rust’s requirements about alias conditions,
+`BitSlice` performs type transformations on the `T` parameter to ensure that it
+never creates the potential for undefined behavior or data races.
+
+You must never attempt to type-cast a reference to `BitSlice` in any way. You
+must not use [`mem::transmute`] with `BitSlice` anywhere in its type arguments.
+You must not use `as`-casting to convert between `*BitSlice` and any other type.
+You must not attempt to modify the binary representation of a `&BitSlice`
+reference value. These actions will all lead to runtime memory unsafety, are
+(hopefully) likely to induce a program crash, and may possibly cause undefined
+behavior.
+
+Everything in the `BitSlice` public API, even the `unsafe` parts, is guaranteed
+to have no more unsafety than their equivalent items in the standard library.
+All `unsafe` APIs will have documentation explicitly detailing what the API
+requires you to uphold in order for it to function safely and correctly. All
+safe APIs will do so themselves.
+
+## Performance
+
+Like the standard library’s `[T]` slice, `BitSlice` is designed to be very easy
+to use safely, while supporting `unsafe` usage when necessary. Rust has a
+powerful optimizing engine, and `BitSlice` will frequently be compiled to have
+zero runtime cost. Where it is slower, it will not be significantly slower than
+a manual replacement.
+
+As the machine instructions operate on registers rather than bits, your choice
+of [`T: BitStore`] type parameter can influence your bit-slice’s performance.
+Using larger register types means that bit-slices can gallop over
+completely-used interior elements faster, while narrower register types permit
+more graceful handling of subslicing and aliased splits.
+
+## Construction
+
+`BitSlice` views of memory can be constructed over borrowed data in a number of
+ways. As this is a reference-only type, it can only ever be built by borrowing
+an existing memory buffer and taking temporary control of your program’s view of
+the region.
+
+### Macro Constructor
+
+`BitSlice` buffers can be constructed at compile-time through the [`bits!`]
+macro. This macro accepts a superset of the [`vec!`] arguments, and creates an
+appropriate buffer in the local scope. The macro expands to a borrowed
+[`BitArray`] temporary, which will live for the duration of the bound name.
+
+```rust
+use bitvec::prelude::*;
+
+let immut = bits![u8, Lsb0; 0, 1, 0, 0, 1, 0, 0, 1];
+let mutable: &mut BitSlice<_, _> = bits![mut u8, Msb0; 0; 8];
+
+assert_ne!(immut, mutable);
+mutable.clone_from_bitslice(immut);
+assert_eq!(immut, mutable);
+```
+
+### Borrowing Constructors
+
+You may borrow existing elements or slices with the following functions:
+
+- [`from_element`] and [`from_element_mut`],
+- [`from_slice`] and [`from_slice_mut`],
+- [`try_from_slice`] and [`try_from_slice_mut`]
+
+These take references to existing memory and construct `BitSlice` references
+from them. These are the most basic ways to borrow memory and view it as bits;
+however, you should prefer the [`BitView`] trait methods instead.
+
+```rust
+use bitvec::prelude::*;
+
+let data = [0u16; 3];
+let local_borrow = BitSlice::<_, Lsb0>::from_slice(&data);
+
+let mut data = [0u8; 5];
+let local_mut = BitSlice::<_, Lsb0>::from_slice_mut(&mut data);
+```
+
+### Trait Method Constructors
+
+The [`BitView`] trait implements [`.view_bits::<O>()`] and
+[`.view_bits_mut::<O>()`] methods on elements, arrays, and slices. This trait,
+imported in the crate prelude, is *probably* the easiest way for you to borrow
+memory as bits.
+
+```rust
+use bitvec::prelude::*;
+
+let data = [0u32; 5];
+let trait_view = data.view_bits::<Lsb0>();
+
+let mut data = 0usize;
+let trait_mut = data.view_bits_mut::<Msb0>();
+```
+
+### Owned Bit Slices
+
+If you wish to take ownership of a memory region and enforce that it is always
+viewed as a `BitSlice` by default, you can use one of the [`BitArray`],
+[`BitBox`], or [`BitVec`] types, rather than pairing ordinary buffer types with
+the borrowing constructors.
+
+```rust
+use bitvec::prelude::*;
+
+let slice = bits![0; 27];
+let array = bitarr![u8, LocalBits; 0; 10];
+# #[cfg(feature = "alloc")] fn allocates() {
+let boxed = bitbox![0; 10];
+let vec = bitvec![0; 20];
+# } #[cfg(feature = "alloc")] allocates();
+
+// arrays always round up
+assert_eq!(array.as_bitslice(), slice[.. 16]);
+# #[cfg(feature = "alloc")] fn allocates2() {
+# let slice = bits![0; 27];
+# let boxed = bitbox![0; 10];
+# let vec = bitvec![0; 20];
+assert_eq!(boxed.as_bitslice(), slice[.. 10]);
+assert_eq!(vec.as_bitslice(), slice[.. 20]);
+# } #[cfg(feature = "alloc")] allocates2();
+```
+
+## Usage
+
+`BitSlice` implements the full standard-library `[bool]` API. The documentation
+for these API surfaces is intentionally sparse, and forwards to the standard
+library rather than trying to replicate it.
+
+`BitSlice` also has a great deal of novel API surfaces. These are broken into
+separate `impl` blocks below. A short summary:
+
+- Since there is no `BitSlice` literal, the constructor functions `::empty()`,
+ `::from_element()`, `::from_slice()`, and `::try_from_slice()`, and their
+ `_mut` counterparts, create bit-slices as needed.
+- Since `bits[idx] = value` does not exist, you can use `.set()` or `.replace()`
+ (as well as their `_unchecked` and `_aliased` counterparts) to write into a
+ bit-slice.
+- Raw memory can be inspected with `.domain()` and `.domain_mut()`, and a
+ bit-slice can be split on aliasing lines with `.bit_domain()` and
+ `.bit_domain_mut()`.
+- The population can be queried for which indices have `0` or `1` bits by
+ iterating across all such indices, counting them, or counting leading or
+ trailing blocks. Additionally, `.any()`, `.all()`, `.not_any()`, `.not_all()`,
+ and `.some()` test whether bit-slices satisfy aggregate Boolean qualities.
+- Buffer contents can be relocated internally by shifting or rotating to the
+ left or right.
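+
+A quick taste of a few of these inherent APIs:
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 1, 0, 1];
+
+bits.set(0, true);
+assert_eq!(bits.count_ones(), 3);
+assert!(bits.any());
+assert!(!bits.all());
+```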
+
+## Trait Implementations
+
+`BitSlice` adds trait implementations that `[bool]` and `[T]` do not necessarily
+have, including numeric formatting and Boolean arithmetic operators.
+Additionally, the [`BitField`] trait allows bit-slices to act as a buffer for
+wide-value storage.
+
+[minval]: https://doc.rust-lang.org/stable/std/primitive.isize.html#associatedconstant.MIN
+[tcp]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
+
+[`BitArray`]: crate::array::BitArray
+[`BitBox`]: crate::boxed::BitBox
+[`BitField`]: crate::field::BitField
+[`BitRef<Mut, T, O>`]: crate::ptr::BitRef
+[`BitOrder`]: crate::order::BitOrder
+[`BitStore`]: crate::store::BitStore
+[`BitVec`]: crate::vec::BitVec
+[`BitView`]: crate::view::BitView
+[`Cell<T>`]: core::cell::Cell
+[`Lsb0`]: crate::order::Lsb0
+[`Msb0`]: crate::order::Msb0
+[`T: BitStore`]: crate::store::BitStore
+[`Vec<T>`]: alloc::vec::Vec
+
+[`access`]: crate::access
+[`bits!`]: macro@crate::bits
+[`bitvec::prelude::LocalBits`]: crate::order::LocalBits
+[`from_element`]: Self::from_element
+[`from_element_mut`]: Self::from_element_mut
+[`from_slice`]: Self::from_slice
+[`from_slice_mut`]: Self::from_slice_mut
+[`mem::transmute`]: core::mem::transmute
+[`std::bitset`]: https://en.cppreference.com/w/cpp/utility/bitset
+[`std::vector<bool>`]: https://en.cppreference.com/w/cpp/container/vector_bool
+[`try_from_slice`]: Self::try_from_slice
+[`try_from_slice_mut`]: Self::try_from_slice_mut
+[`vec!`]: macro@alloc::vec
+
+[`.split_at_mut()`]: Self::split_at_mut
+[`.view_bits::<O>()`]: crate::view::BitView::view_bits
+[`.view_bits_mut::<O>()`]: crate::view::BitView::view_bits_mut
diff --git a/doc/slice/BitSliceIndex.md b/doc/slice/BitSliceIndex.md
new file mode 100644
index 0000000..7512fec
--- /dev/null
+++ b/doc/slice/BitSliceIndex.md
@@ -0,0 +1,31 @@
+# Bit-Slice Indexing
+
+This trait, like its mirror in `core`, unifies various types that can be used to
+index within a bit-slice. Individual `usize` indices can refer to exactly one
+bit within a bit-slice, and `R: RangeBounds<usize>` ranges can refer to
+subslices of any length within a bit-slice.
+
+The three operations (get, get unchecked, and index) reflect the three theories
+of lookup within a collection: fallible, pre-checked, and crashing on failure.
+
+You will likely not use this trait directly; its methods all have corresponding
+methods on [`BitSlice`] that delegate to particular implementations of it.
+
+## Original
+
+[`slice::SliceIndex`](core::slice::SliceIndex)
+
+## API Differences
+
+The [`SliceIndex::Output`] type is not usable here, because `bitvec` cannot
+manifest a `&mut bool` reference. Work to unify referential values in the trait
+system is ongoing, and in the future this functionality *may* be approximated.
+
+Instead, this uses two output types, [`Immut`] and [`Mut`], that are the
+referential structures produced by indexing immutably or mutably, respectively.
+This allows the range implementations to produce `&/mut BitSlice` as expected,
+while `usize` produces the proxy structure.
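+
+## Examples
+
+A brief sketch of both index categories, through the corresponding `BitSlice`
+methods:
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 1, 1, 0];
+
+// `usize` indices refer to exactly one bit.
+assert!(*bits.get(1).unwrap());
+assert!(bits.get(4).is_none());
+
+// Range indices refer to sub-slices.
+assert_eq!(bits.get(1 .. 3).unwrap(), bits![1, 1]);
+assert_eq!(&bits[1 .. 3], bits![1, 1]);
+```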
+
+[`Immut`]: Self::Immut
+[`Mut`]: Self::Mut
+[`SliceIndex::Output`]: core::slice::SliceIndex::Output
diff --git a/doc/slice/api.md b/doc/slice/api.md
new file mode 100644
index 0000000..d2b3967
--- /dev/null
+++ b/doc/slice/api.md
@@ -0,0 +1,14 @@
+# Port of the `[bool]` Inherent API
+
+This module provides a port of the standard-library’s slice primitive, and
+associated special-purpose items.
+
+It is intended to contain the contents of every `impl<T> [T]` block in the
+standard library (with a few exceptions due to impossibility or uselessness).
+The sibling modules `iter`, `ops`, and `traits` contain slice APIs that relate
+specifically to iteration, the sigil operators, or general-purpose traits.
+
+Documentation for each ported API strives to be *inspired by*, but not a
+transliteration of, the documentation in the standard library. `bitvec`
+generally assumes that you are already familiar with the standard library, and
+links each ported item to the original in the event that you are not.
diff --git a/doc/slice/bitop_assign.md b/doc/slice/bitop_assign.md
new file mode 100644
index 0000000..1842fad
--- /dev/null
+++ b/doc/slice/bitop_assign.md
@@ -0,0 +1,56 @@
+# Boolean Arithmetic
+
+This merges another bit-slice into `self` with a Boolean arithmetic operation.
+If the other bit-slice is shorter than `self`, it is zero-extended. For `BitAnd`,
+this clears all excess bits of `self` to `0`; for `BitOr` and `BitXor`, it
+leaves them untouched.
+
+## Behavior
+
+The Boolean operation proceeds across each bit-slice in iteration order. This is
+`O(n)` in the length of the shorter of `self` and `rhs`. However, it can be
+accelerated if `rhs` has the same type parameters as `self`, and both are using
+one of the orderings provided by `bitvec`. In this case, the implementation
+specializes to use `BitField` batch operations to operate on the slices one word
+at a time, rather than one bit.
+
+Acceleration is not currently provided for custom bit-orderings that use the
+same storage type.
+
+## Pre-`1.0` Behavior
+
+In the `0.` development series, Boolean arithmetic was implemented against all
+`I: Iterator<Item = bool>`. This allowed code such as `bits |= [false, true];`,
+but forbade acceleration in the most common use case (combining two bit-slices)
+because `BitSlice` is not such an iterator.
+
+Usage surveys indicate that it is better for the arithmetic operators to operate
+on bit-slices, and to allow the possibility of specialized acceleration, rather
+than to allow folding against any iterator of `bool`s.
+
+If pre-`1.0` code relies on this behavior specifically, and has non-`BitSlice`
+arguments to the Boolean sigils, then they will need to be replaced with the
+equivalent loop.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let a = bits![mut 0, 0, 1, 1];
+let b = bits![ 0, 1, 0, 1];
+
+*a ^= b;
+assert_eq!(a, bits![0, 1, 1, 0]);
+
+let c = bits![mut 0, 0, 1, 1];
+let d = [false, true, false, true];
+
+// no longer allowed
+// c ^= d.into_iter().by_vals();
+for (mut c, d) in c.iter_mut().zip(d.into_iter())
+{
+ *c ^= d;
+}
+assert_eq!(c, bits![0, 1, 1, 0]);
+```
diff --git a/doc/slice/format.md b/doc/slice/format.md
new file mode 100644
index 0000000..bbc3b20
--- /dev/null
+++ b/doc/slice/format.md
@@ -0,0 +1,46 @@
+# Bit-Slice Rendering
+
+This implementation prints the contents of a `&BitSlice` in one of binary,
+octal, or hexadecimal. It is important to note that this does *not* render the
+raw underlying memory! They render the semantically-ordered contents of the
+bit-slice as numerals. This distinction matters if you use type parameters that
+differ from those presumed by your debugger (which is usually `<u8, Msb0>`).
+
+The output separates the `T` elements as individual list items, and renders each
+element as a base-2, -8, or -16 numeric string. When walking an element, the bits
+traversed by the bit-slice are considered to be stored in
+most-significant-bit-first ordering. This means that index `[0]` is the high bit
+of the left-most digit, and index `[n]` is the low bit of the right-most digit,
+in a given printed word.
+
+In order to render according to expectations of the Arabic numeral system, an
+element being transcribed is chunked into digits from the least-significant end
+of its rendered form. This is most noticeable in octal, which will always have a
+smaller ceiling on the left-most digit in a printed word, while the right-most
+digit in that word is able to use the full `0 ..= 7` numeral range.
+
+## Examples
+
+```rust
+# #[cfg(feature = "std")] {
+use bitvec::prelude::*;
+
+let data = [
+ 0b000000_10u8,
+// digits print LTR
+ 0b10_001_101,
+// significance is computed RTL
+ 0b01_000000,
+];
+let bits = &data.view_bits::<Msb0>()[6 .. 18];
+
+assert_eq!(format!("{:b}", bits), "[10, 10001101, 01]");
+assert_eq!(format!("{:o}", bits), "[2, 215, 1]");
+assert_eq!(format!("{:X}", bits), "[2, 8D, 1]");
+# }
+```
+
+The `{:#}` format modifier causes the standard `0b`, `0o`, or `0x` prefix to be
+applied to each printed word. The other format specifiers are not interpreted by
+this implementation, and apply to the entire rendered text, not to individual
+words.
diff --git a/doc/slice/from_raw_parts.md b/doc/slice/from_raw_parts.md
new file mode 100644
index 0000000..6e394b5
--- /dev/null
+++ b/doc/slice/from_raw_parts.md
@@ -0,0 +1,68 @@
+# Raw Bit-Slice Construction
+
+This produces an `&BitSlice<T, O>` reference handle from a `BitPtr<Const, T, O>`
+bit-pointer and a length.
+
+## Parameters
+
+1. `data`: a bit-pointer to the starting bit of the produced bit-slice. This
+ should generally have been produced by `BitSlice::as_ptr`, but you are able
+ to construct these pointers directly if you wish.
+1. `len`: the number of bits, beginning at `data`, that the produced bit-slice
+ includes. This value cannot depart an allocation area, or exceed `BitSlice`’s
+ encoding limitations.
+
+## Returns
+
+This returns a `Result`, because it can detect and gracefully fail if `len`
+is too large, or if `data` is ill-formed. This fails if it has an error while
+encoding the `&BitSlice`, and succeeds if it is able to produce a correctly
+encoded value.
+
+Note that this is not able to detect semantic violations of the memory model.
+You are responsible for upholding memory safety.
+
+## Original
+
+[`slice::from_raw_parts`](core::slice::from_raw_parts)
+
+## API Differences
+
+This takes a [`BitPtr<Const, T, O>`] instead of a hypothetical `*const Bit`,
+because `bitvec` is not able to express raw Rust pointers to individual bits.
+
+Additionally, it returns a `Result` rather than a direct bit-slice, because the
+given `len` argument may be invalid to encode into a `&BitSlice` reference.
+
+## Safety
+
+This has the same memory safety requirements as the standard-library function:
+
+- `data` must be valid for reads and writes of at least `len` bits,
+- The bits that the produced bit-slice refers to must be wholly unreachable by
+ any other part of the program for the duration of the lifetime `'a`,
+
+and additionally imposes some of its own:
+
+- `len` cannot exceed [`BitSlice::MAX_BITS`].
+
+## Examples
+
+```rust
+use bitvec::{
+ prelude::*,
+ index::BitIdx,
+ ptr::Const,
+ slice as bv_slice,
+};
+
+let elem = 6u16;
+let addr = (&elem).into();
+let head = BitIdx::new(1).unwrap();
+
+let data: BitPtr<Const, u16> = BitPtr::new(addr, head).unwrap();
+let bits = unsafe { bv_slice::from_raw_parts(data, 3) };
+assert_eq!(bits.unwrap(), bits![1, 1, 0]);
+```
+
+[`BitSlice::MAX_BITS`]: crate::slice::BitSlice::MAX_BITS
diff --git a/doc/slice/from_raw_parts_mut.md b/doc/slice/from_raw_parts_mut.md
new file mode 100644
index 0000000..1651c87
--- /dev/null
+++ b/doc/slice/from_raw_parts_mut.md
@@ -0,0 +1,70 @@
+# Raw Bit-Slice Construction
+
+This produces an `&mut BitSlice<T, O>` reference handle from a
+`BitPtr<Mut, T, O>` bit-pointer and a length.
+
+## Parameters
+
+1. `data`: a bit-pointer to the starting bit of the produced bit-slice. This
+ should generally have been produced by `BitSlice::as_mut_ptr`, but you are
+ able to construct these pointers directly if you wish.
+1. `len`: the number of bits, beginning at `data`, that the produced bit-slice
+ includes. This value cannot depart an allocation area, or exceed `BitSlice`’s
+ encoding limitations.
+
+## Returns
+
+This returns a `Result`, because it can detect and gracefully fail if `len`
+is too large, or if `data` is ill-formed. This fails if it has an error while
+encoding the `&mut BitSlice`, and succeeds if it is able to produce a correctly
+encoded value.
+
+Note that this is not able to detect semantic violations of the memory model.
+You are responsible for upholding memory safety.
+
+## Original
+
+[`slice::from_raw_parts_mut`](core::slice::from_raw_parts_mut)
+
+## API Differences
+
+This takes a [`BitPtr<Mut, T, O>`] instead of a hypothetical `*mut Bit`, because
+`bitvec` is not able to express raw Rust pointers to individual bits.
+
+Additionally, it returns a `Result` rather than a direct bit-slice, because the
+given `len` argument may be invalid to encode into a `&mut BitSlice` reference.
+
+## Safety
+
+This has the same memory safety requirements as the standard-library function:
+
+- `data` must be valid for reads and writes of at least `len` bits,
+- The bits that the produced bit-slice refers to must be wholly unreachable by
+ any other part of the program for the duration of the lifetime `'a`,
+
+and additionally imposes some of its own:
+
+- `len` cannot exceed [`BitSlice::MAX_BITS`].
+
+## Examples
+
+```rust
+use bitvec::{
+ prelude::*,
+ index::BitIdx,
+ ptr::Mut,
+ slice as bv_slice,
+};
+
+let mut elem = 0u16;
+let addr = (&mut elem).into();
+let head = BitIdx::new(1).unwrap();
+
+let data: BitPtr<Mut, u16> = BitPtr::new(addr, head).unwrap();
+let bits = unsafe { bv_slice::from_raw_parts_mut(data, 3) };
+bits.unwrap()[1 ..].fill(true);
+
+assert_eq!(elem, 12);
+```
+
+[`BitSlice::MAX_BITS`]: crate::slice::BitSlice::MAX_BITS
diff --git a/doc/slice/from_raw_parts_unchecked.md b/doc/slice/from_raw_parts_unchecked.md
new file mode 100644
index 0000000..bf7694b
--- /dev/null
+++ b/doc/slice/from_raw_parts_unchecked.md
@@ -0,0 +1,29 @@
+# Raw Bit-Slice Construction
+
+This is equivalent to [`slice::from_raw_parts()`], except that it does not check
+any of the encoding requirements.
+
+## Safety
+
+Callers must both uphold the safety requirements of that function, as well as
+ensure that the arguments would not cause it to fail gracefully.
+
+Arguments that would cause `from_raw_parts` to return `Err` instead produce a
+bit-slice handle whose value is undefined.
+
+## Parameters
+
+1. `ptr`: A bit-pointer to a `T` memory element. The pointer’s data address must
+ be well-aligned, the bit-index must be valid for `T`, the target region
+ must be initialized for `len` bits.
+1. `len`: A count of live bits beginning at `ptr`. It must not exceed
+ [`MAX_BITS`].
+
+## Returns
+
+A shared `BitSlice` reference over the described region. If either of the
+parameters are invalid, then the value of the reference is library-level
+undefined.
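+
+## Examples
+
+A minimal sketch; the pointer and length here satisfy every requirement listed
+above:
+
+```rust
+use bitvec::prelude::*;
+use bitvec::slice as bv_slice;
+
+let elem = 6u16;
+let ptr = BitPtr::<_, _, Lsb0>::from_ref(&elem);
+
+let bits = unsafe { bv_slice::from_raw_parts_unchecked(ptr, 4) };
+assert_eq!(bits, bits![0, 1, 1, 0]);
+```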
+
+[`MAX_BITS`]: crate::slice::BitSlice::MAX_BITS
+[`slice::from_raw_parts()`]: crate::slice::from_raw_parts
diff --git a/doc/slice/from_raw_parts_unchecked_mut.md b/doc/slice/from_raw_parts_unchecked_mut.md
new file mode 100644
index 0000000..f1af9ac
--- /dev/null
+++ b/doc/slice/from_raw_parts_unchecked_mut.md
@@ -0,0 +1,31 @@
+# Raw Bit-Slice Construction
+
+This is equivalent to [`slice::from_raw_parts_mut()`], except that it does not
+check any of the encoding requirements.
+
+## Safety
+
+Callers must both uphold the safety requirements of that function and ensure
+that the arguments would not cause it to fail gracefully.
+
+Arguments that would cause `from_raw_parts_mut` to return `Err` instead produce
+a bit-slice handle whose value is undefined.
+
+## Parameters
+
+1. `ptr`: A bit-pointer to a `T` memory element. The pointer’s data address must
+ be well-aligned, the bit-index must be valid for `T`, and the target region
+ must be initialized for `len` bits.
+1. `len`: A count of live bits beginning at `ptr`. It must not exceed
+ [`MAX_BITS`].
+
+## Returns
+
+An exclusive `BitSlice` reference over the described region. If either of the
+parameters is invalid, then the value of the reference is library-level
+undefined. If any other reference, `BitSlice` or not, targets any of the bits
+that this reference governs while it is alive, then behavior is language-level
+undefined.
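+
+## Examples
+
+A brief sketch in which both arguments are derived from a live, exclusively-held
+bit-slice, so that the requirements above hold by construction:
+
+```rust
+use bitvec::prelude::*;
+use bitvec::slice as bv_slice;
+
+let bits = bits![mut 0; 4];
+let len = bits.len();
+let ptr = bits.as_mut_bitptr();
+
+// `ptr` and `len` both come from a live, exclusively-held bit-slice, so the
+// checks that `from_raw_parts_mut` would perform are already satisfied.
+let view = unsafe { bv_slice::from_raw_parts_unchecked_mut(ptr, len) };
+view.fill(true);
+
+assert_eq!(bits, bits![1; 4]);
+```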
+
+[`MAX_BITS`]: crate::slice::BitSlice::MAX_BITS
+[`slice::from_raw_parts_mut()`]: crate::slice::from_raw_parts_mut
diff --git a/doc/slice/iter.md b/doc/slice/iter.md
new file mode 100644
index 0000000..e386b37
--- /dev/null
+++ b/doc/slice/iter.md
@@ -0,0 +1,13 @@
+# Bit-Slice Iteration
+
+Like the standard-library slice module, this module contains a number of
+specialized iterators. In addition to the ports of the iterators in
+[`core::slice`], this also defines iterators that seek out indices of set or
+cleared bits in sparse collections.
+
+Each iterator here is documented most extensively on the [`BitSlice`] method
+that produces it, and has only light documentation on its own type or inherent
+methods.
+
+[`BitSlice`]: super::BitSlice
+[`core::slice`]: core::slice
diff --git a/doc/slice/iter/Chunks.md b/doc/slice/iter/Chunks.md
new file mode 100644
index 0000000..45ec600
--- /dev/null
+++ b/doc/slice/iter/Chunks.md
@@ -0,0 +1,29 @@
+# Shared Bit-Slice Chunking
+
+This iterator yields successive non-overlapping chunks of a bit-slice. Chunking
+advances one subslice at a time, starting at the beginning of the bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the final chunk will be the remainder, and will be shorter than requested.
+
+It is created by the [`BitSlice::chunks`] method.
+
+## Original
+
+[`slice::Chunks`](core::slice::Chunks)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut chunks = bits.chunks(3);
+
+assert_eq!(chunks.next().unwrap(), bits![0; 3]);
+assert_eq!(chunks.next().unwrap(), bits![1; 3]);
+assert_eq!(chunks.next().unwrap(), bits![0, 1]);
+assert!(chunks.next().is_none());
+```
+
+[`BitSlice::chunks`]: crate::slice::BitSlice::chunks
diff --git a/doc/slice/iter/ChunksExact.md b/doc/slice/iter/ChunksExact.md
new file mode 100644
index 0000000..09c06bc
--- /dev/null
+++ b/doc/slice/iter/ChunksExact.md
@@ -0,0 +1,31 @@
+# Shared Bit-Slice Exact Chunking
+
+This iterator yields successive non-overlapping chunks of a bit-slice. Chunking
+advances one sub-slice at a time, starting at the beginning of the bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the leftover segment at the back is not iterated, but can be accessed with
+the [`.remainder()`] method.
+
+It is created by the [`BitSlice::chunks_exact`] method.
+
+## Original
+
+[`slice::ChunksExact`](core::slice::ChunksExact)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut chunks = bits.chunks_exact(3);
+
+assert_eq!(chunks.next().unwrap(), bits![0; 3]);
+assert_eq!(chunks.next().unwrap(), bits![1; 3]);
+assert!(chunks.next().is_none());
+assert_eq!(chunks.remainder(), bits![0, 1]);
+```
+
+[`BitSlice::chunks_exact`]: crate::slice::BitSlice::chunks_exact
+[`.remainder()`]: Self::remainder
diff --git a/doc/slice/iter/ChunksExactMut.md b/doc/slice/iter/ChunksExactMut.md
new file mode 100644
index 0000000..fd9add9
--- /dev/null
+++ b/doc/slice/iter/ChunksExactMut.md
@@ -0,0 +1,42 @@
+# Exclusive Bit-Slice Exact Chunking
+
+This iterator yields successive non-overlapping mutable chunks of a bit-slice.
+Chunking advances one sub-slice at a time, starting at the beginning of the
+bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the leftover segment at the back is not iterated, but can be accessed with
+the [`.into_remainder()`] or [`.take_remainder()`] methods.
+
+It is created by the [`BitSlice::chunks_exact_mut`] method.
+
+## Original
+
+[`slice::ChunksExactMut`](core::slice::ChunksExactMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut chunks = unsafe {
+ bits.chunks_exact_mut(3).remove_alias()
+};
+
+chunks.next().unwrap().fill(true);
+chunks.next().unwrap().fill(false);
+assert!(chunks.next().is_none());
+chunks.take_remainder().copy_from_bitslice(bits![1, 0]);
+assert!(chunks.take_remainder().is_empty());
+
+assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 1, 0]);
+```
+
+[`BitSlice::chunks_exact_mut`]: crate::slice::BitSlice::chunks_exact_mut
+[`.into_remainder()`]: Self::into_remainder
+[`.take_remainder()`]: Self::take_remainder
diff --git a/doc/slice/iter/ChunksMut.md b/doc/slice/iter/ChunksMut.md
new file mode 100644
index 0000000..bfab4e3
--- /dev/null
+++ b/doc/slice/iter/ChunksMut.md
@@ -0,0 +1,38 @@
+# Exclusive Bit-Slice Chunking
+
+This iterator yields successive non-overlapping mutable chunks of a bit-slice.
+Chunking advances one subslice at a time, starting at the beginning of the
+bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the final chunk will be the remainder, and will be shorter than requested.
+
+It is created by the [`BitSlice::chunks_mut`] method.
+
+## Original
+
+[`slice::ChunksMut`](core::slice::ChunksMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut chunks = unsafe {
+ bits.chunks_mut(3).remove_alias()
+};
+
+chunks.next().unwrap().fill(true);
+chunks.next().unwrap().fill(false);
+chunks.next().unwrap().copy_from_bitslice(bits![1, 0]);
+assert!(chunks.next().is_none());
+
+assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 1, 0]);
+```
+
+[`BitSlice::chunks_mut`]: crate::slice::BitSlice::chunks_mut
diff --git a/doc/slice/iter/Iter.md b/doc/slice/iter/Iter.md
new file mode 100644
index 0000000..4394406
--- /dev/null
+++ b/doc/slice/iter/Iter.md
@@ -0,0 +1,36 @@
+# Shared Bit-Slice Iteration
+
+This view iterates each bit in the bit-slice by [proxy reference][0]. It is
+created by the [`BitSlice::iter`] method.
+
+## Original
+
+[`slice::Iter`](core::slice::Iter)
+
+## API Differences
+
+While this iterator can manifest `&bool` references, it instead yields the
+`bitvec` [proxy reference][0] for consistency with the [`IterMut`] type. It can
+be converted to yield true references with [`.by_refs()`]. Additionally, because
+it does not yield `&bool`, the [`Iterator::copied`] method does not apply. It
+can be converted to an iterator of `bool` values with [`.by_vals()`].
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 1];
+for bit in bits.iter() {
+ # #[cfg(feature = "std")] {
+ println!("{}", bit);
+ # }
+}
+```
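+
+The adapters named above can be applied when `bool` values, rather than proxy
+references, are wanted:
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 1, 1, 0, 1];
+
+// `.by_vals()` converts the proxy-reference iterator into a `bool` iterator,
+// standing in where `Iterator::copied` would be used on a `&bool` iterator.
+let ones = bits.iter().by_vals().filter(|bit| *bit).count();
+assert_eq!(ones, 3);
+```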
+
+[`BitSlice::iter`]: crate::slice::BitSlice::iter
+[`IterMut`]: crate::slice::IterMut
+[`Iterator::copied`]: core::iter::Iterator::copied
+[`.by_refs()`]: Self::by_refs
+[`.by_vals()`]: Self::by_vals
+[0]: crate::ptr::BitRef
diff --git a/doc/slice/iter/IterMut.md b/doc/slice/iter/IterMut.md
new file mode 100644
index 0000000..9e2736e
--- /dev/null
+++ b/doc/slice/iter/IterMut.md
@@ -0,0 +1,30 @@
+# Exclusive Bit-Slice Iteration
+
+This view iterates each bit in the bit-slice by exclusive proxy reference. It is
+created by the [`BitSlice::iter_mut`] method.
+
+## Original
+
+[`slice::IterMut`](core::slice::IterMut)
+
+## API Differences
+
+Because `bitvec` cannot manifest `&mut bool` references, this instead yields the
+crate [proxy reference][0]. Because the proxy is a true type, rather than an
+`&mut` reference, its name must be bound with `mut` in order to write through
+it.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 1];
+for mut bit in bits.iter_mut() {
+ *bit = !*bit;
+}
+assert_eq!(bits, bits![1, 0]);
+```
+
+[`BitSlice::iter_mut`]: crate::slice::BitSlice::iter_mut
+[0]: crate::ptr::BitRef
diff --git a/doc/slice/iter/IterOnes.md b/doc/slice/iter/IterOnes.md
new file mode 100644
index 0000000..6cb715c
--- /dev/null
+++ b/doc/slice/iter/IterOnes.md
@@ -0,0 +1,23 @@
+# Bit Seeking
+
+This iterator yields indices of bits set to `1`, rather than bit-values
+themselves. It is essentially the inverse of indexing: rather than applying a
+`usize` to the bit-slice to get a `bool`, this applies a `bool` to get a
+`usize`.
+
+It is created by the [`.iter_ones()`] method on bit-slices.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 1, 0, 0, 1];
+let mut ones = bits.iter_ones();
+
+assert_eq!(ones.next(), Some(1));
+assert_eq!(ones.next(), Some(4));
+assert!(ones.next().is_none());
+```
+
+[`.iter_ones()`]: crate::slice::BitSlice::iter_ones
diff --git a/doc/slice/iter/IterZeros.md b/doc/slice/iter/IterZeros.md
new file mode 100644
index 0000000..29672e3
--- /dev/null
+++ b/doc/slice/iter/IterZeros.md
@@ -0,0 +1,23 @@
+# Bit Seeking
+
+This iterator yields indices of bits cleared to `0`, rather than bit-values
+themselves. It is essentially the inverse of indexing: rather than applying a
+`usize` to the bit-slice to get a `bool`, this applies a `bool` to get a
+`usize`.
+
+It is created by the [`.iter_zeros()`] method on bit-slices.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![1, 0, 1, 1, 0];
+let mut zeros = bits.iter_zeros();
+
+assert_eq!(zeros.next(), Some(1));
+assert_eq!(zeros.next(), Some(4));
+assert!(zeros.next().is_none());
+```
+
+[`.iter_zeros()`]: crate::slice::BitSlice::iter_zeros
diff --git a/doc/slice/iter/NoAlias.md b/doc/slice/iter/NoAlias.md
new file mode 100644
index 0000000..6f2fcf3
--- /dev/null
+++ b/doc/slice/iter/NoAlias.md
@@ -0,0 +1,103 @@
+# Anti-Aliasing Iterator Adapter
+
+This structure is an adapter over a corresponding `&mut BitSlice` iterator. It
+removes the `::Alias` taint marker, allowing mutations through each yielded bit
+reference to skip any costs associated with aliasing.
+
+## Safety
+
+The default `&mut BitSlice` iterators attach an `::Alias` taint for a reason:
+the iterator protocol does not mandate that yielded items have a narrower
+lifespan than the iterator that produced them! As such, it is completely
+possible to pull multiple yielded items out into the same scope, where they have
+overlapping lifetimes.
+
+The `BitStore` principles require that whenever two write-capable handles to the
+same memory region have overlapping lifetimes, they *must* be `::Alias` tainted.
+This adapter removes the `::Alias` taint, but is not able to enforce strictly
+non-overlapping lifetimes of yielded items.
+
+As such, this adapter is **unsafe to construct**, and you **must** only use it
+in a `for`-loop where each yielded item does not escape the loop body.
+
+In order to help enforce this limitation, this adapter structure is *not* `Send`
+or `Sync`. It must be consumed in the scope where it was created.
+
+## Usage
+
+If you are using a loop that satisfies the safety requirement, you can use the
+`.remove_alias()` method on your mutable iterator and configure it to yield
+handles that do not impose additional alias-protection costs when accessing the
+underlying memory.
+
+Note that this adapter does not go to `T::Unalias`: it only takes an iterator
+that yields `T::Alias` and unwinds it to `T`. If the source bit-slice was
+*already* alias-tainted, the original protection is not removed. You are
+responsible for doing so by using [`.bit_domain_mut()`].
+
+## Examples
+
+This example shows using `.chunks_mut()` without incurring alias protection.
+
+This documentation is replicated on all `NoAlias` types; the examples will work
+for all of them, but are not specialized in the text.
+
+```rust
+use bitvec::prelude::*;
+use bitvec::slice::{ChunksMut, ChunksMutNoAlias};
+type Alias8 = <u8 as BitStore>::Alias;
+
+let mut data: BitArr!(for 40, in u8, Msb0) = bitarr![u8, Msb0; 0; 40];
+
+let mut chunks: ChunksMut<u8, Msb0> = data.chunks_mut(5);
+let _chunk: &mut BitSlice<Alias8, Msb0> = chunks.next().unwrap();
+
+let mut chunks: ChunksMutNoAlias<u8, Msb0> = unsafe { chunks.remove_alias() };
+let _chunk: &mut BitSlice<u8, Msb0> = chunks.next().unwrap();
+```
+
+This example shows how using [`.split_at_mut()`] forces `.remove_alias()` to
+retain a layer of alias protection.
+
+```rust
+use bitvec::prelude::*;
+use bitvec::slice::{ChunksMut, ChunksMutNoAlias};
+type Alias8 = <u8 as BitStore>::Alias;
+type Alias8Alias = <Alias8 as BitStore>::Alias;
+
+let mut data: BitArr!(for 40, in u8, Msb0) = bitarr!(u8, Msb0; 0; 40);
+let (_head, rest): (_, &mut BitSlice<Alias8, Msb0>) = data.split_at_mut(5);
+
+let mut chunks: ChunksMut<Alias8, Msb0> = rest.chunks_mut(5);
+let _chunk: &mut BitSlice<Alias8, Msb0> = chunks.next().unwrap();
+
+let mut chunks: ChunksMutNoAlias<Alias8, Msb0> = unsafe { chunks.remove_alias() };
+let _chunk: &mut BitSlice<Alias8, Msb0> = chunks.next().unwrap();
+```
+
+And this example shows how to use `.bit_domain_mut()` in order to undo the
+effects of `.split_at_mut()`, so that `.remove_alias()` can complete its work.
+
+```rust
+use bitvec::prelude::*;
+use bitvec::slice::{ChunksMut, ChunksMutNoAlias};
+type Alias8 = <u8 as BitStore>::Alias;
+
+let mut data: BitArr!(for 40, in u8, Msb0) = bitarr!(u8, Msb0; 0; 40);
+let (_head, rest): (_, &mut BitSlice<Alias8, Msb0>) = data.split_at_mut(5);
+
+let (head, body, tail): (
+ &mut BitSlice<Alias8, Msb0>,
+ &mut BitSlice<u8, Msb0>,
+ &mut BitSlice<Alias8, Msb0>,
+) = rest.bit_domain_mut().region().unwrap();
+
+let mut chunks: ChunksMut<u8, Msb0> = body.chunks_mut(5);
+let _chunk: &mut BitSlice<Alias8, Msb0> = chunks.next().unwrap();
+
+let mut chunks: ChunksMutNoAlias<u8, Msb0> = unsafe { chunks.remove_alias() };
+let _chunk: &mut BitSlice<u8, Msb0> = chunks.next().unwrap();
+```
+
+[`.bit_domain_mut()`]: crate::slice::BitSlice::bit_domain_mut
+[`.split_at_mut()`]: crate::slice::BitSlice::split_at_mut
diff --git a/doc/slice/iter/RChunks.md b/doc/slice/iter/RChunks.md
new file mode 100644
index 0000000..1556fa9
--- /dev/null
+++ b/doc/slice/iter/RChunks.md
@@ -0,0 +1,29 @@
+# Shared Bit-Slice Reverse Chunking
+
+This iterator yields successive non-overlapping chunks of a bit-slice. Chunking
+advances one subslice at a time, starting at the end of the bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the final chunk will be the remainder, and will be shorter than requested.
+
+It is created by the [`BitSlice::rchunks`] method.
+
+## Original
+
+[`slice::RChunks`](core::slice::RChunks)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 1, 0, 0, 0, 1, 1, 1];
+let mut chunks = bits.rchunks(3);
+
+assert_eq!(chunks.next().unwrap(), bits![1; 3]);
+assert_eq!(chunks.next().unwrap(), bits![0; 3]);
+assert_eq!(chunks.next().unwrap(), bits![0, 1]);
+assert!(chunks.next().is_none());
+```
+
+[`BitSlice::rchunks`]: crate::slice::BitSlice::rchunks
diff --git a/doc/slice/iter/RChunksExact.md b/doc/slice/iter/RChunksExact.md
new file mode 100644
index 0000000..812bf67
--- /dev/null
+++ b/doc/slice/iter/RChunksExact.md
@@ -0,0 +1,31 @@
+# Shared Bit-Slice Reverse Exact Chunking
+
+This iterator yields successive non-overlapping chunks of a bit-slice. Chunking
+advances one sub-slice at a time, starting at the end of the bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the leftover segment at the front is not iterated, but can be accessed with
+the [`.remainder()`] method.
+
+It is created by the [`BitSlice::rchunks_exact`] method.
+
+## Original
+
+[`slice::RChunksExact`](core::slice::RChunksExact)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 1, 0, 0, 0, 1, 1, 1];
+let mut chunks = bits.rchunks_exact(3);
+
+assert_eq!(chunks.next().unwrap(), bits![1; 3]);
+assert_eq!(chunks.next().unwrap(), bits![0; 3]);
+assert!(chunks.next().is_none());
+assert_eq!(chunks.remainder(), bits![0, 1]);
+```
+
+[`BitSlice::rchunks_exact`]: crate::slice::BitSlice::rchunks_exact
+[`.remainder()`]: Self::remainder
diff --git a/doc/slice/iter/RChunksExactMut.md b/doc/slice/iter/RChunksExactMut.md
new file mode 100644
index 0000000..a45e29a
--- /dev/null
+++ b/doc/slice/iter/RChunksExactMut.md
@@ -0,0 +1,41 @@
+# Exclusive Bit-Slice Reverse Exact Chunking
+
+This iterator yields successive non-overlapping mutable chunks of a bit-slice.
+Chunking advances one sub-slice at a time, starting at the end of the bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the leftover segment at the front is not iterated, but can be accessed with
+the [`.into_remainder()`] or [`.take_remainder()`] methods.
+
+It is created by the [`BitSlice::rchunks_exact_mut`] method.
+
+## Original
+
+[`slice::RChunksExactMut`](core::slice::RChunksExactMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 1, 0, 0, 0, 1, 1, 1];
+let mut chunks = unsafe {
+ bits.rchunks_exact_mut(3).remove_alias()
+};
+
+chunks.next().unwrap().fill(false);
+chunks.next().unwrap().fill(true);
+assert!(chunks.next().is_none());
+chunks.take_remainder().copy_from_bitslice(bits![1, 0]);
+assert!(chunks.take_remainder().is_empty());
+
+assert_eq!(bits, bits![1, 0, 1, 1, 1, 0, 0, 0]);
+```
+
+[`BitSlice::rchunks_exact_mut`]: crate::slice::BitSlice::rchunks_exact_mut
+[`.into_remainder()`]: Self::into_remainder
+[`.take_remainder()`]: Self::take_remainder
diff --git a/doc/slice/iter/RChunksMut.md b/doc/slice/iter/RChunksMut.md
new file mode 100644
index 0000000..72d7fac
--- /dev/null
+++ b/doc/slice/iter/RChunksMut.md
@@ -0,0 +1,37 @@
+# Exclusive Bit-Slice Reverse Chunking
+
+This iterator yields successive non-overlapping mutable chunks of a bit-slice.
+Chunking advances one subslice at a time, starting at the end of the bit-slice.
+
+If the original bit-slice’s length is not evenly divided by the chunk width,
+then the final chunk will be the remainder, and will be shorter than requested.
+
+It is created by the [`BitSlice::rchunks_mut`] method.
+
+## Original
+
+[`slice::RChunksMut`](core::slice::RChunksMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 1, 0, 0, 0, 1, 1, 1];
+let mut chunks = unsafe {
+ bits.rchunks_mut(3).remove_alias()
+};
+
+chunks.next().unwrap().fill(false);
+chunks.next().unwrap().fill(true);
+chunks.next().unwrap().copy_from_bitslice(bits![1, 0]);
+assert!(chunks.next().is_none());
+
+assert_eq!(bits, bits![1, 0, 1, 1, 1, 0, 0, 0]);
+```
+
+[`BitSlice::rchunks_mut`]: crate::slice::BitSlice::rchunks_mut
diff --git a/doc/slice/iter/RSplit.md b/doc/slice/iter/RSplit.md
new file mode 100644
index 0000000..554b90b
--- /dev/null
+++ b/doc/slice/iter/RSplit.md
@@ -0,0 +1,35 @@
+# Shared Bit-Slice Reverse Splitting
+
+This iterator yields successive non-overlapping segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the end of the bit-slice.
+
+The matched bit is **not** included in the yielded segment.
+
+It is created by the [`BitSlice::rsplit`] method.
+
+## Original
+
+[`slice::RSplit`](core::slice::RSplit)
+
+## API Differences
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.rsplit(|idx, _bit| idx % 3 == 2);
+
+assert_eq!(split.next().unwrap(), bits![0, 1]);
+assert_eq!(split.next().unwrap(), bits![1; 2]);
+assert_eq!(split.next().unwrap(), bits![0; 2]);
+assert!(split.next().is_none());
+```
+
+[`BitSlice::rsplit`]: crate::slice::BitSlice::rsplit
diff --git a/doc/slice/iter/RSplitMut.md b/doc/slice/iter/RSplitMut.md
new file mode 100644
index 0000000..c8d21ca
--- /dev/null
+++ b/doc/slice/iter/RSplitMut.md
@@ -0,0 +1,41 @@
+# Exclusive Bit-Slice Reverse Splitting
+
+This iterator yields successive non-overlapping mutable segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the end of the bit-slice.
+
+The matched bit is **not** included in the yielded segment.
+
+It is created by the [`BitSlice::rsplit_mut`] method.
+
+## Original
+
+[`slice::RSplitMut`](core::slice::RSplitMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = unsafe {
+ bits.rsplit_mut(|idx, _bit| idx % 3 == 2).remove_alias()
+};
+
+split.next().unwrap().copy_from_bitslice(bits![1, 0]);
+split.next().unwrap().fill(false);
+split.next().unwrap().fill(true);
+assert!(split.next().is_none());
+
+assert_eq!(bits, bits![1, 1, 0, 0, 0, 1, 1, 0]);
+```
+
+[`BitSlice::rsplit_mut`]: crate::slice::BitSlice::rsplit_mut
diff --git a/doc/slice/iter/RSplitN.md b/doc/slice/iter/RSplitN.md
new file mode 100644
index 0000000..7de2646
--- /dev/null
+++ b/doc/slice/iter/RSplitN.md
@@ -0,0 +1,36 @@
+# Shared Bit-Slice Reverse Splitting
+
+This iterator yields `n` successive non-overlapping segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the end of the bit-slice.
+
+The matched bit is **not** included in the yielded segment. The `n`th yielded
+segment does not attempt any further splits, and extends to the front of the
+bit-slice.
+
+It is created by the [`BitSlice::rsplitn`] method.
+
+## Original
+
+[`slice::RSplitN`](core::slice::RSplitN)
+
+## API Differences
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.rsplitn(2, |idx, _bit| idx % 3 == 2);
+
+assert_eq!(split.next().unwrap(), bits![0, 1]);
+assert_eq!(split.next().unwrap(), bits![0, 0, 0, 1, 1]);
+assert!(split.next().is_none());
+```
+
+[`BitSlice::rsplitn`]: crate::slice::BitSlice::rsplitn
diff --git a/doc/slice/iter/RSplitNMut.md b/doc/slice/iter/RSplitNMut.md
new file mode 100644
index 0000000..88d0209
--- /dev/null
+++ b/doc/slice/iter/RSplitNMut.md
@@ -0,0 +1,40 @@
+# Exclusive Bit-Slice Reverse Splitting
+
+This iterator yields `n` successive non-overlapping mutable segments of a
+bit-slice, separated by bits that match a predicate function. Splitting advances
+one segment at a time, starting at the end of the bit-slice.
+
+The matched bit is **not** included in the yielded segment. The `n`th yielded
+segment does not attempt any further splits, and extends to the front of the
+bit-slice.
+
+It is created by the [`BitSlice::rsplitn_mut`] method.
+
+## Original
+
+[`slice::RSplitNMut`](core::slice::RSplitNMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.rsplitn_mut(2, |idx, _bit| idx % 3 == 2);
+
+split.next().unwrap().fill(false);
+split.next().unwrap().fill(false);
+assert!(split.next().is_none());
+
+assert_eq!(bits, bits![0, 0, 0, 0, 0, 1, 0, 0]);
+```
+
+[`BitSlice::rsplitn_mut`]: crate::slice::BitSlice::rsplitn_mut
diff --git a/doc/slice/iter/Split.md b/doc/slice/iter/Split.md
new file mode 100644
index 0000000..4f1bba1
--- /dev/null
+++ b/doc/slice/iter/Split.md
@@ -0,0 +1,35 @@
+# Shared Bit-Slice Splitting
+
+This iterator yields successive non-overlapping segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the beginning of the bit-slice.
+
+The matched bit is **not** included in the yielded segment.
+
+It is created by the [`BitSlice::split`] method.
+
+## Original
+
+[`slice::Split`](core::slice::Split)
+
+## API Differences
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.split(|idx, _bit| idx % 3 == 2);
+
+assert_eq!(split.next().unwrap(), bits![0; 2]);
+assert_eq!(split.next().unwrap(), bits![1; 2]);
+assert_eq!(split.next().unwrap(), bits![0, 1]);
+assert!(split.next().is_none());
+```
+
+[`BitSlice::split`]: crate::slice::BitSlice::split
diff --git a/doc/slice/iter/SplitInclusive.md b/doc/slice/iter/SplitInclusive.md
new file mode 100644
index 0000000..1add313
--- /dev/null
+++ b/doc/slice/iter/SplitInclusive.md
@@ -0,0 +1,29 @@
+# Shared Bit-Slice Splitting
+
+This iterator yields successive non-overlapping segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the beginning of the bit-slice.
+
+The matched bit **is** included in the yielded segment.
+
+It is created by the [`BitSlice::split_inclusive`] method.
+
+## Original
+
+[`slice::SplitInclusive`](core::slice::SplitInclusive)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.split_inclusive(|idx, _bit| idx % 3 == 2);
+
+assert_eq!(split.next().unwrap(), bits![0; 3]);
+assert_eq!(split.next().unwrap(), bits![1; 3]);
+assert_eq!(split.next().unwrap(), bits![0, 1]);
+assert!(split.next().is_none());
+```
+
+[`BitSlice::split_inclusive`]: crate::slice::BitSlice::split_inclusive
diff --git a/doc/slice/iter/SplitInclusiveMut.md b/doc/slice/iter/SplitInclusiveMut.md
new file mode 100644
index 0000000..315c4c8
--- /dev/null
+++ b/doc/slice/iter/SplitInclusiveMut.md
@@ -0,0 +1,33 @@
+# Exclusive Bit-Slice Splitting
+
+This iterator yields successive non-overlapping mutable segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the beginning of the bit-slice.
+
+The matched bit **is** included in the yielded segment.
+
+It is created by the [`BitSlice::split_inclusive_mut`] method.
+
+## Original
+
+[`slice::SplitInclusiveMut`](core::slice::SplitInclusiveMut)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = unsafe {
+ bits.split_inclusive_mut(|idx, _bit| idx % 3 == 2).remove_alias()
+};
+
+split.next().unwrap().fill(true);
+split.next().unwrap().fill(false);
+split.next().unwrap().copy_from_bitslice(bits![1, 0]);
+assert!(split.next().is_none());
+
+assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 1, 0]);
+```
+
+[`BitSlice::split_inclusive_mut`]: crate::slice::BitSlice::split_inclusive_mut
diff --git a/doc/slice/iter/SplitMut.md b/doc/slice/iter/SplitMut.md
new file mode 100644
index 0000000..9ba807a
--- /dev/null
+++ b/doc/slice/iter/SplitMut.md
@@ -0,0 +1,41 @@
+# Exclusive Bit-Slice Splitting
+
+This iterator yields successive non-overlapping mutable segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the beginning of the bit-slice.
+
+The matched bit is **not** included in the yielded segment.
+
+It is created by the [`BitSlice::split_mut`] method.
+
+## Original
+
+[`slice::SplitMut`](core::slice::SplitMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = unsafe {
+ bits.split_mut(|idx, _bit| idx % 3 == 2).remove_alias()
+};
+
+split.next().unwrap().fill(true);
+split.next().unwrap().fill(false);
+split.next().unwrap().copy_from_bitslice(bits![1, 0]);
+assert!(split.next().is_none());
+
+assert_eq!(bits, bits![1, 1, 0, 0, 0, 1, 1, 0]);
+```
+
+[`BitSlice::split_mut`]: crate::slice::BitSlice::split_mut
diff --git a/doc/slice/iter/SplitN.md b/doc/slice/iter/SplitN.md
new file mode 100644
index 0000000..871ce55
--- /dev/null
+++ b/doc/slice/iter/SplitN.md
@@ -0,0 +1,36 @@
+# Shared Bit-Slice Splitting
+
+This iterator yields `n` successive non-overlapping segments of a bit-slice,
+separated by bits that match a predicate function. Splitting advances one
+segment at a time, starting at the beginning of the bit-slice.
+
+The matched bit is **not** included in the yielded segment. The `n`th yielded
+segment does not attempt any further splits, and extends to the end of the
+bit-slice.
+
+It is created by the [`BitSlice::splitn`] method.
+
+## Original
+
+[`slice::SplitN`](core::slice::SplitN)
+
+## API Differences
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.splitn(2, |idx, _bit| idx % 3 == 2);
+
+assert_eq!(split.next().unwrap(), bits![0; 2]);
+assert_eq!(split.next().unwrap(), bits![1, 1, 1, 0, 1]);
+assert!(split.next().is_none());
+```
+
+[`BitSlice::splitn`]: crate::slice::BitSlice::splitn
diff --git a/doc/slice/iter/SplitNMut.md b/doc/slice/iter/SplitNMut.md
new file mode 100644
index 0000000..ac9ebac
--- /dev/null
+++ b/doc/slice/iter/SplitNMut.md
@@ -0,0 +1,40 @@
+# Exclusive Bit-Slice Splitting
+
+This iterator yields `n` successive non-overlapping mutable segments of a
+bit-slice, separated by bits that match a predicate function. Splitting advances
+one segment at a time, starting at the beginning of the bit-slice.
+
+The matched bit is **not** included in the yielded segment. The `n`th yielded
+segment does not attempt any further splits, and extends to the end of the
+bit-slice.
+
+It is created by the [`BitSlice::splitn_mut`] method.
+
+## Original
+
+[`slice::SplitNMut`](core::slice::SplitNMut)
+
+## API Differences
+
+This iterator marks all yielded bit-slices as `::Alias`ed.
+
+The predicate function receives both the index within the bit-slice, as well as
+the bit value, in order to allow the predicate to have more than one bit of
+information when splitting.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1];
+let mut split = bits.splitn_mut(2, |idx, _bit| idx % 3 == 2);
+
+split.next().unwrap().fill(true);
+split.next().unwrap().fill(false);
+assert!(split.next().is_none());
+
+assert_eq!(bits, bits![1, 1, 0, 0, 0, 0, 0, 0]);
+```
+
+[`BitSlice::splitn_mut`]: crate::slice::BitSlice::splitn_mut
diff --git a/doc/slice/iter/Windows.md b/doc/slice/iter/Windows.md
new file mode 100644
index 0000000..2cbdbcf
--- /dev/null
+++ b/doc/slice/iter/Windows.md
@@ -0,0 +1,35 @@
+# Bit-Slice Windowing
+
+This iterator yields successive overlapping windows into a bit-slice. Windowing
+advances one bit at a time, so for any given window width `N`, most bits will
+appear in `N` windows. Windows do not “extend” past either edge of the
+bit-slice: the first window has its front edge at the front of the bit-slice,
+and the last window has its back edge at the back of the bit-slice.
+
+It is created by the [`BitSlice::windows`] method.
+
+## Original
+
+[`slice::Windows`](core::slice::Windows)
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![0, 0, 1, 1, 0];
+let mut windows = bits.windows(2);
+let expected = &[
+ bits![0, 0],
+ bits![0, 1],
+ bits![1, 1],
+ bits![1, 0],
+];
+
+assert_eq!(windows.len(), 4);
+for (window, expected) in windows.zip(expected) {
+ assert_eq!(window, expected);
+}
+```
+
+[`BitSlice::windows`]: crate::slice::BitSlice::windows
diff --git a/doc/slice/ops.md b/doc/slice/ops.md
new file mode 100644
index 0000000..1ebd91e
--- /dev/null
+++ b/doc/slice/ops.md
@@ -0,0 +1,16 @@
+# Bit-Slice Operator Implementations
+
+The standard-library slices only implement the indexing operator `[]`.
+`BitSlice` additionally implements the Boolean operators `&`, `|`, `^`, and `!`.
+
+The dyadic Boolean arithmetic operators all take any `bitvec` container as their
+second argument, and apply the operation in-place to the left-hand bit-slice. If
+the second argument exhausts before `self` does, then it is implicitly
+zero-extended. This means that `&=` zeros excess bits in `self`, while `|=` and
+`^=` do not modify them.
+
+The monadic operator `!` inverts the entire bit-slice at once. Its API requires
+*taking* a `&mut BitSlice` reference and returning it, so you will need to
+structure your code accordingly.
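+
+A brief sketch of these rules, using a bit-slice reference as the second
+argument and a shorter right-hand side to show the zero-extension:
+
+```rust
+use bitvec::prelude::*;
+
+let a = bits![mut 0, 1, 1, 0, 1];
+let b = bits![     0, 1, 0, 1];
+
+// `b` is one bit short, so its missing bit acts as zero: `&=` clears the
+// excess bit of `a`, where `|=` and `^=` would have left it unchanged.
+*a &= b;
+assert_eq!(a, bits![0, 1, 0, 0, 0]);
+
+// `!` takes the `&mut BitSlice` reference and returns it.
+let a = !a;
+assert_eq!(a, bits![1, 0, 1, 1, 1]);
+```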
+
+[`BitSlice::domain_mut`]: crate::slice::BitSlice::domain_mut
diff --git a/doc/slice/specialization.md b/doc/slice/specialization.md
new file mode 100644
index 0000000..b481139
--- /dev/null
+++ b/doc/slice/specialization.md
@@ -0,0 +1,29 @@
+# Bit-Slice Specialization
+
+This module provides specialized implementations for `BitSlice<T, Lsb0>` and
+`BitSlice<T, Msb0>`. These implementations are able to use knowledge of their
+bit-ordering behavior to be faster and operate in batches.
+
+Since true specialization is not available in the language yet, this uses the
+`any::TypeId` system to detect if a type parameter is identical to a known type
+and conditionally force a cast and branch. Since type identifiers are compiler
+intrinsics produced during compilation, during monomorphization each branch
+has its conditional replaced with a compile-time constant value. The `if true`
+branch is retained, the `if false` branches are discarded, and the
+monomorphization proceeds with the specialized function replacing the generic
+body.
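+
+The dispatch pattern, sketched outside of `bitvec` with illustrative stand-in
+types (this shows only the shape of the technique, not the crate's internals):
+
+```rust
+use core::any::TypeId;
+
+// Stand-ins for the ordering types; these are not the crate's own names.
+struct SpecLsb0;
+struct SpecMsb0;
+
+fn count_ones<O: 'static>(byte: u8) -> u32 {
+    // After monomorphization this comparison is a constant, so the untaken
+    // branch is discarded and only the matching body remains.
+    if TypeId::of::<O>() == TypeId::of::<SpecLsb0>() {
+        // "Specialized" path: a single batch instruction.
+        byte.count_ones()
+    } else {
+        // Generic path: inspect each bit position individually.
+        (0u8 .. 8).map(|bit| u32::from(byte >> bit & 1)).sum()
+    }
+}
+
+assert_eq!(count_ones::<SpecLsb0>(0b1011_0010), 4);
+assert_eq!(count_ones::<SpecMsb0>(0b1011_0010), 4);
+```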
+
+The `.coerce()` and `.coerce_mut()` methods detect whether a bit-slice with
+generic type parameters matches statically-known type parameters, and return an
+`Option` of a value-identical bit-slice reference with the statically-known type
+parameters which can then invoke a specialization method.
+
+Generic methods can be specialized whenever their implementation is dependent on
+the `O` type parameter and the map of positions the ordering produces is
+easily legible to processor instructions. Because language-level specialization
+is unavailable, dispatch is only done in `bitvec` and cannot be extended to
+third-party crates.
+
+The `lsb0` and `msb0` modules should have identical symbols present. For
+implementation, remember that `Lsb0` and `Msb0` orderings **are** correlated
+with little-endian and big-endian byte operations!
diff --git a/doc/slice/threadsafe.md b/doc/slice/threadsafe.md
new file mode 100644
index 0000000..80df1fb
--- /dev/null
+++ b/doc/slice/threadsafe.md
@@ -0,0 +1,19 @@
+# Bit-Slice Thread Safety
+
+This allows bit-slice references to be moved across thread boundaries only when
+the underlying `T` element can tolerate concurrency.
+
+All `BitSlice` references, shared or exclusive, are only threadsafe if the `T`
+element type is `Send`, because any given bit-slice reference may only have
+partial control of a memory element that is also being shared by a bit-slice
+reference on another thread. As such, this is never implemented for `Cell<U>`,
+but always implemented for `AtomicU` and `U` for a given unsigned integer type
+`U`.
+
+Atomic integers safely handle concurrent writes, and cells do not allow
+concurrency at all, so the only missing piece is `&mut BitSlice<_, U: Unsigned>`.
+This is
+handled by the aliasing system that the mutable splitters employ: a mutable
+reference to an unsynchronized bit-slice can only cross threads when no other
+handle is able to exist to the elements it governs. Splitting a mutable
+bit-slice causes the split halves to change over to either atomics or cells, so
+concurrency is either safe or impossible.
diff --git a/doc/slice/traits.md b/doc/slice/traits.md
new file mode 100644
index 0000000..95b34a4
--- /dev/null
+++ b/doc/slice/traits.md
@@ -0,0 +1,7 @@
+# Bit-Slice Trait Implementations
+
+`BitSlice` implements all of the traits that `[bool]` does, as well as a number
+that it does not but are useful for bit-slices. These additions include numeric
+formatting, so that any bit-slice can have its memory representation printed, as
+well as a permutation of `PartialEq` and `PartialOrd` implementations so that
+various `bitvec` containers can be easily compared with each other.
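+
+## Examples
+
+A brief sketch of the added comparisons and formatting (the exact rendering is
+described on the formatting implementations themselves):
+
+```rust
+use bitvec::prelude::*;
+
+let bits = bits![u8, Msb0; 1, 0, 1, 1, 0, 0, 1, 0];
+
+// Equality is judged on the bit values, not on the storage parameters.
+assert_eq!(bits, bits![u16, Lsb0; 1, 0, 1, 1, 0, 0, 1, 0]);
+
+// The numeric formatting traits render the memory representation.
+let rendered = format!("{:b}", bits);
+assert!(rendered.contains('1'));
+```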
diff --git a/doc/store.md b/doc/store.md
new file mode 100644
index 0000000..377b632
--- /dev/null
+++ b/doc/store.md
@@ -0,0 +1,90 @@
+# Storage Memory Description
+
+This module defines the `bitvec` memory model used to interface bit-slice
+regions to raw memory, and manage type-state changes as demanded by the region
+descriptor.
+
+The [`BitStore`] trait is the primary type-level description of `bitvec` views
+of the memory space and provides the runtime system that drives the crate memory
+model.
+
+## Memory Model
+
+`bitvec` considers all memory within [`BitSlice`] regions as if it were composed
+of discrete bits, each divisible and independent from its neighbors, just as the
+Rust memory model considers elements `T` in a slice `[T]`. Much as ordinary byte
+slices `[u8]` provide an API where each byte is distinct and independent from
+its neighbors, but the underlying processor silicon clusters them in words and
+cachelines, both the processor silicon *and* the Rust compiler require that bits
+in a `BitSlice` be grouped into memory elements, and collectively subjected to
+aliasing rules within their batch.
+
+`bitvec` manages this through the `BitStore` trait. It is implemented on three
+type families available from the Rust standard libraries:
+
+- [unsigned integers]
+- [atomic] unsigned integers
+- [`Cell`] wrappers of unsigned integers
+
+`bitvec` receives memory regions typed with one of these families and wraps
+them in one of its data structures based on the `BitSlice` region. The target
+processor is responsible for handling any contention between `T: BitStore`
+memory elements; this is irrelevant to the `bitvec` model. `bitvec` is solely
+responsible for proving to the Rust compiler that all memory accesses through
+its types are correctly managed according to the `&`/`&mut` shared/exclusion
+reference model, and the [`UnsafeCell`] shared-mutation model.
+
+Through `BitStore`, `bitvec` is able to demonstrate that `&mut BitSlice`
+references to a region of *bits* have no other `BitSlice` references capable of
+viewing those bits. However, multiple `&mut BitSlice` references may view the
+same underlying memory element, which is undefined behavior in the Rust compiler
+unless additional synchronization and mutual exclusion is provided to prevent
+racing writes and unsynchronized reads.
+
+As such, `BitStore` provides a closed type-system graph that the `BitSlice`
+region API uses to mark events that can induce aliasing over memory locations.
+When a `&mut BitSlice<_, T>` typed with an ordinary unsigned integer uses any of
+the APIs that call [`.split_at_mut()`], it transitions its `BitStore` parameter
+to `&mut BitSlice<_, T::Alias>`. The [`::Alias`] associated type is always a
+type that manages aliasing references to a single memory location: either an
+[atomic] unsigned integer `T` or a [`Cell<T>`][`Cell`]. The Rust standard
+library guarantees that these types will behave correctly when multiple
+references to a single location attempt to perform memory transactions.
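+
+A brief sketch of that transition, where `Alias8` is a local alias for
+`<u8 as BitStore>::Alias`:
+
+```rust
+use bitvec::prelude::*;
+
+type Alias8 = <u8 as BitStore>::Alias;
+
+let bits = bits![mut u8, Msb0; 0; 10];
+// Splitting an exclusive bit-slice may leave both halves sharing a memory
+// element, so both halves are re-typed with the aliased storage parameter.
+let (left, right): (
+    &mut BitSlice<Alias8, Msb0>,
+    &mut BitSlice<Alias8, Msb0>,
+) = bits.split_at_mut(5);
+
+left.fill(true);
+right.fill(false);
+assert_eq!(bits, bits![u8, Msb0; 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]);
+```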
+
+The atomic and `Cell` types stay as themselves when [`BitSlice`] introduces
+aliasing conditions, as they are already alias-aware.
+
+Foreign implementations of `BitStore` are required to follow the conventions
+used here: unsynchronized storage types must create marker newtypes over an
+appropriate synchronized type for `::Alias` and uphold the “only `&mut` has
+write permission” rule, while synchronized storage types do not need to perform
+these transitions, but may never transition to an unsynchronized type either.
+
+The `bitvec` memory description model as implemented in the [`domain`] module is
+able to perform the inverse transition: where a `BitSlice` can demonstrate a
+static awareness that the `&`/`&mut` exclusion rules are satisfied for a
+particular element slice `[T]`, it may apply the [`::Unalias`] marker to undo
+any `::Alias`ing, and present a type that has no more aliasing protection than
+that with which the memory region was initially declared.
+
+Namely, this means that the [atomic] and [`Cell`] wrappers will *never* be
+removed from a region that had them before it was given to `bitvec`, while a
+region of ordinary integers may regain the ability to be viewed without
+synchrony guards if `bitvec` can prove safety in the `domain` module.
+
+In order to retain `bitvec`’s promise that an `&mut BitSlice<_, T>` has the sole
+right of observation for all bits in its region, the unsigned integers alias to
+a crate-internal wrapper over the alias-capable standard-library types. This
+wrapper forbids mutation through shared references, so two [`BitSlice`]
+references that alias a memory location, but do not overlap in bits, may not be
+coërced to interfere with each other.
+
+[atomic]: core::sync::atomic
+[unsigned integers]: core::primitive
+[`BitSlice`]: crate::slice::BitSlice
+[`BitStore`]: self::BitStore
+[`Cell`]: core::cell::Cell
+[`UnsafeCell`]: core::cell::UnsafeCell
+[`domain`]: crate::domain
+[`::Alias`]: self::BitStore::Alias
+[`::Unalias`]: self::BitStore::Unalias
diff --git a/doc/store/BitStore.md b/doc/store/BitStore.md
new file mode 100644
index 0000000..ec754b4
--- /dev/null
+++ b/doc/store/BitStore.md
@@ -0,0 +1,37 @@
+# Bit Storage
+
+This trait drives `bitvec`’s ability to view memory as a collection of discrete
+bits. It combines awareness of storage element width, memory-bus access
+requirements, element contention, and buffer management, into a type-system
+graph that the rest of the crate can use to abstract away concerns about memory
+representation or access rules.
+
+It is responsible for extending the standard Rust `&`/`&mut` shared/exclusion
+rules to apply to individual bits while avoiding violating those rules when
+operating on real memory so that Rust and LLVM cannot find fault with the object
+code it produces.
+
+## Implementors
+
+This is implemented on three type families:
+
+- all [`BitRegister`] raw integer fundamentals
+- all [`Cell`] wrappers of them
+- all [atomic] variants of them
+
+The [`BitSlice`] region, and all structures composed atop it, can be built out
+of regions of memory that have this trait implementation.
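+
+A brief sketch showing each implementor family in use as bit-slice storage
+(this assumes a target with native `AtomicU8` support):
+
+```rust
+use bitvec::prelude::*;
+use core::cell::Cell;
+use core::sync::atomic::AtomicU8;
+
+// Each implementor family can back a bit-slice region.
+let ints = [0u8; 2];
+let cells = [Cell::new(0u8), Cell::new(0u8)];
+let atoms = [AtomicU8::new(0), AtomicU8::new(0)];
+
+let _: &BitSlice<u8, Lsb0> = ints.view_bits::<Lsb0>();
+let _: &BitSlice<Cell<u8>, Lsb0> = cells.view_bits::<Lsb0>();
+let _: &BitSlice<AtomicU8, Lsb0> = atoms.view_bits::<Lsb0>();
+```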
+
+## Associated Types
+
+The associated types attached to each implementation create a closed graph of
+type transitions used to manage alias conditions. When a bit-slice region
+determines that an aliasing or unaliasing event has occurred, it transitions
+along the type graph in order to maintain correct operations in memory. The
+methods that cause type transitions can be found in [`BitSlice`] and [`domain`].
+
+[`BitRegister`]: crate::mem::BitRegister
+[`BitSlice`]: crate::slice::BitSlice
+[`Cell`]: core::cell::Cell
+[`domain`]: crate::domain
+[atomic]: core::sync::atomic
diff --git a/doc/vec.md b/doc/vec.md
new file mode 100644
index 0000000..e020930
--- /dev/null
+++ b/doc/vec.md
@@ -0,0 +1,16 @@
+# Dynamically-Allocated, Adjustable-Size, Bit Buffer
+
+This module defines the [`BitVec`] buffer and its associated support code.
+
+`BitVec` is analogous to [`Vec<bool>`] in its use of dynamic memory allocation
+and its relationship to the [`BitSlice`] type. Most of the interesting work to
+be done on a bit sequence is actually implemented in `BitSlice`, with `BitVec`
+itself largely only containing interfaces to the memory allocator.
+
+## Original
+
+[`vec`](mod@alloc::vec)
+
+[`BitVec`]: crate::vec::BitVec
+[`BitSlice`]: crate::slice::BitSlice
+[`Vec<bool>`]: alloc::vec::Vec
diff --git a/doc/vec/BitVec.md b/doc/vec/BitVec.md
new file mode 100644
index 0000000..71e884d
--- /dev/null
+++ b/doc/vec/BitVec.md
@@ -0,0 +1,174 @@
+# Bit-Precision Dynamic Array
+
+This is an analogue to `Vec<bool>` that stores its data using a compaction
+scheme to ensure that each `bool` takes exactly one bit of memory. It is similar
+to the C++ type [`std::vector<bool>`], but uses `bitvec`’s type parameter system
+to provide more detailed control over the in-memory representation.
+
+This is *always* a heap allocation. If you know your sizes at compile-time, you
+may prefer to use [`BitArray`] instead, which is able to store its data as an
+immediate value rather than through an indirection.
+
+## Documentation Practices
+
+`BitVec` exactly replicates the API of the standard-library `Vec` type,
+including inherent methods, trait implementations, and relationships with the
+[`BitSlice`] slice analogue.
+
+Items that are either direct ports, or renamed variants, of standard-library
+APIs will have a `## Original` section that links to their standard-library
+documentation. Items that map to standard-library APIs but have a different API
+signature will also have an `## API Differences` section that describes what
+the difference is, why it exists, and how to transform your code to fit it. For
+example:
+
+## Original
+
+[`Vec<T>`](alloc::vec::Vec)
+
+## API Differences
+
+As with all `bitvec` data structures, this takes two type parameters `<T, O>`
+that govern the bit-vector’s storage representation in the underlying memory,
+and does *not* take a type parameter to govern what data type it stores (always
+`bool`).
+
+## Suggested Uses
+
+`BitVec` is able to act as a compacted `usize => bool` dictionary, and is useful
+for holding large collections of truthiness. For instance, you might replace a
+`Vec<Option<T>>` with a `(BitVec, Vec<MaybeUninit<T>>)` to cut down on the
+resident size of the discriminant.
+
+Through the [`BitField`] trait, `BitVec` is also able to act as a transport
+buffer for data that can be marshalled as integers. Serializing data to a
+narrower compacted form, or deserializing data *from* that form, can be easily
+accomplished by viewing subsets of a bit-vector and storing integers into, or
+loading integers out of, that subset. As an example, transporting four ten-bit
+integers can be done in five bytes instead of eight like so:
+
+```rust
+use bitvec::prelude::*;
+
+let mut bv = bitvec![u8, Msb0; 0; 40];
+bv[0 .. 10].store::<u16>(0x3A8);
+bv[10 .. 20].store::<u16>(0x2F9);
+bv[20 .. 30].store::<u16>(0x154);
+bv[30 .. 40].store::<u16>(0x06D);
+```
+
+If you wish to use bit-field memory representations as `struct` fields rather
+than a transport buffer, consider `BitArray` instead: that type keeps its data
+as an immediate, and is more likely to act like a C struct with bitfields.
+
+## Examples
+
+`BitVec` has exactly the same API as `Vec<bool>`, and even extends it with some
+of `Vec<T>`’s behaviors. As a brief tour:
+
+### Push and Pop
+
+```rust
+use bitvec::prelude::*;
+
+let mut bv: BitVec = BitVec::new();
+bv.push(false);
+bv.push(true);
+
+assert_eq!(bv.len(), 2);
+assert_eq!(bv[0], false);
+
+assert_eq!(bv.pop(), Some(true));
+assert_eq!(bv.len(), 1);
+```
+
+### Writing Into a Bit-Vector
+
+The only `Vec<bool>` API that `BitVec` does *not* implement is `IndexMut<usize>`,
+because producing `&mut bool` references to single bits is not yet possible.
+Instead, [`.get_mut()`] can produce a proxy reference, or [`.set()`] can take an
+index and a value to write.
+
+```rust
+use bitvec::prelude::*;
+
+let mut bv: BitVec = BitVec::new();
+bv.push(false);
+
+*bv.get_mut(0).unwrap() = true;
+assert!(bv[0]);
+bv.set(0, false);
+assert!(!bv[0]);
+```
+
+### Macro Construction
+
+Like `Vec`, `BitVec` also has a macro constructor: [`bitvec!`] takes a sequence
+of bit expressions and encodes them at compile-time into a suitable buffer. At
+run-time, this buffer is copied into the heap as a `BitVec` with no extra cost
+beyond the allocation.
+
+```rust
+use bitvec::prelude::*;
+
+let bv = bitvec![0; 10];
+let bv = bitvec![0, 1, 0, 0, 1];
+let bv = bitvec![u16, Msb0; 1; 20];
+```
+
+### Borrowing as `BitSlice`
+
+`BitVec` lends its buffer as a `BitSlice`, so you can freely give permission to
+view or modify the contained data without affecting the allocation:
+
+```rust
+use bitvec::prelude::*;
+
+fn read_bitslice(bits: &BitSlice) {
+ // …
+}
+
+let bv = bitvec![0; 30];
+read_bitslice(&bv);
+let bs: &BitSlice = &bv;
+```
+
+## Other Notes
+
+The default type parameters are `<usize, Lsb0>`. This is the most performant
+pair when operating on memory, but likely does not match your needs if you are
+using `BitVec` to represent a transport buffer. See [the user guide][book] for
+more details on how the type parameters govern memory representation.
+
+Applications, or single-purpose libraries, built atop `bitvec` will likely want
+to create a `type` alias with specific type parameters for their usage. `bitvec`
+is fully generic over the ordering/storage types, but this generality is rarely
+useful for client crates to propagate. `<usize, Lsb0>` is fastest; `<u8, Msb0>`
+matches what most debugger views of memory will print, and the rest are
+documented in the guide.
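+
+For instance (the `PacketBits` name here is only illustrative):
+
+```rust
+use bitvec::prelude::*;
+
+// Fix the transport representation once, at the edge of the application.
+type PacketBits = BitVec<u8, Msb0>;
+
+let mut packet = PacketBits::new();
+packet.resize(16, false);
+packet[0 .. 8].store::<u8>(0xA5);
+assert_eq!(packet.len(), 16);
+```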
+
+## Safety
+
+Unlike the other data structures in this crate, `BitVec` is uniquely able to
+hold uninitialized memory and produce pointers into it. As described in the
+[`BitAccess`] documentation, this crate is categorically unable to operate on
+uninitialized memory in any way. In particular, you may not allocate a buffer
+using [`::with_capacity()`], then use [`.as_mut_bitptr()`] to create a pointer
+used to write into the uninitialized buffer.
+
+You must always initialize the buffer contents of a `BitVec` before attempting
+to view its contents. You can accomplish this through safe APIs such as
+`.push()`, `.extend()`, or `.reserve()`. These are all guaranteed to safely
+initialize the memory elements underlying the `BitVec` buffer without incurring
+undefined behavior in their operation.
+
+[book]: https://bitvecto-rs.github.io/bitvec/type-parameters.html
+[`BitAccess`]: crate::access::BitAccess
+[`BitArray`]: crate::array::BitArray
+[`BitField`]: crate::field::BitField
+[`BitSlice`]: crate::slice::BitSlice
+[`bitvec!`]: macro@crate::bitvec
+[`std::vector<bool>`]: https://en.cppreference.com/w/cpp/container/vector_bool
+[`.as_mut_bitptr()`]: crate::slice::BitSlice::as_mut_bitptr
+[`.get_mut()`]: crate::slice::BitSlice::get_mut
+[`.set()`]: crate::slice::BitSlice::set
+[`::with_capacity()`]: Self::with_capacity
diff --git a/doc/vec/iter.md b/doc/vec/iter.md
new file mode 100644
index 0000000..f5df1c5
--- /dev/null
+++ b/doc/vec/iter.md
@@ -0,0 +1,14 @@
+# Bit-Vector Iteration
+
+This module provides iteration protocols for `BitVec`, including:
+
+- extension of existing bit-vectors with new data
+- collection of data into new bit-vectors
+- iteration over the contents of a bit-vector
+- draining and splicing iteration over parts of a bit-vector.
+
+`BitVec` implements `Extend` and `FromIterator` for both sources of individual
+bits and sources of `T` memory elements.
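+
+A brief sketch of both directions, using individual bits:
+
+```rust
+use bitvec::prelude::*;
+
+let mut bv = bitvec![0, 1];
+// Extension from a source of individual bits…
+bv.extend([true, false].iter().copied());
+assert_eq!(bv, bitvec![0, 1, 1, 0]);
+
+// …and collection of a new bit-vector from one.
+let collected: BitVec = [false, true, true].iter().copied().collect();
+assert_eq!(collected, bitvec![0, 1, 1]);
+```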
+
+The by-value `bool` iterator is defined in `boxed::iter`, rather than here. The
+`Drain` and `Splice` iterators remain here in their original location.
diff --git a/doc/vec/iter/Drain.md b/doc/vec/iter/Drain.md
new file mode 100644
index 0000000..39429bf
--- /dev/null
+++ b/doc/vec/iter/Drain.md
@@ -0,0 +1,17 @@
+# Draining Iteration
+
+This structure iterates over a subset of a bit-vector, yielding each bit and
+removing it completely from the source.
+
+Each drain locks the bit-vector that created it until the drain is either
+destroyed or forgotten. If a drain is leaked rather than being allowed to drop
+normally, the source bit-vector is only guaranteed to have contents up to the
+original start of the drain. All further contents are unspecified.
+
+See [`BitVec::drain()`] for more details.
+
+## Original
+
+[`vec::Drain`](alloc::vec::Drain)
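+
+## Examples
+
+A brief sketch of draining a middle range:
+
+```rust
+use bitvec::prelude::*;
+
+let mut bv = bitvec![0, 1, 1, 0, 1];
+
+// Drained bits are yielded by value and removed from the source.
+let drained: BitVec = bv.drain(1 .. 4).collect();
+assert_eq!(drained, bitvec![1, 1, 0]);
+assert_eq!(bv, bitvec![0, 1]);
+```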
+
+[`BitVec::drain()`]: crate::vec::BitVec::drain
diff --git a/doc/vec/iter/Extend_BitRef.md b/doc/vec/iter/Extend_BitRef.md
new file mode 100644
index 0000000..9ef098b
--- /dev/null
+++ b/doc/vec/iter/Extend_BitRef.md
@@ -0,0 +1,10 @@
+# Bit-Vector Extension by Proxy References
+
+**DO NOT** use this. You *clearly* have a bit-slice. Use
+[`.extend_from_bitslice()`] instead!
+
+Iterating over a bit-slice requires loading from memory and constructing a proxy
+reference for each bit. This is needlessly slow; the specialized method is able
+to avoid this per-bit cost and possibly even use batched operations.
+
+[`.extend_from_bitslice()`]: crate::vec::BitVec::extend_from_bitslice
diff --git a/doc/vec/iter/Extend_bool.md b/doc/vec/iter/Extend_bool.md
new file mode 100644
index 0000000..5c5b02d
--- /dev/null
+++ b/doc/vec/iter/Extend_bool.md
@@ -0,0 +1,21 @@
+# Bit-Vector Extension
+
+This extends a bit-vector from anything that produces individual bits.
+
+## Original
+
+[`impl<T> Extend<T> for Vec<T>`][orig]
+
+## Notes
+
+This `.extend()` call is the second-slowest possible way to append bits into a
+bit-vector, faster only than calling `iter.for_each(|bit| bv.push(bit))`.
+**DO NOT** use this if you have any other choice.
+
+If you are extending a bit-vector from the contents of a bit-slice, then you
+should use [`.extend_from_bitslice()`] instead. That method is specialized to
+perform upfront allocation and, where possible, use a batch copy rather than
+copying each bit individually from the source into the bit-vector.
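+
+## Examples
+
+For comparison, a brief sketch of the preferred batch append next to the
+per-bit fallback:
+
+```rust
+use bitvec::prelude::*;
+
+let src = bits![1, 1, 0, 1];
+let mut bv = bitvec![0, 0];
+
+// Preferred: batch append from the bit-slice.
+bv.extend_from_bitslice(src);
+// Fallback: bit-by-bit extension from any `bool` source.
+bv.extend(src.iter().by_vals());
+
+assert_eq!(bv, bitvec![0, 0, 1, 1, 0, 1, 1, 1, 0, 1]);
+```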
+
+[orig]: https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-Extend%3CT%3E
+[`.extend_from_bitslice()`]: crate::vec::BitVec::extend_from_bitslice
diff --git a/doc/vec/iter/FillStatus.md b/doc/vec/iter/FillStatus.md
new file mode 100644
index 0000000..c122b7c
--- /dev/null
+++ b/doc/vec/iter/FillStatus.md
@@ -0,0 +1,14 @@
+# Fill Status
+
+The standard library uses a `bool` flag to indicate whether a splicing operation
+exhausted the source or filled the target, which is not very clear about what is
+being signaled. This enum replaces it.
+
+## Variants
+
+- `FullSpan`: This marks that a drain span has been completely filled with
+ replacement bits, and any further replacement would require insertion rather
+ than overwriting dead storage.
+- `EmptyInput`: This marks that a replacement source has been run to completion,
+ but dead bits remain in a drain span, and the dead range will need to be
+ overwritten.
diff --git a/doc/vec/iter/FromIterator_BitRef.md b/doc/vec/iter/FromIterator_BitRef.md
new file mode 100644
index 0000000..ac92f6c
--- /dev/null
+++ b/doc/vec/iter/FromIterator_BitRef.md
@@ -0,0 +1,10 @@
+# Bit-Vector Collection from Proxy References
+
+**DO NOT** use this. You *clearly* have a bit-slice. Use
+[`::from_bitslice()`] instead!
+
+Iterating over a bit-slice requires loading from memory and constructing a proxy
+reference for each bit. This is needlessly slow; the specialized method is able
+to avoid this per-bit cost and possibly even use batched operations.
+
+[`::from_bitslice()`]: crate::vec::BitVec::from_bitslice
diff --git a/doc/vec/iter/FromIterator_bool.md b/doc/vec/iter/FromIterator_bool.md
new file mode 100644
index 0000000..93e704b
--- /dev/null
+++ b/doc/vec/iter/FromIterator_bool.md
@@ -0,0 +1,21 @@
+# Bit-Vector Collection
+
+This collects a bit-vector from anything that produces individual bits.
+
+## Original
+
+[`impl<T> FromIterator<T> for Vec<T>`][orig]
+
+## Notes
+
+This `.collect()` call is the second-slowest possible way to collect bits into a
+bit-vector, faster only than calling `iter.for_each(|bit| bv.push(bit))`.
+**DO NOT** use this if you have any other choice.
+
+If you are collecting a bit-vector from the contents of a bit-slice, then you
+should use [`::from_bitslice()`] instead. That method is specialized to
+perform upfront allocation and, where possible, use a batch copy rather than
+copying each bit individually from the source into the bit-vector.
+
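+## Examples
+
+A minimal sketch of both paths, using the default type parameters:
+
+```rust
+use bitvec::prelude::*;
+
+// Slow: collects one bit at a time.
+let from_bools: BitVec = (0 .. 3).map(|i| i % 2 == 0).collect();
+
+// Preferred when a bit-slice already exists.
+let from_slice = BitVec::from_bitslice(bits![1, 0, 1]);
+assert_eq!(from_bools, from_slice);
+```
+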
+[orig]: https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-FromIterator%3CT%3E
+[`::from_bitslice()`]: crate::vec::BitVec::from_bitslice
diff --git a/doc/vec/iter/IntoIterator.md b/doc/vec/iter/IntoIterator.md
new file mode 100644
index 0000000..473c6f6
--- /dev/null
+++ b/doc/vec/iter/IntoIterator.md
@@ -0,0 +1,11 @@
+# Bit-Vector Iteration
+
+Bit-vectors have the advantage that iteration consumes the whole structure, so
+they can simply freeze the allocation into a bit-box, then use its iteration and
+destructor.
+
+## Original
+
+[`impl<T> IntoIterator for Vec<T>`][orig]
+
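+## Examples
+
+A minimal sketch of by-value iteration:
+
+```rust
+use bitvec::prelude::*;
+
+let bv = bitvec![0, 1, 1];
+let ones = bv.into_iter().filter(|bit| *bit).count();
+assert_eq!(ones, 2);
+```
+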
+[orig]: https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-IntoIterator
diff --git a/doc/vec/iter/Splice.md b/doc/vec/iter/Splice.md
new file mode 100644
index 0000000..26ec18f
--- /dev/null
+++ b/doc/vec/iter/Splice.md
@@ -0,0 +1,17 @@
+# Splicing Iteration
+
+This adapts a [`Drain`] to overwrite the drained section with the contents of
+another iterator.
+
+When this splice is destroyed, the drained section of the source bit-vector is
+replaced with the contents of the replacement iterator. If the replacement is
+not the same length as the drained section, then the bit-vector is resized to
+fit.
+
+See [`BitVec::splice()`] for more information.
+
+## Original
+
+[`vec::Splice`](alloc::vec::Splice)
+
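+## Examples
+
+A minimal sketch that replaces a two-bit span with three new bits, growing the
+bit-vector:
+
+```rust
+use bitvec::prelude::*;
+
+let mut bv = bitvec![0, 1, 0, 1];
+let removed: BitVec = bv.splice(1 .. 3, [true, true, true]).collect();
+
+assert_eq!(removed, bitvec![1, 0]);
+assert_eq!(bv, bitvec![0, 1, 1, 1, 1]);
+```
+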
+[`BitVec::splice()`]: crate::vec::BitVec::splice
diff --git a/doc/view.md b/doc/view.md
new file mode 100644
index 0000000..ac9b223
--- /dev/null
+++ b/doc/view.md
@@ -0,0 +1,22 @@
+# Bit View Adapters
+
+This module provides extension traits that view ordinary memory as
+bit-addressable.
+
+The [`&BitSlice`][0] type is a reference view over memory managed elsewhere. The
+inherent constructors are awkward to call, as they require function syntax and
+a redundant type argument (the `T: BitStore` parameter is already known by the
+data being viewed). As an alternative, the [`BitView`] trait provides methods
+on `BitStore` scalars and arrays that are more convenient to create `BitSlice`
+reference views.
+
+Additionally, [`BitViewSized`], [`AsBits`], and [`AsMutBits`] inform the type
+system about types that can be used as [`BitArray`] storage, immutably viewed as
+bits, or mutably viewed as bits, respectively.
+
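+## Examples
+
+A minimal sketch of the view adapters on a scalar and an array:
+
+```rust
+use bitvec::prelude::*;
+
+let byte = 0b1010_0000u8;
+assert!(byte.view_bits::<Msb0>()[0]);
+
+let mut raw = [0u16; 2];
+raw.view_bits_mut::<Lsb0>().set(3, true);
+assert_eq!(raw[0], 8);
+```
+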
+[0]: crate::slice::BitSlice
+[`AsBits`]: self::AsBits
+[`AsMutBits`]: self::AsMutBits
+[`BitArray`]: crate::array::BitArray
+[`BitView`]: self::BitView
+[`BitViewSized`]: self::BitViewSized
diff --git a/doc/view/AsBits.md b/doc/view/AsBits.md
new file mode 100644
index 0000000..ee357ff
--- /dev/null
+++ b/doc/view/AsBits.md
@@ -0,0 +1,27 @@
+# Immutable Bit View
+
+This trait is an analogue to the [`AsRef`] trait, in that it enables any type to
+provide a view of an immutable bit-slice.
+
+It does not require an `AsRef<[T: BitStore]>` implementation, but a blanket
+implementation for all `AsRef<[T: BitStore]>` is provided. This allows you to
+choose whether to implement only one of `AsBits<T>` or `AsRef<[T]>`, and gain
+a bit-slice view through either choice.
+
+## Usage
+
+The `.as_bits::<_>()` method has the same usage patterns as
+[`BitView::view_bits`][0].
+
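+For example, a plain byte array (which is `AsRef<[u8]>`) gains the bit view
+through the blanket implementation:
+
+```rust
+use bitvec::prelude::*;
+
+let raw = [0b1000_0000u8, 0b0000_0001];
+let bits = raw.as_bits::<Msb0>();
+assert!(bits[0]);
+assert!(bits[15]);
+```
+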
+## Notes
+
+You are not *forbidden* from creating multiple views with different element
+types to the same region, but doing so is likely to cause inconsistent and
+surprising behavior.
+
+Refrain from implementing this trait with more than one storage argument unless
+you are sure that you can uphold the memory region requirements of all of them,
+and are aware of the behavior conflicts that may arise.
+
+[0]: crate::view::BitView::view_bits
+[`AsRef`]: core::convert::AsRef
diff --git a/doc/view/AsMutBits.md b/doc/view/AsMutBits.md
new file mode 100644
index 0000000..63fd0d8
--- /dev/null
+++ b/doc/view/AsMutBits.md
@@ -0,0 +1,27 @@
+# Mutable Bit View
+
+This trait is an analogue to the [`AsMut`] trait, in that it enables any type to
+provide a view of a mutable bit-slice.
+
+It does not require an `AsMut<[T: BitStore]>` implementation, but a blanket
+implementation for all `AsMut<[T: BitStore]>` is provided. This allows you to
+choose whether to implement only one of `AsMutBits<T>` or `AsMut<[T]>`, and gain
+a bit-slice view through either choice.
+
+## Usage
+
+The `.as_mut_bits::<_>()` method has the same usage patterns as
+[`BitView::view_bits_mut`][0].
+
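+For example, a mutable byte array (which is `AsMut<[u8]>`) gains the bit view
+through the blanket implementation:
+
+```rust
+use bitvec::prelude::*;
+
+let mut raw = [0u8; 2];
+raw.as_mut_bits::<Lsb0>().set(9, true);
+assert_eq!(raw, [0u8, 2]);
+```
+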
+## Notes
+
+You are not *forbidden* from creating multiple views with different element
+types to the same region, but doing so is likely to cause inconsistent and
+surprising behavior.
+
+Refrain from implementing this trait with more than one storage argument unless
+you are sure that you can uphold the memory region requirements of all of them,
+and are aware of the behavior conflicts that may arise.
+
+[0]: crate::view::BitView::view_bits_mut
+[`AsMut`]: core::convert::AsMut
diff --git a/doc/view/BitView.md b/doc/view/BitView.md
new file mode 100644
index 0000000..a18b982
--- /dev/null
+++ b/doc/view/BitView.md
@@ -0,0 +1,27 @@
+# Bit View
+
+This trait describes a region of memory that can be viewed as its constituent
+bits. It is blanket-implemented on all [`BitStore`] implementors, as well as
+slices and arrays of them. It should not be implemented on any other types.
+
+The contained extension methods allow existing memory to be easily viewed as
+[`BitSlice`]s using dot-call method syntax rather than the more cumbersome
+constructor functions in `BitSlice`’s inherent API.
+
+Since the element type is already known to the implementor, the only type
+parameter you need to provide when calling these methods is the bit-ordering.
+
+## Examples
+
+```rust
+use bitvec::prelude::*;
+
+let a = 0u16;
+let a_bits: &BitSlice<u16, Lsb0> = a.view_bits::<Lsb0>();
+
+let mut b = [0u8; 4];
+let b_bits: &mut BitSlice<u8, Msb0> = b.view_bits_mut::<Msb0>();
+```
+
+[`BitSlice`]: crate::slice::BitSlice
+[`BitStore`]: crate::store::BitStore
diff --git a/src/access.rs b/src/access.rs
new file mode 100644
index 0000000..d68022a
--- /dev/null
+++ b/src/access.rs
@@ -0,0 +1,290 @@
+#![doc = include_str!("../doc/access.md")]
+
+use core::sync::atomic::Ordering;
+
+use funty::Integral;
+use radium::Radium;
+
+use crate::{
+ index::{
+ BitIdx,
+ BitMask,
+ },
+ mem::BitRegister,
+ order::BitOrder,
+};
+
+#[doc = include_str!("../doc/access/BitAccess.md")]
+pub trait BitAccess: Radium
+where <Self as Radium>::Item: BitRegister
+{
+ /// Clears bits within a memory element to `0`.
+ ///
+ /// The mask provided to this method must be constructed from indices that
+ /// are valid in the caller’s context. As the mask is already computed by
+ /// the caller, this does not take an ordering type parameter.
+ ///
+ /// ## Parameters
+ ///
+ /// - `mask`: A mask of any number of bits. This is a selection mask: all
+ /// bits in the mask that are set to `1` will set the corresponding bit in
+ /// `*self` to `0`.
+ ///
+ /// ## Returns
+ ///
+ /// The prior value of the memory element.
+ ///
+ /// ## Effects
+ ///
+ /// All bits in `*self` corresponding to `1` bits in the `mask` are cleared
+ /// to `0`; all others retain their original value.
+ ///
+ /// Do not invert the `mask` prior to calling this function. [`BitMask`] is
+ /// a selection type, not a bitwise-operation argument.
+ ///
+ /// [`BitMask`]: crate::index::BitMask
+ #[inline]
+ fn clear_bits(&self, mask: BitMask<Self::Item>) -> Self::Item {
+ self.fetch_and(!mask.into_inner(), Ordering::Relaxed)
+ }
+
+ /// Sets bits within a memory element to `1`.
+ ///
+ /// The mask provided to this method must be constructed from indices that
+ /// are valid in the caller’s context. As the mask is already computed by
+ /// the caller, this does not take an ordering type parameter.
+ ///
+ /// ## Parameters
+ ///
+ /// - `mask`: A mask of any number of bits. This is a selection mask: all
+ /// bits in the mask that are set to `1` will set the corresponding bit in
+ /// `*self` to `1`.
+ ///
+ /// ## Returns
+ ///
+ /// The prior value of the memory element.
+ ///
+ /// ## Effects
+ ///
+ /// All bits in `*self` corresponding to `1` bits in the `mask` are set to
+ /// `1`; all others retain their original value.
+ #[inline]
+ fn set_bits(&self, mask: BitMask<Self::Item>) -> Self::Item {
+ self.fetch_or(mask.into_inner(), Ordering::Relaxed)
+ }
+
+ /// Inverts bits within a memory element.
+ ///
+ /// The mask provided to this method must be constructed from indices that
+ /// are valid in the caller’s context. As the mask is already computed by
+ /// the caller, this does not take an ordering type parameter.
+ ///
+ /// ## Parameters
+ ///
+ /// - `mask`: A mask of any number of bits. This is a selection mask: all
+ /// bits in the mask that are set to `1` will invert the corresponding bit
+ /// in `*self`.
+ ///
+ /// ## Returns
+ ///
+ /// The prior value of the memory element.
+ ///
+ /// ## Effects
+ ///
+ /// All bits in `*self` corresponding to `1` bits in the `mask` are
+ /// inverted; all others retain their original value.
+ #[inline]
+ fn invert_bits(&self, mask: BitMask<Self::Item>) -> Self::Item {
+ self.fetch_xor(mask.into_inner(), Ordering::Relaxed)
+ }
+
+ /// Writes a value to one bit in a memory element, returning the previous
+ /// value.
+ ///
+ /// ## Type Parameters
+ ///
+ /// - `O`: An ordering of bits in a memory element that translates the
+ /// `index` into a real position.
+ ///
+ /// ## Parameters
+ ///
+ /// - `index`: The semantic index of the bit in `*self` to modify.
+ /// - `value`: The new bit value to write into `*self` at the `index`.
+ ///
+ /// ## Returns
+ ///
+ /// The bit previously stored in `*self` at `index`. These operations are
+ /// required to load the `*self` value from memory in order to operate, and
+ /// so always have the prior value available for use. This can reduce
+ /// spurious loads throughout the crate.
+ ///
+ /// ## Effects
+ ///
+ /// `*self` is updated with the bit at `index` set to `value`; all other
+ /// bits remain unchanged.
+ #[inline]
+ fn write_bit<O>(&self, index: BitIdx<Self::Item>, value: bool) -> bool
+ where O: BitOrder {
+ let select = index.select::<O>().into_inner();
+ select
+ & if value {
+ self.fetch_or(select, Ordering::Relaxed)
+ }
+ else {
+ self.fetch_and(!select, Ordering::Relaxed)
+ } != <Self::Item>::ZERO
+ }
+
+ /// Gets the function that will write `value` into all bits under a mask.
+ ///
+ /// This is useful for preparing bulk operations that all write the same
+ /// data into memory, and only need to provide the shape of memory to write.
+ ///
+ /// ## Parameters
+ ///
+ /// - `value`: The bit that will be written by the returned function.
+ ///
+ /// ## Returns
+ ///
+ /// A function which writes `value` into memory at a given address and under
+ /// a given mask. If `value` is `false`, then this produces [`clear_bits`];
+ /// if it is `true`, then this produces [`set_bits`].
+ ///
+ /// [`clear_bits`]: Self::clear_bits
+ /// [`set_bits`]: Self::set_bits
+ #[inline]
+ fn get_writers(
+ value: bool,
+ ) -> for<'a> fn(&'a Self, BitMask<Self::Item>) -> Self::Item {
+ if value {
+ Self::set_bits
+ }
+ else {
+ Self::clear_bits
+ }
+ }
+}
+
+impl<A> BitAccess for A
+where
+ A: Radium,
+ A::Item: BitRegister,
+{
+}
+
+#[doc = include_str!("../doc/access/BitSafe.md")]
+pub trait BitSafe {
+ /// The element type being guarded against improper mutation.
+ ///
+ /// This is only present as an extra proof that the type graph has a
+ /// consistent view of the underlying memory.
+ type Mem: BitRegister;
+
+ /// The memory-access type this guards.
+ ///
+ /// This is exposed as an associated type so that `BitStore` can name it
+ /// without having to re-select it based on crate configuration.
+ type Rad: Radium<Item = Self::Mem>;
+
+ /// The zero constant.
+ const ZERO: Self;
+
+ /// Loads the value from memory, allowing for the possibility that other
+ /// handles have write permissions to it.
+ fn load(&self) -> Self::Mem;
+}
+
+/// Constructs a shared-mutable guard type that disallows mutation *through it*.
+macro_rules! safe {
+ ($($t:ident => $w:ident => $r:ty);+ $(;)?) => { $(
+ #[derive(Debug)]
+ #[repr(transparent)]
+ #[doc = include_str!("../doc/access/impl_BitSafe.md")]
+ pub struct $w {
+ inner: <Self as BitSafe>::Rad,
+ }
+
+ impl $w {
+ /// Allow construction of the safed value by forwarding to its
+ /// interior constructor.
+ ///
+ /// This type is not public API, and general use has no reason to
+ /// construct values of it directly. It is provided for convenience
+ /// as a crate internal.
+ pub(crate) const fn new(value: $t) -> Self {
+ Self { inner: <<Self as BitSafe>::Rad>::new(value) }
+ }
+ }
+
+ impl BitSafe for $w {
+ type Mem = $t;
+
+ #[cfg(feature = "atomic")]
+ type Rad = $r;
+
+ #[cfg(not(feature = "atomic"))]
+ type Rad = core::cell::Cell<$t>;
+
+ const ZERO: Self = Self::new(0);
+
+ #[inline]
+ fn load(&self) -> Self::Mem {
+ self.inner.load(Ordering::Relaxed)
+ }
+ }
+ )+ };
+}
+
+safe! {
+ u8 => BitSafeU8 => radium::types::RadiumU8;
+ u16 => BitSafeU16 => radium::types::RadiumU16;
+ u32 => BitSafeU32 => radium::types::RadiumU32;
+}
+
+#[cfg(target_pointer_width = "64")]
+safe!(u64 => BitSafeU64 => radium::types::RadiumU64);
+
+safe!(usize => BitSafeUsize => radium::types::RadiumUsize);
+
+#[cfg(test)]
+mod tests {
+ use core::cell::Cell;
+
+ use super::*;
+ use crate::prelude::*;
+
+ #[test]
+ fn touch_memory() {
+ let data = Cell::new(0u8);
+ let accessor = &data;
+ let aliased = unsafe { &*(&data as *const _ as *const BitSafeU8) };
+
+ assert!(!BitAccess::write_bit::<Lsb0>(
+ accessor,
+ BitIdx::new(1).unwrap(),
+ true
+ ));
+ assert_eq!(aliased.load(), 2);
+ assert!(BitAccess::write_bit::<Lsb0>(
+ accessor,
+ BitIdx::new(1).unwrap(),
+ false
+ ));
+ assert_eq!(aliased.load(), 0);
+ }
+
+ #[test]
+ #[cfg(not(miri))]
+ fn sanity_check_prefetch() {
+ use core::cell::Cell;
+ assert_eq!(
+ <Cell<u8> as BitAccess>::get_writers(false) as *const (),
+ <Cell<u8> as BitAccess>::clear_bits as *const ()
+ );
+
+ assert_eq!(
+ <Cell<u8> as BitAccess>::get_writers(true) as *const (),
+ <Cell<u8> as BitAccess>::set_bits as *const ()
+ );
+ }
+}
diff --git a/src/array.rs b/src/array.rs
new file mode 100644
index 0000000..d5f0a93
--- /dev/null
+++ b/src/array.rs
@@ -0,0 +1,118 @@
+#![doc = include_str!("../doc/array.md")]
+
+use core::marker::PhantomData;
+
+use crate::{
+ mem,
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ slice::BitSlice,
+ view::BitViewSized,
+};
+
+mod api;
+mod iter;
+mod ops;
+mod tests;
+mod traits;
+
+pub use self::iter::IntoIter;
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/array/BitArray.md")]
+pub struct BitArray<A = [usize; 1], O = Lsb0>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ /// The ordering of bits within an `A::Store` element.
+ pub _ord: PhantomData<O>,
+ /// The wrapped data buffer.
+ pub data: A,
+}
+
+impl<A, O> BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ /// A bit-array with all bits initialized to zero.
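+ ///
+ /// ## Examples
+ ///
+ /// A brief sketch; any storage type and ordering behave the same way:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let arr = BitArray::<[u8; 2], Msb0>::ZERO;
+ /// assert_eq!(arr.len(), 16);
+ /// assert_eq!(arr.count_ones(), 0);
+ /// ```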
+ pub const ZERO: Self = Self {
+ _ord: PhantomData,
+ data: A::ZERO,
+ };
+
+ /// Wraps an existing buffer as a bit-array.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = [0u16, 1, 2, 3];
+ /// let bits = BitArray::<_, Msb0>::new(data);
+ /// assert_eq!(bits.len(), 64);
+ /// ```
+ #[inline]
+ pub fn new(data: A) -> Self {
+ Self { data, ..Self::ZERO }
+ }
+
+ /// Removes the bit-array wrapper, returning the contained buffer.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bitarr![0; 30];
+ /// let native: [usize; 1] = bits.into_inner();
+ /// ```
+ #[inline]
+ pub fn into_inner(self) -> A {
+ self.data
+ }
+
+ /// Explicitly views the bit-array as a bit-slice.
+ #[inline]
+ pub fn as_bitslice(&self) -> &BitSlice<A::Store, O> {
+ self.data.view_bits::<O>()
+ }
+
+ /// Explicitly views the bit-array as a mutable bit-slice.
+ #[inline]
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<A::Store, O> {
+ self.data.view_bits_mut::<O>()
+ }
+
+ /// Views the bit-array as a slice of its underlying memory elements.
+ #[inline]
+ pub fn as_raw_slice(&self) -> &[A::Store] {
+ self.data.as_raw_slice()
+ }
+
+ /// Views the bit-array as a mutable slice of its underlying memory
+ /// elements.
+ #[inline]
+ pub fn as_raw_mut_slice(&mut self) -> &mut [A::Store] {
+ self.data.as_raw_mut_slice()
+ }
+
+ /// Gets the length (in bits) of the bit-array.
+ ///
+ /// This method is a compile-time constant.
+ #[inline]
+ pub fn len(&self) -> usize {
+ mem::bits_of::<A>()
+ }
+
+ /// Tests whether the array is empty.
+ ///
+ /// This method is a compile-time constant.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ mem::bits_of::<A>() == 0
+ }
+}
diff --git a/src/array/api.rs b/src/array/api.rs
new file mode 100644
index 0000000..5da346d
--- /dev/null
+++ b/src/array/api.rs
@@ -0,0 +1,55 @@
+#![doc = include_str!("../../doc/array/api.md")]
+
+use super::BitArray;
+use crate::{
+ order::BitOrder,
+ slice::BitSlice,
+ view::BitViewSized,
+};
+
+impl<A, O> BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ /// Returns a bit-slice containing the entire bit-array. Equivalent to
+ /// `&a[..]`.
+ ///
+ /// Because `BitArray` can be viewed as a slice of bits or as a slice of
+ /// elements with equal ease, you should switch to using [`.as_bitslice()`]
+ /// or [`.as_raw_slice()`] to make your choice explicit.
+ ///
+ /// ## Original
+ ///
+ /// [`array::as_slice`](https://doc.rust-lang.org/std/primitive.array.html#method.as_slice)
+ ///
+ /// [`.as_bitslice()`]: Self::as_bitslice
+ /// [`.as_raw_slice()`]: Self::as_raw_slice
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` or `.as_raw_slice()` instead"]
+ pub fn as_slice(&self) -> &BitSlice<A::Store, O> {
+ self.as_bitslice()
+ }
+
+ /// Returns a mutable bit-slice containing the entire bit-array. Equivalent
+ /// to `&mut a[..]`.
+ ///
+ /// Because `BitArray` can be viewed as a slice of bits or as a slice of
+ /// elements with equal ease, you should switch to using
+ /// [`.as_mut_bitslice()`] or [`.as_raw_mut_slice()`] to make your choice
+ /// explicit.
+ ///
+ /// ## Original
+ ///
+ /// [`array::as_mut_slice`](https://doc.rust-lang.org/std/primitive.array.html#method.as_mut_slice)
+ ///
+ /// [`.as_mut_bitslice()`]: Self::as_mut_bitslice
+ /// [`.as_raw_mut_slice()`]: Self::as_raw_mut_slice
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_mut_bitslice()` or `.as_raw_mut_slice()` instead"]
+ pub fn as_mut_slice(&mut self) -> &mut BitSlice<A::Store, O> {
+ self.as_mut_bitslice()
+ }
+}
diff --git a/src/array/iter.rs b/src/array/iter.rs
new file mode 100644
index 0000000..7a04226
--- /dev/null
+++ b/src/array/iter.rs
@@ -0,0 +1,229 @@
+#![doc = include_str!("../../doc/array/iter.md")]
+
+use core::{
+ fmt::{
+ self,
+ Debug,
+ Formatter,
+ },
+ iter::FusedIterator,
+ ops::Range,
+};
+
+use tap::Pipe;
+use wyz::comu::Const;
+
+use super::BitArray;
+use crate::{
+ mem,
+ order::BitOrder,
+ ptr::BitPtr,
+ slice::BitSlice,
+ view::BitViewSized,
+};
+
+/// [Original](https://doc.rust-lang.org/std/primitive.array.html#impl-IntoIterator)
+impl<A, O> IntoIterator for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type IntoIter = IntoIter<A, O>;
+ type Item = <IntoIter<A, O> as Iterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self)
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.array.html#impl-IntoIterator-1)
+#[cfg(not(tarpaulin_include))]
+impl<'a, A, O> IntoIterator for &'a BitArray<A, O>
+where
+ O: BitOrder,
+ A: 'a + BitViewSized,
+{
+ type IntoIter = <&'a BitSlice<A::Store, O> as IntoIterator>::IntoIter;
+ type Item = <&'a BitSlice<A::Store, O> as IntoIterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_bitslice().into_iter()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.array.html#impl-IntoIterator-2)
+#[cfg(not(tarpaulin_include))]
+impl<'a, A, O> IntoIterator for &'a mut BitArray<A, O>
+where
+ O: BitOrder,
+ A: 'a + BitViewSized,
+{
+ type IntoIter = <&'a mut BitSlice<A::Store, O> as IntoIterator>::IntoIter;
+ type Item = <&'a mut BitSlice<A::Store, O> as IntoIterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_mut_bitslice().into_iter()
+ }
+}
+
+#[derive(Clone)]
+#[doc = include_str!("../../doc/array/IntoIter.md")]
+pub struct IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ /// The bit-array being iterated.
+ array: BitArray<A, O>,
+ /// The indices in `.array` that have not yet been yielded.
+ ///
+ /// This range is always contained within `0 .. self.array.len()`.
+ alive: Range<usize>,
+}
+
+impl<A, O> IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ /// Converts a bit-array into its iterator.
+ ///
+ /// The [`.into_iter()`] method on bit-arrays forwards to this. While
+ /// `BitArray` does deref to `&/mut BitSlice`, which also has
+ /// `.into_iter()`, this behavior has always been present alongside
+ /// `BitArray` and there is no legacy forwarding to preserve.
+ ///
+ /// ## Original
+ ///
+ /// [`IntoIter::new`](core::array::IntoIter::new)
+ #[inline]
+ pub fn new(array: BitArray<A, O>) -> Self {
+ Self {
+ array,
+ alive: 0 .. mem::bits_of::<A>(),
+ }
+ }
+
+ /// Views the remaining unyielded bits in the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`IntoIter::as_slice`](core::array::IntoIter::as_slice)
+ #[inline]
+ pub fn as_bitslice(&self) -> &BitSlice<A::Store, O> {
+ unsafe { self.array.as_bitslice().get_unchecked(self.alive.clone()) }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_slice(&self) -> &BitSlice<A::Store, O> {
+ self.as_bitslice()
+ }
+
+ /// Mutably views the remaining unyielded bits in the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`IntoIter::as_mut_slice`](core::array::IntoIter::as_mut_slice)
+ #[inline]
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<A::Store, O> {
+ unsafe {
+ self.array
+ .as_mut_bitslice()
+ .get_unchecked_mut(self.alive.clone())
+ }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice_mut()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_mut_slice(&mut self) -> &mut BitSlice<A::Store, O> {
+ self.as_mut_bitslice()
+ }
+
+ /// Gets a bit from the bit-array.
+ #[inline]
+ fn get(&self, index: usize) -> bool {
+ unsafe {
+ self.array
+ .as_raw_slice()
+ .pipe(BitPtr::<Const, A::Store, O>::from_slice)
+ .add(index)
+ .read()
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> Debug for IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("IntoIter")
+ .field(&self.as_bitslice())
+ .finish()
+ }
+}
+
+impl<A, O> Iterator for IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type Item = bool;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.alive.next().map(|idx| self.get(idx))
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.alive.nth(n).map(|idx| self.get(idx))
+ }
+}
+
+impl<A, O> DoubleEndedIterator for IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.alive.next_back().map(|idx| self.get(idx))
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.alive.nth_back(n).map(|idx| self.get(idx))
+ }
+}
+
+impl<A, O> ExactSizeIterator for IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.alive.len()
+ }
+}
+
+impl<A, O> FusedIterator for IntoIter<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+}
diff --git a/src/array/ops.rs b/src/array/ops.rs
new file mode 100644
index 0000000..fb9012e
--- /dev/null
+++ b/src/array/ops.rs
@@ -0,0 +1,242 @@
+//! Operator trait implementations for bit-arrays.
+
+use core::ops::{
+ BitAnd,
+ BitAndAssign,
+ BitOr,
+ BitOrAssign,
+ BitXor,
+ BitXorAssign,
+ Deref,
+ DerefMut,
+ Index,
+ IndexMut,
+ Not,
+};
+
+use super::BitArray;
+use crate::{
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+ view::BitViewSized,
+};
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BitAndAssign<BitArray<A, O>> for BitSlice<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: BitArray<A, O>) {
+ *self &= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BitAndAssign<&BitArray<A, O>> for BitSlice<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: &BitArray<A, O>) {
+ *self &= rhs.as_bitslice()
+ }
+}
+
+impl<A, O, Rhs> BitAnd<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: BitAndAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitand(mut self, rhs: Rhs) -> Self::Output {
+ self &= rhs;
+ self
+ }
+}
+
+impl<A, O, Rhs> BitAndAssign<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: BitAndAssign<Rhs>,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() &= rhs;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BitOrAssign<BitArray<A, O>> for BitSlice<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: BitArray<A, O>) {
+ *self |= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BitOrAssign<&BitArray<A, O>> for BitSlice<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: &BitArray<A, O>) {
+ *self |= rhs.as_bitslice()
+ }
+}
+
+impl<A, O, Rhs> BitOr<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: BitOrAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitor(mut self, rhs: Rhs) -> Self::Output {
+ self |= rhs;
+ self
+ }
+}
+
+impl<A, O, Rhs> BitOrAssign<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: BitOrAssign<Rhs>,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() |= rhs;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BitXorAssign<BitArray<A, O>> for BitSlice<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: BitArray<A, O>) {
+ *self ^= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BitXorAssign<&BitArray<A, O>> for BitSlice<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: &BitArray<A, O>) {
+ *self ^= rhs.as_bitslice()
+ }
+}
+
+impl<A, O, Rhs> BitXor<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: BitXorAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitxor(mut self, rhs: Rhs) -> Self::Output {
+ self ^= rhs;
+ self
+ }
+}
+
+impl<A, O, Rhs> BitXorAssign<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: BitXorAssign<Rhs>,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() ^= rhs;
+ }
+}
+
+impl<A, O> Deref for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type Target = BitSlice<A::Store, O>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.as_bitslice()
+ }
+}
+
+impl<A, O> DerefMut for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.as_mut_bitslice()
+ }
+}
+
+impl<A, O, Idx> Index<Idx> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: Index<Idx>,
+{
+ type Output = <BitSlice<A::Store, O> as Index<Idx>>::Output;
+
+ #[inline]
+ fn index(&self, index: Idx) -> &Self::Output {
+ &self.as_bitslice()[index]
+ }
+}
+
+impl<A, O, Idx> IndexMut<Idx> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ BitSlice<A::Store, O>: IndexMut<Idx>,
+{
+ #[inline]
+ fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
+ &mut self.as_mut_bitslice()[index]
+ }
+}
+
+impl<A, O> Not for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type Output = Self;
+
+ #[inline]
+ fn not(mut self) -> Self::Output {
+ for elem in self.as_raw_mut_slice() {
+ elem.store_value(!elem.load_value());
+ }
+ self
+ }
+}
diff --git a/src/array/tests.rs b/src/array/tests.rs
new file mode 100644
index 0000000..a3e5210
--- /dev/null
+++ b/src/array/tests.rs
@@ -0,0 +1,176 @@
+//! Unit tests for bit-arrays.
+
+#![cfg(test)]
+
+use core::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ },
+ cell::Cell,
+ convert::TryFrom,
+ fmt::Debug,
+ hash::Hash,
+ ops::{
+ BitAnd,
+ BitOr,
+ BitXor,
+ Index,
+ IndexMut,
+ Range,
+ },
+};
+
+use static_assertions::*;
+
+use crate::prelude::*;
+
+#[test]
+fn core_impl() {
+ assert_impl_all!(
+ BitArray: AsMut<BitSlice>,
+ AsRef<BitSlice>,
+ Borrow<BitSlice>,
+ BorrowMut<BitSlice>,
+ Debug,
+ Default,
+ Eq,
+ Hash,
+ Index<usize>,
+ Index<Range<usize>>,
+ IndexMut<Range<usize>>,
+ IntoIterator,
+ Ord,
+ PartialEq<&'static BitSlice>,
+ PartialEq<&'static mut BitSlice>,
+ PartialOrd<&'static BitSlice>,
+ TryFrom<&'static BitSlice>,
+ );
+ assert_impl_all!(&'static BitArray: TryFrom<&'static BitSlice>);
+ assert_impl_all!(&'static mut BitArray: TryFrom<&'static mut BitSlice>);
+}
+
+#[test]
+fn bonus_impl() {
+ assert_impl_all!(
+ BitArray: BitAnd<&'static BitSlice>,
+ BitAnd<BitArray>,
+ BitOr<&'static BitSlice>,
+ BitOr<BitArray>,
+ BitXor<&'static BitSlice>,
+ BitXor<BitArray>,
+ );
+}
+
+#[test]
+fn make_and_view() {
+ let data = [1u8, 2, 3, 4];
+ let bits = BitArray::<_, Msb0>::new(data);
+
+ assert_eq!(bits.as_bitslice(), data.view_bits::<Msb0>());
+
+ assert_eq!(bits.len(), data.view_bits::<Msb0>().len());
+ assert!(!bits.is_empty());
+ assert_eq!(bits.into_inner(), data);
+}
+
+#[test]
+fn ops() {
+ let a = bitarr![0, 0, 1, 1];
+ let b = bitarr![0, 1, 0, 1];
+
+ let c = a & b;
+ assert_eq!(c, bitarr![0, 0, 0, 1]);
+
+ let d = a | b;
+ assert_eq!(d, bitarr![0, 1, 1, 1]);
+
+ let e = a ^ b;
+ assert_eq!(e, bitarr![0, 1, 1, 0]);
+
+ let mut f = !e;
+ assert_eq!(f[.. 4], bitarr![1, 0, 0, 1][.. 4]);
+
+ let _: &BitSlice = &a;
+ let _: &mut BitSlice = &mut f;
+}
+
+#[test]
+fn traits() {
+ let a = BitArray::<[Cell<u16>; 3], Msb0>::default();
+ let b = a.clone();
+ assert_eq!(a, b);
+
+ let mut c = rand::random::<[u8; 4]>();
+ let d = c.view_bits_mut::<Lsb0>();
+ assert!(<&BitArray<[u8; 4], Lsb0>>::try_from(&*d).is_ok());
+ assert!(<&mut BitArray<[u8; 4], Lsb0>>::try_from(&mut *d).is_ok());
+ assert!(<&BitArray<[u8; 3], Lsb0>>::try_from(&d[4 .. 28]).is_err());
+ assert!(<&mut BitArray<[u8; 3], Lsb0>>::try_from(&mut d[4 .. 28]).is_err());
+ assert_eq!(BitArray::<[u8; 4], Lsb0>::try_from(&*d).unwrap(), *d);
+}
+
+#[test]
+fn iter() {
+ let data = rand::random::<[u32; 4]>();
+ let bits = data.into_bitarray::<Lsb0>();
+ let view = data.view_bits::<Lsb0>();
+
+ assert!(
+ bits.into_iter()
+ .zip(view.iter().by_vals())
+ .all(|(a, b)| a == b)
+ );
+
+ let mut iter = bits.into_iter();
+ assert!(iter.next().is_some());
+ assert!(iter.next_back().is_some());
+ assert!(iter.nth(6).is_some());
+ assert!(iter.nth_back(6).is_some());
+ assert_eq!(iter.len(), 112);
+
+ assert_eq!(iter.as_bitslice(), &view[8 .. 120]);
+ assert_eq!(iter.as_mut_bitslice(), &view[8 .. 120]);
+}
+
+#[cfg(feature = "alloc")]
+mod format {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+ use core::{
+ any,
+ convert::TryFrom,
+ };
+
+ use super::{
+ BitArray,
+ Lsb0,
+ };
+
+ #[test]
+ fn render() {
+ let render = format!("{:?}", BitArray::<u8, Lsb0>::ZERO);
+ assert!(render.starts_with(&format!(
+ "BitArray<u8, {}>",
+ any::type_name::<Lsb0>(),
+ )));
+ assert!(render.ends_with("[0, 0, 0, 0, 0, 0, 0, 0]"));
+
+ assert_eq!(
+ format!(
+ "{:?}",
+ BitArray::<u8, Lsb0>::try_from(bits![u8, Lsb0; 0, 1])
+ .unwrap_err(),
+ ),
+ "TryFromBitSliceError::UnequalLen(2 != 8)",
+ );
+ assert_eq!(
+ format!(
+ "{:?}",
+ BitArray::<u8, Lsb0>::try_from(&bits![u8, Lsb0; 0; 9][1 ..])
+ .unwrap_err(),
+ ),
+ "TryFromBitSliceError::Misaligned",
+ );
+ }
+}
diff --git a/src/array/traits.rs b/src/array/traits.rs
new file mode 100644
index 0000000..beefdf8
--- /dev/null
+++ b/src/array/traits.rs
@@ -0,0 +1,382 @@
+//! Additional trait implementations on bit-arrays.
+
+use core::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ },
+ cmp,
+ convert::TryFrom,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ marker::Unpin,
+};
+
+use tap::TryConv;
+
+use super::BitArray;
+use crate::{
+ index::BitIdx,
+ mem,
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+ view::BitViewSized,
+};
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> Borrow<BitSlice<A::Store, O>> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn borrow(&self) -> &BitSlice<A::Store, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> BorrowMut<BitSlice<A::Store, O>> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut BitSlice<A::Store, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+impl<A, O> Clone for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ let mut out = Self::ZERO;
+ for (dst, src) in
+ out.as_raw_mut_slice().iter_mut().zip(self.as_raw_slice())
+ {
+ dst.store_value(src.load_value());
+ }
+ out
+ }
+}
+
+impl<A, O> Eq for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> Ord for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.as_bitslice().cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<O1, A, O2, T> PartialEq<BitArray<A, O2>> for BitSlice<T, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ A: BitViewSized,
+ T: BitStore,
+{
+ #[inline]
+ fn eq(&self, other: &BitArray<A, O2>) -> bool {
+ self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O, Rhs> PartialEq<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ Rhs: ?Sized,
+ BitSlice<A::Store, O>: PartialEq<Rhs>,
+{
+ #[inline]
+ fn eq(&self, other: &Rhs) -> bool {
+ self.as_bitslice() == other
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, T, O> PartialOrd<BitArray<A, O>> for BitSlice<T, O>
+where
+ A: BitViewSized,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitArray<A, O>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O, Rhs> PartialOrd<Rhs> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+ Rhs: ?Sized,
+ BitSlice<A::Store, O>: PartialOrd<Rhs>,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
+ self.as_bitslice().partial_cmp(other)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> AsRef<BitSlice<A::Store, O>> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<A::Store, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> AsMut<BitSlice<A::Store, O>> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut BitSlice<A::Store, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> From<A> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(data: A) -> Self {
+ Self::new(data)
+ }
+}
+
+impl<A, O> TryFrom<&BitSlice<A::Store, O>> for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type Error = TryFromBitSliceError;
+
+ #[inline]
+ fn try_from(src: &BitSlice<A::Store, O>) -> Result<Self, Self::Error> {
+ src.try_conv::<&Self>().map(|this| this.clone())
+ }
+}
+
+impl<A, O> TryFrom<&BitSlice<A::Store, O>> for &BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type Error = TryFromBitSliceError;
+
+ #[inline]
+ fn try_from(src: &BitSlice<A::Store, O>) -> Result<Self, Self::Error> {
+ TryFromBitSliceError::new::<A, O>(src).map(|()| unsafe {
+ &*src
+ .as_bitspan()
+ .address()
+ .to_const()
+ .cast::<BitArray<A, O>>()
+ })
+ }
+}
+
+impl<A, O> TryFrom<&mut BitSlice<A::Store, O>> for &mut BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ type Error = TryFromBitSliceError;
+
+ #[inline]
+ fn try_from(src: &mut BitSlice<A::Store, O>) -> Result<Self, Self::Error> {
+ TryFromBitSliceError::new::<A, O>(src).map(|()| unsafe {
+ &mut *src
+ .as_mut_bitspan()
+ .address()
+ .to_mut()
+ .cast::<BitArray<A, O>>()
+ })
+ }
+}
+
+impl<A, O> Default for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::ZERO
+ }
+}
+
+impl<A, O> Debug for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ self.as_bitspan().render(fmt, "Array", None)?;
+ fmt.write_str(" ")?;
+ Display::fmt(self, fmt)
+ }
+}
+
+easy_fmt! {
+ impl Binary
+ impl Display
+ impl LowerHex
+ impl Octal
+ impl UpperHex
+ for BitArray
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> Hash for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, hasher: &mut H)
+ where H: Hasher {
+ self.as_bitslice().hash(hasher);
+ }
+}
+
+impl<A, O> Copy for BitArray<A, O>
+where
+ O: BitOrder,
+ A: BitViewSized + Copy,
+{
+}
+
+impl<A, O> Unpin for BitArray<A, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+}
+
+#[repr(transparent)]
+#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
+#[doc = include_str!("../../doc/array/TryFromBitSliceError.md")]
+pub struct TryFromBitSliceError(InnerError);
+
+impl TryFromBitSliceError {
+ /// Checks whether a bit-slice can be viewed as a bit-array.
+ #[inline]
+ fn new<A, O>(bits: &BitSlice<A::Store, O>) -> Result<(), Self>
+ where
+ O: BitOrder,
+ A: BitViewSized,
+ {
+ InnerError::new::<A, O>(bits).map_err(Self)
+ }
+}
+
+impl Debug for TryFromBitSliceError {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.write_str("TryFromBitSliceError::")?;
+ match self.0 {
+ InnerError::UnequalLen { actual, expected } => {
+ write!(fmt, "UnequalLen({} != {})", actual, expected)
+ },
+ InnerError::Misaligned => fmt.write_str("Misaligned"),
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl Display for TryFromBitSliceError {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ match self.0 {
+ InnerError::UnequalLen { actual, expected } => write!(
+ fmt,
+ "bit-slice with length {} cannot be viewed as bit-array with \
+ length {}",
+ actual, expected,
+ ),
+ InnerError::Misaligned => fmt.write_str(
+ "a bit-slice must begin at the front edge of a storage element \
+ in order to be viewed as a bit-array",
+ ),
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for TryFromBitSliceError {}
+
+/// Opaque error type for bit-slice to bit-array view conversions.
+#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
+enum InnerError {
+ /// A bit-slice did not match the length of the destination bit-array.
+ UnequalLen {
+ /// The length of the bit-slice that produced this error.
+ actual: usize,
+ /// The length of the destination bit-array type.
+ expected: usize,
+ },
+ /// A bit-slice did not begin at `BitIdx::MIN`.
+ Misaligned,
+}
+
+impl InnerError {
+ /// Checks whether a bit-slice is suitable to view as a bit-array.
+ #[inline]
+ fn new<A, O>(bits: &BitSlice<A::Store, O>) -> Result<(), Self>
+ where
+ O: BitOrder,
+ A: BitViewSized,
+ {
+ let bitspan = bits.as_bitspan();
+ let actual = bitspan.len();
+ let expected = mem::bits_of::<A>();
+ if actual != expected {
+ return Err(Self::UnequalLen { actual, expected });
+ }
+ if bitspan.head() != BitIdx::<<A::Store as BitStore>::Mem>::MIN {
+ return Err(Self::Misaligned);
+ }
+ Ok(())
+ }
+}
diff --git a/src/boxed.rs b/src/boxed.rs
new file mode 100644
index 0000000..b145fa3
--- /dev/null
+++ b/src/boxed.rs
@@ -0,0 +1,365 @@
+#![cfg(feature = "alloc")]
+#![doc = include_str!("../doc/boxed.md")]
+
+use alloc::boxed::Box;
+use core::{
+ mem::ManuallyDrop,
+ slice,
+};
+
+use tap::{
+ Pipe,
+ Tap,
+};
+use wyz::comu::Mut;
+
+use crate::{
+ index::BitIdx,
+ mem,
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ ptr::{
+ BitPtr,
+ BitSpan,
+ },
+ slice::BitSlice,
+ store::BitStore,
+ vec::BitVec,
+ view::BitView,
+};
+
+mod api;
+mod iter;
+mod ops;
+mod tests;
+mod traits;
+
+pub use self::iter::IntoIter;
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/boxed/BitBox.md")]
+pub struct BitBox<T = usize, O = Lsb0>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Describes the region that the box owns.
+ bitspan: BitSpan<Mut, T, O>,
+}
+
+impl<T, O> BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Copies a bit-slice region into a new bit-box allocation.
+ ///
+ /// The referent memory is `memcpy`d into the heap, exactly preserving the
+ /// original bit-slice’s memory layout and contents. This allows the
+ /// function to run as fast as possible, but misaligned source bit-slices
+ /// may result in decreased performance or unexpected layout behavior during
+ /// use. You can use [`.force_align()`] to ensure that the referent
+ /// bit-slice is aligned in memory.
+ ///
+ /// ## Notes
+ ///
+ /// Bits in the allocation of the source bit-slice, but outside its own
+ /// description of that memory, have an **unspecified**, but initialized,
+ /// value. You may not rely on their contents in any way, and you *should*
+ /// call [`.force_align()`] and/or [`.fill_uninitialized()`] if you are
+ /// going to inspect the underlying memory of the new allocation.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 0b0101_1011u8;
+ /// let bits = data.view_bits::<Msb0>();
+ /// let bb = BitBox::from_bitslice(&bits[2 ..]);
+ /// assert_eq!(bb, bits[2 ..]);
+ /// ```
+ ///
+ /// [`.fill_uninitialized()`]: Self::fill_uninitialized
+ /// [`.force_align()`]: Self::force_align
+ #[inline]
+ pub fn from_bitslice(slice: &BitSlice<T, O>) -> Self {
+ BitVec::from_bitslice(slice).into_boxed_bitslice()
+ }
+
+ /// Converts a `Box<[T]>` into a `BitBox<T, O>`, in place.
+ ///
+ /// This does not affect the referent buffer, and only transforms the
+ /// handle.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the provided `boxed` slice is too long to view as a
+ /// bit-slice region.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let boxed: Box<[u8]> = Box::new([0; 40]);
+ /// let addr = boxed.as_ptr();
+ /// let bb = BitBox::<u8>::from_boxed_slice(boxed);
+ /// assert_eq!(bb, bits![0; 320]);
+ /// assert_eq!(addr, bb.as_raw_slice().as_ptr());
+ /// ```
+ #[inline]
+ pub fn from_boxed_slice(boxed: Box<[T]>) -> Self {
+ Self::try_from_boxed_slice(boxed)
+ .expect("slice was too long to be converted into a `BitBox`")
+ }
+
+ /// Attempts to convert an ordinary boxed slice into a boxed bit-slice.
+ ///
+ /// This does not perform a copy or reällocation; it only attempts to
+ /// transform the handle. Because a `Box<[T]>` can be longer than a `BitBox`
+ /// is able to address, this may fail, and will return the original handle if
+ /// it does.
+ ///
+ /// It is unlikely that you have a single `Box<[_]>` that is too large to
+ /// convert into a bit-box. You can find the length restrictions as the
+ /// bit-slice associated constants [`MAX_BITS`] and [`MAX_ELTS`].
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let boxed: Box<[u8]> = Box::new([0u8; 40]);
+ /// let addr = boxed.as_ptr();
+ /// let bb = BitBox::<u8>::try_from_boxed_slice(boxed).unwrap();
+ /// assert_eq!(bb, bits![0; 320]);
+ /// assert_eq!(addr, bb.as_raw_slice().as_ptr());
+ /// ```
+ ///
+ /// [`MAX_BITS`]: crate::slice::BitSlice::MAX_BITS
+ /// [`MAX_ELTS`]: crate::slice::BitSlice::MAX_ELTS
+ #[inline]
+ pub fn try_from_boxed_slice(boxed: Box<[T]>) -> Result<Self, Box<[T]>> {
+ let mut boxed = ManuallyDrop::new(boxed);
+
+ BitPtr::from_mut_slice(boxed.as_mut())
+ .span(boxed.len() * mem::bits_of::<T::Mem>())
+ .map(|bitspan| Self { bitspan })
+ .map_err(|_| ManuallyDrop::into_inner(boxed))
+ }
+
+ /// Converts the bit-box back into an ordinary boxed element slice.
+ ///
+ /// This does not touch the allocator or the buffer contents; it is purely a
+ /// handle transform.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bb = bitbox![0; 5];
+ /// let addr = bb.as_raw_slice().as_ptr();
+ /// let boxed = bb.into_boxed_slice();
+ /// assert_eq!(boxed[..], [0][..]);
+ /// assert_eq!(addr, boxed.as_ptr());
+ /// ```
+ #[inline]
+ pub fn into_boxed_slice(self) -> Box<[T]> {
+ self.pipe(ManuallyDrop::new)
+ .as_raw_mut_slice()
+ .pipe(|slice| unsafe { Box::from_raw(slice) })
+ }
+
+ /// Converts the bit-box into a bit-vector.
+ ///
+ /// This uses the Rust allocator API, and does not guarantee whether or not
+ /// a reällocation occurs internally.
+ ///
+ /// The resulting bit-vector can be converted back into a bit-box via
+ /// [`BitBox::into_boxed_bitslice`][0].
+ ///
+ /// ## Original
+ ///
+ /// [`slice::into_vec`](https://doc.rust-lang.org/std/primitive.slice.html#method.into_vec)
+ ///
+ /// ## API Differences
+ ///
+ /// The original function is implemented in an `impl<T> [T]` block, despite
+ /// taking a `Box<[T]>` receiver. Since `BitBox` cannot be used as an
+ /// explicit receiver outside its own `impl` blocks, the method is relocated
+ /// here.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bb = bitbox![0, 1, 0, 0, 1];
+ /// let bv = bb.into_bitvec();
+ ///
+ /// assert_eq!(bv, bitvec![0, 1, 0, 0, 1]);
+ /// ```
+ ///
+ /// [0]: crate::vec::BitVec::into_boxed_bitslice
+ #[inline]
+ pub fn into_bitvec(self) -> BitVec<T, O> {
+ let bitspan = self.bitspan;
+ /* This pipeline converts the underlying `Box<[T]>` into a `Vec<T>`,
+ * then converts that into a `BitVec`. This handles any changes that
+ * may occur in the allocator. Once done, the original head/span
+ * values need to be written into the `BitVec`, since the conversion
+ * from `Vec` always fully spans the live elements.
+ */
+ self.pipe(ManuallyDrop::new)
+ .with_box(|b| unsafe { ManuallyDrop::take(b) })
+ .into_vec()
+ .pipe(BitVec::from_vec)
+ .tap_mut(|bv| unsafe {
+ // len first! Otherwise, the descriptor might briefly go out of
+ // bounds.
+ bv.set_len_unchecked(bitspan.len());
+ bv.set_head(bitspan.head());
+ })
+ }
+
+ /// Explicitly views the bit-box as a bit-slice.
+ #[inline]
+ pub fn as_bitslice(&self) -> &BitSlice<T, O> {
+ unsafe { self.bitspan.into_bitslice_ref() }
+ }
+
+ /// Explicitly views the bit-box as a mutable bit-slice.
+ #[inline]
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<T, O> {
+ unsafe { self.bitspan.into_bitslice_mut() }
+ }
+
+ /// Views the bit-box as a slice of its underlying memory elements.
+ ///
+ /// Because bit-boxes uniquely own their buffer, they can safely view the
+ /// underlying buffer without dealing with contending neighbors.
+ #[inline]
+ pub fn as_raw_slice(&self) -> &[T] {
+ let (data, len) =
+ (self.bitspan.address().to_const(), self.bitspan.elements());
+ unsafe { slice::from_raw_parts(data, len) }
+ }
+
+ /// Views the bit-box as a mutable slice of its underlying memory elements.
+ ///
+ /// Because bit-boxes uniquely own their buffer, they can safely view the
+ /// underlying buffer without dealing with contending neighbors.
+ #[inline]
+ pub fn as_raw_mut_slice(&mut self) -> &mut [T] {
+ let (data, len) =
+ (self.bitspan.address().to_mut(), self.bitspan.elements());
+ unsafe { slice::from_raw_parts_mut(data, len) }
+ }
+
+ /// Sets the unused bits outside the `BitBox` buffer to a fixed value.
+ ///
+ /// This method modifies all bits that the allocated buffer owns but which
+ /// are outside the `self.as_bitslice()` view. `bitvec` guarantees that all
+ /// owned bits are initialized to *some* value, but does not guarantee
+ /// *which* value. This method can be used to make all such unused bits have
+ /// a known value after the call, so that viewing the underlying memory
+ /// directly has consistent results.
+ ///
+ /// Note that the crate implementation guarantees that all bits owned by its
+ /// handles are stably initialized according to the language and compiler
+ /// rules! `bitvec` will never cause UB by using uninitialized memory.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = 0b1011_0101u8.view_bits::<Msb0>();
+ /// let mut bb = BitBox::from_bitslice(&bits[2 .. 6]);
+ /// assert_eq!(bb.count_ones(), 3);
+ /// // Remember, the two bits on each edge are unspecified, and cannot be
+ /// // observed! They must be masked away for the test to be meaningful.
+ /// assert_eq!(bb.as_raw_slice()[0] & 0x3C, 0b00_1101_00u8);
+ ///
+ /// bb.fill_uninitialized(false);
+ /// assert_eq!(bb.as_raw_slice(), &[0b00_1101_00u8]);
+ ///
+ /// bb.fill_uninitialized(true);
+ /// assert_eq!(bb.as_raw_slice(), &[0b11_1101_11u8]);
+ /// ```
+ #[inline]
+ pub fn fill_uninitialized(&mut self, value: bool) {
+ let (_, head, bits) = self.bitspan.raw_parts();
+ let head = head.into_inner() as usize;
+ let tail = head + bits;
+ let all = self.as_raw_mut_slice().view_bits_mut::<O>();
+ unsafe {
+ all.get_unchecked_mut(.. head).fill(value);
+ all.get_unchecked_mut(tail ..).fill(value);
+ }
+ }
+
+ /// Ensures that the allocated buffer has no dead bits between the start of
+ /// the buffer and the start of the live bit-slice.
+ ///
+ /// This is useful for ensuring a consistent memory layout in bit-boxes
+ /// created by cloning an arbitrary bit-slice into the heap. As bit-slices
+ /// can begin and end anywhere in memory, the [`::from_bitslice()`] function
+ /// does not attempt to normalize them and only does a fast element-wise
+ /// copy when creating the bit-box.
+ ///
+ /// The value of dead bits that are in the allocation but not in the live
+ /// region are *initialized*, but do not have a *specified* value. After
+ /// calling this method, you should use [`.fill_uninitialized()`] to set the
+ /// excess bits in the buffer to a fixed value.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = &0b10_1101_01u8.view_bits::<Msb0>()[2 .. 6];
+ /// let mut bb = BitBox::from_bitslice(bits);
+ /// // Remember, the two bits on each edge are unspecified, and cannot be
+ /// // observed! They must be masked away for the test to be meaningful.
+ /// assert_eq!(bb.as_raw_slice()[0] & 0x3C, 0b00_1101_00u8);
+ ///
+ /// bb.force_align();
+ /// bb.fill_uninitialized(false);
+ /// assert_eq!(bb.as_raw_slice(), &[0b1101_0000u8]);
+ /// ```
+ ///
+ /// [`::from_bitslice()`]: Self::from_bitslice
+ /// [`.fill_uninitialized()`]: Self::fill_uninitialized
+ #[inline]
+ pub fn force_align(&mut self) {
+ let head = self.bitspan.head();
+ if head == BitIdx::MIN {
+ return;
+ }
+ let head = head.into_inner() as usize;
+ let last = self.len() + head;
+ unsafe {
+ self.bitspan.set_head(BitIdx::MIN);
+ self.copy_within_unchecked(head .. last, 0);
+ }
+ }
+
+ /// Permits a function to modify the `Box` backing storage of a `BitBox`
+ /// handle.
+ ///
+ /// This produces a temporary `Box` view of the bit-box’s buffer and allows
+ /// a function to have mutable access to it. After the callback returns, the
+ /// `Box` is written back into `self` and forgotten.
+ #[inline]
+ fn with_box<F, R>(&mut self, func: F) -> R
+ where F: FnOnce(&mut ManuallyDrop<Box<[T]>>) -> R {
+ self.as_raw_mut_slice()
+ .pipe(|raw| unsafe { Box::from_raw(raw) })
+ .pipe(ManuallyDrop::new)
+ .pipe_ref_mut(func)
+ }
+}
diff --git a/src/boxed/api.rs b/src/boxed/api.rs
new file mode 100644
index 0000000..b618205
--- /dev/null
+++ b/src/boxed/api.rs
@@ -0,0 +1,139 @@
+//! Port of the `Box<[T]>` inherent API.
+
+use core::mem;
+
+use tap::Tap;
+
+use super::BitBox;
+use crate::{
+ order::BitOrder,
+ ptr::BitSpan,
+ slice::BitSlice,
+ store::BitStore,
+ vec::BitVec,
+};
+
+impl<T, O> BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Constructs a bit-box from a raw bit-slice pointer.
+ ///
+ /// This converts a `*mut BitSlice` pointer that had previously been
+ /// produced by either [`::into_raw()`] or [`::leak()`] and restores the
+ /// bit-box containing it.
+ ///
+ /// ## Original
+ ///
+ /// [`Box::from_raw`](alloc::boxed::Box::from_raw)
+ ///
+ /// ## Safety
+ ///
+ /// You must only call this function on pointers produced by leaking a prior
+ /// `BitBox`; you may not modify the value of a pointer returned by
+ /// [`::into_raw()`], nor may you conjure pointer values of your own. Doing
+ /// so will corrupt the allocator state.
+ ///
+ /// You must only call this function on any given leaked pointer at most
+ /// once. Not calling it at all will merely render the allocated memory
+ /// unreachable for the duration of the program runtime, a normal (and safe)
+ /// memory leak. Calling it once restores ordinary functionality, and
+ /// ensures ordinary destruction at or before program termination. However,
+ /// calling it more than once on the same pointer will introduce data races,
+ /// use-after-free, and/or double-free errors.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bb = bitbox![0; 80];
+ /// let ptr: *mut BitSlice = BitBox::into_raw(bb);
+ /// let bb = unsafe { BitBox::from_raw(ptr) };
+ /// // unsafe { BitBox::from_raw(ptr) }; // UAF crash!
+ /// ```
+ ///
+ /// [`::into_raw()`]: Self::into_raw
+ /// [`::leak()`]: Self::leak
+ #[inline]
+ pub unsafe fn from_raw(raw: *mut BitSlice<T, O>) -> Self {
+ Self {
+ bitspan: BitSpan::from_bitslice_ptr_mut(raw),
+ }
+ }
+
+ /// Consumes the bit-box, returning a raw bit-slice pointer.
+ ///
+ /// Bit-slice pointers are always correctly encoded and non-null. The
+ /// referent region is dereferenceäble *as a `BitSlice`* for the remainder of
+ /// the program, or until it is first passed to [`::from_raw()`], whichever
+ /// comes first. Once the pointer is first passed to `::from_raw()`, all
+ /// copies of that pointer become invalid to dereference.
+ ///
+ /// ## Original
+ ///
+ /// [`Box::into_raw`](alloc::boxed::Box::into_raw)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bb = bitbox![0; 80];
+ /// let ptr = BitBox::into_raw(bb);
+ /// let bb = unsafe { BitBox::from_raw(ptr) };
+ /// ```
+ ///
+ /// You **may not** deällocate pointers produced by this function through
+ /// any other means.
+ ///
+ /// [`::from_raw()`]: Self::from_raw
+ #[inline]
+ pub fn into_raw(this: Self) -> *mut BitSlice<T, O> {
+ Self::leak(this)
+ }
+
+ /// Deliberately leaks the allocated memory, returning an
+ /// `&'static mut BitSlice` reference.
+ ///
+ /// This differs from [`::into_raw()`] in that the reference is safe to use
+ /// and can be tracked by the Rust borrow-checking system. Like the
+ /// bit-slice pointer produced by `::into_raw()`, this reference can be
+ /// un-leaked by passing it into [`::from_raw()`] to reclaim the memory.
+ ///
+ /// ## Original
+ ///
+ /// [`Box::leak`](alloc::boxed::Box::leak)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bb = bitbox![0; 80];
+ /// let static_ref: &'static mut BitSlice = BitBox::leak(bb);
+ ///
+ /// static_ref.set(0, true);
+ /// assert!(static_ref[0]);
+ /// let _ = unsafe {
+ /// BitBox::from_raw(static_ref)
+ /// };
+ /// ```
+ ///
+ /// [`::from_raw()`]: Self::from_raw
+ /// [`::into_raw()`]: Self::into_raw
+ #[inline]
+ pub fn leak<'a>(this: Self) -> &'a mut BitSlice<T, O>
+ where T: 'a {
+ unsafe { this.bitspan.into_bitslice_mut() }.tap(|_| mem::forget(this))
+ }
+
+ #[inline]
+ #[doc(hidden)]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.into_bitvec()` instead"]
+ pub fn into_vec(self) -> BitVec<T, O> {
+ self.into_bitvec()
+ }
+}
diff --git a/src/boxed/iter.rs b/src/boxed/iter.rs
new file mode 100644
index 0000000..e483eea
--- /dev/null
+++ b/src/boxed/iter.rs
@@ -0,0 +1,241 @@
+#![doc = include_str!("../../doc/boxed/iter.md")]
+
+use core::{
+ fmt::{
+ self,
+ Debug,
+ Formatter,
+ },
+ iter::FusedIterator,
+ ops::Range,
+};
+
+use super::BitBox;
+use crate::{
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ slice::BitSlice,
+ store::BitStore,
+};
+
+/// [Original](alloc::vec::IntoIter)
+impl<T, O> IntoIterator for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type IntoIter = IntoIter<T, O>;
+ type Item = bool;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self)
+ }
+}
+
+/** An iterator over a `BitBox`.
+
+## Original
+
+[`vec::IntoIter`](alloc::vec::IntoIter)
+**/
+pub struct IntoIter<T = usize, O = Lsb0>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The original `BitBox`, kept so it can correctly drop.
+ _buf: BitBox<T, O>,
+ /// A range of indices yet to be iterated.
+ // TODO(myrrlyn): Race this against `BitPtrRange<Mut, T, O>`.
+ iter: Range<usize>,
+}
+
+impl<T, O> IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Wraps a bit-box in an iterator view. This is irreversible.
+ #[inline]
+ fn new(this: BitBox<T, O>) -> Self {
+ let iter = 0 .. this.len();
+ Self { _buf: this, iter }
+ }
+
+ /// Views the remaining unyielded bits as a bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`IntoIter::as_slice`](alloc::vec::IntoIter::as_slice)
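+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch of observing the bits not yet yielded:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut iter = bitbox![0, 1, 1, 0].into_iter();
+ /// assert_eq!(iter.next(), Some(false));
+ /// assert_eq!(iter.as_bitslice(), bits![1, 1, 0]);
+ /// ```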
+ #[inline]
+ pub fn as_bitslice(&self) -> &BitSlice<T, O> {
+ // While the memory is never actually deïnitialized, this is still a
+ // good habit to do.
+ unsafe {
+ self._buf
+ .as_bitptr()
+ .add(self.iter.start)
+ .span_unchecked(self.iter.len())
+ .into_bitslice_ref()
+ }
+ }
+
+ #[inline]
+ #[doc(hidden)]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_slice(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+
+ /// Views the remaining unyielded bits as a mutable bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`IntoIter::as_mut_slice`](alloc::vec::IntoIter::as_mut_slice)
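+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch: writes through this view are observed by later
+ /// iteration.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut iter = bitbox![0, 0, 1].into_iter();
+ /// iter.as_mut_bitslice().set(0, true);
+ /// assert!(iter.next().unwrap());
+ /// ```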
+ #[inline]
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<T, O> {
+ unsafe {
+ self._buf
+ .as_mut_bitptr()
+ .add(self.iter.start)
+ .span_unchecked(self.iter.len())
+ .into_bitslice_mut()
+ }
+ }
+
+ #[inline]
+ #[doc(hidden)]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_mut_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_mut_slice(&mut self) -> &mut BitSlice<T, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-AsRef%3C%5BT%5D%3E)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitSlice<T, O>> for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self {
+ _buf: self._buf.clone(),
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-Debug)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Debug for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("IntoIter")
+ .field(&self.as_bitslice())
+ .finish()
+ }
+}
+
+impl<T, O> Iterator for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Item = bool;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter
+ .next()
+ .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() })
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter
+ .nth(n)
+ .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() })
+ }
+}
+
+impl<T, O> DoubleEndedIterator for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter
+ .next_back()
+ .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() })
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter
+ .nth_back(n)
+ .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() })
+ }
+}
+
+impl<T, O> ExactSizeIterator for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T, O> FusedIterator for IntoIter<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-Send)
+// #[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T, O> Send for IntoIter<T, O>
+where
+ T: BitStore + Sync,
+ O: BitOrder,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-Sync)
+unsafe impl<T, O> Sync for IntoIter<T, O>
+where
+ T: BitStore + Sync,
+ O: BitOrder,
+{
+}
diff --git a/src/boxed/ops.rs b/src/boxed/ops.rs
new file mode 100644
index 0000000..74ccad7
--- /dev/null
+++ b/src/boxed/ops.rs
@@ -0,0 +1,257 @@
+//! Operator trait implementations for boxed bit-slices.
+
+use core::{
+ mem::ManuallyDrop,
+ ops::{
+ BitAnd,
+ BitAndAssign,
+ BitOr,
+ BitOrAssign,
+ BitXor,
+ BitXorAssign,
+ Deref,
+ DerefMut,
+ Index,
+ IndexMut,
+ Not,
+ },
+};
+
+use super::BitBox;
+use crate::{
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitAndAssign<BitBox<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: BitBox<T, O>) {
+ *self &= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitAndAssign<&BitBox<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: &BitBox<T, O>) {
+ *self &= rhs.as_bitslice()
+ }
+}
+
+impl<T, O, Rhs> BitAnd<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitAndAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitand(mut self, rhs: Rhs) -> Self::Output {
+ self &= rhs;
+ self
+ }
+}
+
+impl<T, O, Rhs> BitAndAssign<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitAndAssign<Rhs>,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() &= rhs;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitOrAssign<BitBox<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: BitBox<T, O>) {
+ *self |= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitOrAssign<&BitBox<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: &BitBox<T, O>) {
+ *self |= rhs.as_bitslice()
+ }
+}
+
+impl<T, O, Rhs> BitOr<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitOrAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitor(mut self, rhs: Rhs) -> Self::Output {
+ self |= rhs;
+ self
+ }
+}
+
+impl<T, O, Rhs> BitOrAssign<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitOrAssign<Rhs>,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() |= rhs;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitXorAssign<BitBox<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: BitBox<T, O>) {
+ *self ^= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitXorAssign<&BitBox<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: &BitBox<T, O>) {
+ *self ^= rhs.as_bitslice()
+ }
+}
+
+impl<T, O, Rhs> BitXor<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitXorAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitxor(mut self, rhs: Rhs) -> Self::Output {
+ self ^= rhs;
+ self
+ }
+}
+
+impl<T, O, Rhs> BitXorAssign<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitXorAssign<Rhs>,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() ^= rhs;
+ }
+}
+
+impl<T, O> Deref for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Target = BitSlice<T, O>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.as_bitslice()
+ }
+}
+
+impl<T, O> DerefMut for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.as_mut_bitslice()
+ }
+}
+
+impl<T, O> Drop for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn drop(&mut self) {
+ self.with_box(|b| unsafe { ManuallyDrop::drop(b) })
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Idx> Index<Idx> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: Index<Idx>,
+{
+ type Output = <BitSlice<T, O> as Index<Idx>>::Output;
+
+ #[inline]
+ fn index(&self, index: Idx) -> &Self::Output {
+ &self.as_bitslice()[index]
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Idx> IndexMut<Idx> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: IndexMut<Idx>,
+{
+ #[inline]
+ fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
+ &mut self.as_mut_bitslice()[index]
+ }
+}
+
+impl<T, O> Not for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Output = Self;
+
+ #[inline]
+ fn not(mut self) -> Self::Output {
+ for elem in self.as_raw_mut_slice().iter_mut() {
+ elem.store_value(!elem.load_value());
+ }
+ self
+ }
+}
diff --git a/src/boxed/tests.rs b/src/boxed/tests.rs
new file mode 100644
index 0000000..5360e3d
--- /dev/null
+++ b/src/boxed/tests.rs
@@ -0,0 +1,187 @@
+//! Unit tests for boxed bit-slices.
+
+#![cfg(test)]
+
+#[cfg(not(feature = "std"))]
+use alloc::vec;
+use alloc::{
+ borrow::Cow,
+ boxed::Box,
+};
+use core::{
+ any,
+ borrow::{
+ Borrow,
+ BorrowMut,
+ },
+ convert::TryFrom,
+ fmt::{
+ Debug,
+ Display,
+ Pointer,
+ },
+ hash::Hash,
+ iter::{
+ FromIterator,
+ FusedIterator,
+ },
+ ops::{
+ Deref,
+ DerefMut,
+ },
+};
+
+use static_assertions::*;
+
+use crate::prelude::*;
+
+#[test]
+fn inherents() {
+ let bits = bits![0, 1, 0, 0, 1];
+ let mut boxed = BitBox::from_bitslice(&bits[1 ..]);
+ assert_eq!(boxed, bits[1 ..]);
+ assert_eq!(boxed.bitspan.head().into_inner(), 1);
+ boxed.force_align();
+ assert_eq!(boxed.bitspan.head().into_inner(), 0);
+
+ let data = vec![0u8, 1, 2, 3].into_boxed_slice();
+ let ptr = data.as_ptr();
+ let boxed: BitBox<u8> = BitBox::from_boxed_slice(data);
+ assert_eq!(boxed.len(), 32);
+ assert_eq!(boxed.count_ones(), 4);
+ let data = boxed.into_boxed_slice();
+ assert_eq!(data.as_ptr(), ptr);
+
+ let bv = BitBox::<u8>::from_boxed_slice(data).into_bitvec();
+ assert_eq!(bv.len(), 32);
+
+ assert_eq!(bitbox![0, 1, 0, 0, 1].as_bitslice(), bits![0, 1, 0, 0, 1]);
+ assert_eq!(bitbox![0; 5].as_mut_bitslice(), bits![0; 5]);
+
+ let mut bb = bitbox![0; 5];
+ bb.fill_uninitialized(true);
+ assert_eq!(bb.as_raw_slice(), &[!0usize << 5][..]);
+
+ let ptr = BitBox::into_raw(bb);
+ let bb = unsafe { BitBox::from_raw(ptr) };
+ assert_eq!(ptr as *const BitSlice, bb.as_bitslice() as *const BitSlice);
+}
+
+#[test]
+fn iter() {
+ let bb = bitbox![0, 1, 1, 0, 0, 1];
+ let mut iter = bb.into_iter();
+ assert_eq!(iter.len(), 6);
+
+ assert!(!iter.next().unwrap());
+ assert_eq!(iter.as_bitslice(), bits![1, 1, 0, 0, 1]);
+ assert!(iter.next_back().unwrap());
+ assert_eq!(iter.as_mut_bitslice(), bits![1, 1, 0, 0]);
+ assert!(iter.nth(1).unwrap());
+ assert!(!iter.nth_back(1).unwrap());
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn traits() {
+ assert_impl_all!(
+ BitBox: AsMut<BitSlice>,
+ AsRef<BitSlice>,
+ Borrow<BitSlice>,
+ BorrowMut<BitSlice>,
+ Clone,
+ Debug,
+ Default,
+ Deref,
+ DerefMut,
+ Display,
+ Drop,
+ Eq,
+ From<&'static BitSlice>,
+ From<BitArray>,
+ From<Box<usize>>,
+ From<Cow<'static, BitSlice>>,
+ From<BitVec>,
+ FromIterator<bool>,
+ Hash,
+ Ord,
+ PartialEq<BitSlice>,
+ PartialOrd<BitSlice>,
+ Pointer,
+ TryFrom<Box<[usize]>>,
+ Unpin,
+ );
+ assert_impl_all!(
+ super::IntoIter: AsRef<BitSlice>,
+ Clone,
+ Debug,
+ DoubleEndedIterator,
+ ExactSizeIterator,
+ FusedIterator,
+ Send,
+ Sync,
+ );
+}
+
+#[test]
+fn conversions() {
+ let bits = bits![0, 1, 0, 0, 1];
+ assert_eq!(BitBox::from(bits), bits);
+
+ let arr: BitArray = BitArray::new(rand::random());
+ assert_eq!(BitBox::from(arr), arr);
+
+ let boxed = Box::new(5usize);
+ assert_eq!(
+ BitBox::<_, Lsb0>::from(boxed.clone()),
+ boxed.view_bits::<Lsb0>()
+ );
+
+ let cow = Cow::Borrowed([0usize, 1].view_bits::<Lsb0>());
+ assert_eq!(BitBox::from(cow.clone()), &*cow);
+
+ assert_eq!(BitBox::from(bitvec![0, 1]), bits![0, 1]);
+
+ let boxed: Box<[usize]> = BitBox::from(cow.clone()).into();
+ assert_eq!(boxed[..], [0usize, 1][..]);
+
+ assert!(BitBox::<_, Lsb0>::try_from(boxed).is_ok());
+
+ assert!(BitBox::<usize, Lsb0>::default().is_empty());
+}
+
+#[test]
+fn ops() {
+ let a = bitbox![0, 0, 1, 1];
+ let b = bitbox![0, 1, 0, 1];
+
+ let c = a.clone() & b.clone();
+ assert_eq!(c, bitbox![0, 0, 0, 1]);
+
+ let d = a.clone() | b.clone();
+ assert_eq!(d, bitbox![0, 1, 1, 1]);
+
+ let e = a.clone() ^ b;
+ assert_eq!(e, bitbox![0, 1, 1, 0]);
+
+ let mut f = !e;
+ assert_eq!(f, bitbox![1, 0, 0, 1]);
+
+ let _: &BitSlice = &a;
+ let _: &mut BitSlice = &mut f;
+}
+
+#[test]
+fn format() {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+
+ let render = format!("{:?}", bitbox![0, 1, 0, 0, 1]);
+ assert!(
+ render.starts_with(&format!(
+ "BitBox<usize, {}>",
+ any::type_name::<Lsb0>(),
+ ))
+ );
+ assert!(render.ends_with("[0, 1, 0, 0, 1]"));
+}
diff --git a/src/boxed/traits.rs b/src/boxed/traits.rs
new file mode 100644
index 0000000..7fb3a30
--- /dev/null
+++ b/src/boxed/traits.rs
@@ -0,0 +1,391 @@
+//! General trait implementations for boxed bit-slices.
+
+use alloc::{
+ borrow::Cow,
+ boxed::Box,
+};
+use core::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ },
+ cmp,
+ convert::TryFrom,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ iter::FromIterator,
+};
+
+use tap::Pipe;
+
+use super::BitBox;
+use crate::{
+ array::BitArray,
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+ vec::BitVec,
+ view::BitViewSized,
+};
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Borrow<BitSlice<T, O>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn borrow(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BorrowMut<BitSlice<T, O>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut BitSlice<T, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ self.as_bitslice().pipe(Self::from_bitslice)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Eq for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Ord for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.as_bitslice().cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<O1, O2, T1, T2> PartialEq<BitBox<T2, O2>> for BitSlice<T1, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn eq(&self, other: &BitBox<T2, O2>) -> bool {
+ self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<O1, O2, T1, T2> PartialEq<BitBox<T2, O2>> for &BitSlice<T1, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn eq(&self, other: &BitBox<T2, O2>) -> bool {
+ *self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<O1, O2, T1, T2> PartialEq<BitBox<T2, O2>> for &mut BitSlice<T1, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn eq(&self, other: &BitBox<T2, O2>) -> bool {
+ **self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> PartialEq<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Rhs: ?Sized + PartialEq<BitSlice<T, O>>,
+{
+ #[inline]
+ fn eq(&self, other: &Rhs) -> bool {
+ other == self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<O1, O2, T1, T2> PartialOrd<BitBox<T2, O2>> for BitSlice<T1, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitBox<T2, O2>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> PartialOrd<Rhs> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Rhs: ?Sized + PartialOrd<BitSlice<T, O>>,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
+ other.partial_cmp(self.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, O1, O2, T1, T2> PartialOrd<BitBox<T2, O2>> for &'a BitSlice<T1, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitBox<T2, O2>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, O1, O2, T1, T2> PartialOrd<BitBox<T2, O2>> for &'a mut BitSlice<T1, O1>
+where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitBox<T2, O2>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitSlice<T, O>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsMut<BitSlice<T, O>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut BitSlice<T, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+impl<T, O> From<&'_ BitSlice<T, O>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(slice: &BitSlice<T, O>) -> Self {
+ slice.pipe(Self::from_bitslice)
+ }
+}
+
+impl<A, O> From<BitArray<A, O>> for BitBox<A::Store, O>
+where
+ A: BitViewSized,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(array: BitArray<A, O>) -> Self {
+ array.as_bitslice().pipe(Self::from_bitslice)
+ }
+}
+
+impl<T, O> From<Box<T>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(elem: Box<T>) -> Self {
+ unsafe {
+ Box::from_raw(Box::into_raw(elem).cast::<[T; 1]>() as *mut [T])
+ }
+ .pipe(Self::from_boxed_slice)
+ }
+}
+
+impl<'a, T, O> From<Cow<'a, BitSlice<T, O>>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(cow: Cow<'a, BitSlice<T, O>>) -> Self {
+ cow.into_owned().into_boxed_bitslice()
+ }
+}
+
+impl<T, O> From<BitVec<T, O>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(bv: BitVec<T, O>) -> Self {
+ bv.into_boxed_bitslice()
+ }
+}
+
+impl<T, O> From<BitBox<T, O>> for Box<[T]>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(bb: BitBox<T, O>) -> Self {
+ bb.into_boxed_slice()
+ }
+}
+
+impl<T, O> TryFrom<Box<[T]>> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Error = Box<[T]>;
+
+ #[inline]
+ fn try_from(boxed: Box<[T]>) -> Result<Self, Self::Error> {
+ Self::try_from_boxed_slice(boxed)
+ }
+}
+
+impl<T, O> Default for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::from_bitslice(BitSlice::<T, O>::empty())
+ }
+}
+
+impl<T, O> Debug for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ self.bitspan.render(fmt, "Box", None)?;
+ fmt.write_str(" ")?;
+ Display::fmt(self, fmt)
+ }
+}
+
+easy_fmt! {
+ impl Binary
+ impl Display
+ impl LowerHex
+ impl Octal
+ impl Pointer
+ impl UpperHex
+ for BitBox
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, I> FromIterator<I> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitVec<T, O>: FromIterator<I>,
+{
+ #[inline]
+ fn from_iter<II>(iter: II) -> Self
+ where II: IntoIterator<Item = I> {
+ BitVec::from_iter(iter).into_boxed_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Hash for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, state: &mut H)
+ where H: Hasher {
+ self.as_bitslice().hash(state)
+ }
+}
+
+unsafe impl<T, O> Send for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+unsafe impl<T, O> Sync for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+impl<T, O> Unpin for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
diff --git a/src/devel.rs b/src/devel.rs
new file mode 100644
index 0000000..50863f6
--- /dev/null
+++ b/src/devel.rs
@@ -0,0 +1,115 @@
+//! Support utilities for crate development.
+
+use core::any::TypeId;
+
+use crate::{
+ order::BitOrder,
+ store::BitStore,
+};
+
+/// Constructs formatting-trait implementations by delegating.
+macro_rules! easy_fmt {
+ ($(impl $fmt:ident)+ for BitArray) => { $(
+ impl<A, O> core::fmt::$fmt for $crate::array::BitArray<A, O>
+ where
+ O: $crate::order::BitOrder,
+ A: $crate::view::BitViewSized,
+ {
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::$fmt::fmt(self.as_bitslice(), fmt)
+ }
+ }
+ )+ };
+ ($(impl $fmt:ident)+ for $this:ident) => { $(
+ impl<T, O> core::fmt::$fmt for $this<T, O>
+ where
+ O: $crate::order::BitOrder,
+ T: $crate::store::BitStore,
+ {
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::$fmt::fmt(self.as_bitslice(), fmt)
+ }
+ }
+ )+ };
+}
+
+/// Implements some `Iterator` functions that have boilerplate behavior.
+macro_rules! easy_iter {
+ () => {
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+ };
+}
+
+/// Tests if two `BitOrder` implementors are the same.
+#[inline]
+pub fn match_order<O, P>() -> bool
+where
+ O: BitOrder,
+ P: BitOrder,
+{
+ eq_types::<O, P>()
+}
+
+/// Tests if two `BitStore` implementors are the same.
+#[inline]
+pub fn match_store<T, U>() -> bool
+where
+ T: BitStore,
+ U: BitStore,
+{
+ eq_types::<T, U>()
+}
+
+/// Tests if two `BitSlice` type parameter pairs match each other.
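+///
+/// A usage sketch (`ignore`d, as this module is internal to the crate):
+///
+/// ```rust,ignore
+/// use bitvec::order::{Lsb0, Msb0};
+///
+/// assert!(match_types::<u8, Lsb0, u8, Lsb0>());
+/// assert!(!match_types::<u8, Lsb0, u16, Msb0>());
+/// ```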
+#[inline]
+pub fn match_types<T1, O1, T2, O2>() -> bool
+where
+ O1: BitOrder,
+ T1: BitStore,
+ O2: BitOrder,
+ T2: BitStore,
+{
+ match_order::<O1, O2>() && match_store::<T1, T2>()
+}
+
+/// Tests if a type is known to be an unsigned integer.
+///
+/// Returns `true` for `u{8,16,32,64,128,size}` and `false` for all others.
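+///
+/// A usage sketch (`ignore`d, as this module is internal to the crate):
+///
+/// ```rust,ignore
+/// assert!(is_unsigned::<u16>());
+/// assert!(!is_unsigned::<i16>());
+/// ```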
+#[inline]
+pub fn is_unsigned<T>() -> bool
+where T: 'static {
+ eq_types::<T, u8>()
+ || eq_types::<T, u16>()
+ || eq_types::<T, u32>()
+ || eq_types::<T, u64>()
+ || eq_types::<T, u128>()
+ || eq_types::<T, usize>()
+}
+
+/// Tests if two types are identical, even through different names.
+#[inline]
+fn eq_types<T, U>() -> bool
+where
+ T: 'static,
+ U: 'static,
+{
+ TypeId::of::<T>() == TypeId::of::<U>()
+}
diff --git a/src/domain.rs b/src/domain.rs
new file mode 100644
index 0000000..f07cabf
--- /dev/null
+++ b/src/domain.rs
@@ -0,0 +1,1139 @@
+#![doc = include_str!("../doc/domain.md")]
+
+use core::{
+ any,
+ convert::{
+ TryFrom,
+ TryInto,
+ },
+ fmt::{
+ self,
+ Binary,
+ Debug,
+ Display,
+ Formatter,
+ LowerHex,
+ Octal,
+ UpperHex,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ iter::FusedIterator,
+ marker::PhantomData,
+};
+
+use tap::{
+ Conv,
+ Pipe,
+ Tap,
+};
+use wyz::{
+ comu::{
+ Address,
+ Const,
+ Mut,
+ Mutability,
+ Reference,
+ Referential,
+ SliceReferential,
+ },
+ fmt::FmtForward,
+};
+
+use crate::{
+ access::BitAccess,
+ index::{
+ BitEnd,
+ BitIdx,
+ BitMask,
+ },
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ ptr::BitSpan,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+#[doc = include_str!("../doc/domain/BitDomain.md")]
+pub enum BitDomain<'a, M = Const, T = usize, O = Lsb0>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, BitSlice<T, O>>: Referential<'a>,
+ Address<M, BitSlice<T::Unalias, O>>: Referential<'a>,
+{
+ /// Indicates that a bit-slice’s contents are entirely in the interior
+ /// indices of a single memory element.
+ ///
+ /// The contained value is always the bit-slice that created this view.
+ Enclave(Reference<'a, M, BitSlice<T, O>>),
+ /// Indicates that a bit-slice’s contents touch an element edge.
+ ///
+ /// This splits the bit-slice into three partitions, each of which may be
+ /// empty: two partially-occupied edge elements, with their original type
+ /// status, and one interior span, which is known to not have any other
+ /// aliases derived from the bit-slice that created this view.
+ Region {
+ /// Any bits that partially-fill the first element of the underlying
+ /// storage region.
+ ///
+ /// This does not modify its aliasing status, as it will already be
+ /// appropriately marked before this view is constructed.
+ head: Reference<'a, M, BitSlice<T, O>>,
+ /// Any bits that wholly-fill elements in the interior of the bit-slice.
+ ///
+ /// This is marked as unaliased, because it is statically impossible for
+ /// any other handle derived from the source bit-slice to have
+ /// conflicting access to the region of memory it describes. As such,
+ /// even a bit-slice that was marked as `::Alias` can revert this
+ /// protection on the known-unaliased interior.
+ ///
+ /// Proofs:
+ ///
+ /// - Rust’s `&`/`&mut` exclusion rules universally apply. If a
+ /// reference exists, no other reference has unsynchronized write
+ /// capability.
+ /// - `BitStore::Unalias` only modifies unsynchronized types. `Cell` and
+ /// atomic types unalias to themselves, and retain their original
+ /// behavior.
+ body: Reference<'a, M, BitSlice<T::Unalias, O>>,
+ /// Any bits that partially-fill the last element of the underlying
+ /// storage region.
+ ///
+ /// This does not modify its aliasing status, as it will already be
+ /// appropriately marked before this view is constructed.
+ tail: Reference<'a, M, BitSlice<T, O>>,
+ },
+}
+
+impl<'a, M, T, O> BitDomain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, BitSlice<T, O>>: Referential<'a>,
+ Address<M, BitSlice<T::Unalias, O>>: Referential<'a>,
+{
+ /// Attempts to unpack the bit-domain as an [`Enclave`] variant. This is
+ /// just a shorthand for explicit destructuring.
+ ///
+ /// [`Enclave`]: Self::Enclave
+ #[inline]
+ pub fn enclave(self) -> Option<Reference<'a, M, BitSlice<T, O>>> {
+ match self {
+ Self::Enclave(bits) => Some(bits),
+ _ => None,
+ }
+ }
+
+ /// Attempts to unpack the bit-domain as a [`Region`] variant. This is just
+ /// a shorthand for explicit destructuring.
+ ///
+ /// [`Region`]: Self::Region
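+ ///
+ /// ## Examples
+ ///
+ /// A partitioning sketch, using the `Msb0` ordering also used in this
+ /// module's tests:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = BitArray::<[u8; 2], Msb0>::new([0x12, 0x34]);
+ /// let (head, body, tail) = data[4 ..].bit_domain().region().unwrap();
+ /// assert_eq!(head, &data[4 .. 8]);
+ /// assert_eq!(body, &data[8 ..]);
+ /// assert!(tail.is_empty());
+ /// ```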
+ #[inline]
+ pub fn region(
+ self,
+ ) -> Option<(
+ Reference<'a, M, BitSlice<T, O>>,
+ Reference<'a, M, BitSlice<T::Unalias, O>>,
+ Reference<'a, M, BitSlice<T, O>>,
+ )> {
+ match self {
+ Self::Region { head, body, tail } => Some((head, body, tail)),
+ _ => None,
+ }
+ }
+}
+
+impl<'a, M, T, O> Default for BitDomain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, BitSlice<T, O>>: Referential<'a>,
+ Address<M, BitSlice<T::Unalias, O>>: Referential<'a>,
+ Reference<'a, M, BitSlice<T, O>>: Default,
+ Reference<'a, M, BitSlice<T::Unalias, O>>: Default,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::Region {
+ head: Default::default(),
+ body: Default::default(),
+ tail: Default::default(),
+ }
+ }
+}
+
+impl<'a, M, T, O> Debug for BitDomain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, BitSlice<T, O>>: Referential<'a>,
+ Address<M, BitSlice<T::Unalias, O>>: Referential<'a>,
+ Reference<'a, M, BitSlice<T, O>>: Debug,
+ Reference<'a, M, BitSlice<T::Unalias, O>>: Debug,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "BitDomain::<{} {}, {}>::",
+ M::RENDER,
+ any::type_name::<T::Mem>(),
+ any::type_name::<O>(),
+ )?;
+ match self {
+ Self::Enclave(elem) => {
+ fmt.debug_tuple("Enclave").field(elem).finish()
+ },
+ Self::Region { head, body, tail } => fmt
+ .debug_struct("Region")
+ .field("head", head)
+ .field("body", body)
+ .field("tail", tail)
+ .finish(),
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for BitDomain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T, O> Copy for BitDomain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[doc = include_str!("../doc/domain/Domain.md")]
+pub enum Domain<'a, M = Const, T = usize, O = Lsb0>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, T>: Referential<'a>,
+ Address<M, [T::Unalias]>: SliceReferential<'a>,
+{
+ /// Indicates that a bit-slice’s contents are entirely in the interior
+ /// indices of a single memory element.
+ ///
+ /// The contained reference is only able to observe the bits governed by the
+ /// generating bit-slice. Other handles to the element may exist, and may
+ /// write to bits outside the range that this reference can observe.
+ Enclave(PartialElement<'a, M, T, O>),
+ /// Indicates that a bit-slice’s contents touch an element edge.
+ ///
+ /// This splits the bit-slice into three partitions, each of which may be
+ /// empty: two partially-occupied edge elements, with their original type
+ /// status, and one interior span, which is known not to have any other
+ /// aliases derived from the bit-slice that created this view.
+ Region {
+ /// The first element in the bit-slice’s underlying storage, if it is
+ /// only partially used.
+ head: Option<PartialElement<'a, M, T, O>>,
+ /// All fully-used elements in the bit-slice’s underlying storage.
+ ///
+ /// This is marked as unaliased, because it is statically impossible for
+ /// any other handle derived from the source bit-slice to have
+ /// conflicting access to the region of memory it describes. As such,
+ /// even a bit-slice that was marked as `::Alias` can revert this
+ /// protection on the known-unaliased interior.
+ body: Reference<'a, M, [T::Unalias]>,
+ /// The last element in the bit-slice’s underlying storage, if it is
+ /// only partially used.
+ tail: Option<PartialElement<'a, M, T, O>>,
+ },
+}
+
+impl<'a, M, T, O> Domain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, T>: Referential<'a>,
+ Address<M, [T::Unalias]>: SliceReferential<'a>,
+{
+ /// Attempts to unpack the bit-domain as an [`Enclave`] variant. This is
+ /// just a shorthand for explicit destructuring.
+ ///
+ /// [`Enclave`]: Self::Enclave
+ #[inline]
+ pub fn enclave(self) -> Option<PartialElement<'a, M, T, O>> {
+ match self {
+ Self::Enclave(elem) => Some(elem),
+ _ => None,
+ }
+ }
+
+ /// Attempts to unpack the bit-domain as a [`Region`] variant. This is just
+ /// a shorthand for explicit destructuring.
+ ///
+ /// [`Region`]: Self::Region
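+ ///
+ /// ## Examples
+ ///
+ /// A partitioning sketch over the raw elements, assuming `Msb0` ordering:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = [0x12u8, 0x34];
+ /// let bits = data.view_bits::<Msb0>();
+ /// let (head, body, tail) = bits[4 ..].domain().region().unwrap();
+ /// assert_eq!(head.unwrap().load_value(), 0x02);
+ /// assert_eq!(body, &data[1 ..]);
+ /// assert!(tail.is_none());
+ /// ```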
+ #[inline]
+ pub fn region(
+ self,
+ ) -> Option<(
+ Option<PartialElement<'a, M, T, O>>,
+ Reference<'a, M, [T::Unalias]>,
+ Option<PartialElement<'a, M, T, O>>,
+ )> {
+ match self {
+ Self::Region { head, body, tail } => Some((head, body, tail)),
+ _ => None,
+ }
+ }
+
+ /// Converts the element-wise `Domain` into the equivalent `BitDomain`.
+ ///
+ /// This transform replaces each memory reference with an equivalent
+ /// `BitSlice` reference.
+ #[inline]
+ pub fn into_bit_domain(self) -> BitDomain<'a, M, T, O>
+ where
+ Address<M, BitSlice<T, O>>: Referential<'a>,
+ Address<M, BitSlice<T::Unalias, O>>: Referential<'a>,
+ Reference<'a, M, BitSlice<T, O>>: Default,
+ Reference<'a, M, BitSlice<T::Unalias, O>>:
+ TryFrom<Reference<'a, M, [T::Unalias]>>,
+ {
+ match self {
+ Self::Enclave(elem) => BitDomain::Enclave(elem.into_bitslice()),
+ Self::Region { head, body, tail } => BitDomain::Region {
+ head: head.map_or_else(
+ Default::default,
+ PartialElement::into_bitslice,
+ ),
+ body: body.try_into().unwrap_or_else(|_| {
+ match option_env!("CARGO_PKG_REPOSITORY") {
+ Some(env) => unreachable!(
+ "Construction of a slice with length {} should not \
+ be possible. If this assumption is outdated, \
+ please file an issue at {}",
+ (isize::MIN as usize) >> 3,
+ env,
+ ),
+ None => unreachable!(
+ "Construction of a slice with length {} should not \
+ be possible. If this assumption is outdated, \
+ please consider filing an issue",
+ (isize::MIN as usize) >> 3
+ ),
+ }
+ }),
+ tail: tail.map_or_else(
+ Default::default,
+ PartialElement::into_bitslice,
+ ),
+ },
+ }
+ }
+}
+
+/** Domain constructors.
+
+Only `Domain<Const>` and `Domain<Mut>` are ever constructed, and they of course
+are only constructed from `&BitSlice` and `&mut BitSlice`, respectively.
+
+However, the Rust trait system does not have a way to express a closed set, so
+this constructor block is written generically over every `Mutability` even
+though only the two constructions named above ever occur.
+**/
+impl<'a, M, T, O> Domain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, T>: Referential<'a>,
+ Address<M, [T::Unalias]>:
+ SliceReferential<'a, ElementAddr = Address<M, T::Unalias>>,
+ Address<M, BitSlice<T, O>>: Referential<'a>,
+ Reference<'a, M, [T::Unalias]>: Default,
+{
+ /// Creates a new `Domain` over a bit-slice.
+ ///
+ /// ## Parameters
+ ///
+ /// - `bits`: Either a `&BitSlice` or `&mut BitSlice` reference, depending
+ /// on whether a `Domain<Const>` or `Domain<Mut>` is being produced.
+ ///
+ /// ## Returns
+ ///
+ /// A `Domain` description of the raw memory governed by `bits`.
+ pub(crate) fn new(bits: Reference<'a, M, BitSlice<T, O>>) -> Self
+ where BitSpan<M, T, O>: From<Reference<'a, M, BitSlice<T, O>>> {
+ let bitspan = bits.conv::<BitSpan<M, T, O>>();
+ let (head, elts, tail) =
+ (bitspan.head(), bitspan.elements(), bitspan.tail());
+ let base = bitspan.address();
+ let (min, max) = (BitIdx::<T::Mem>::MIN, BitEnd::<T::Mem>::MAX);
+ let ctor = match (head, elts, tail) {
+ // An empty bit-slice produces the canonical empty domain.
+ (_, 0, _) => Self::empty,
+ // Both edges land on element boundaries: no partial elements at all.
+ (h, _, t) if h == min && t == max => Self::spanning,
+ // Only the front edge is interior: a partial head, no partial tail.
+ (_, _, t) if t == max => Self::partial_head,
+ // Only the back edge is interior: a partial tail, no partial head.
+ (h, ..) if h == min => Self::partial_tail,
+ // One element, both edges interior: a lone partial element.
+ (_, 1, _) => Self::minor,
+ // Several elements, both edges interior: partial head and partial tail.
+ _ => Self::major,
+ };
+ ctor(base, elts, head, tail)
+ }
+
+ /// Produces the canonical empty `Domain`.
+ #[inline]
+ fn empty(
+ _: Address<M, T>,
+ _: usize,
+ _: BitIdx<T::Mem>,
+ _: BitEnd<T::Mem>,
+ ) -> Self {
+ Default::default()
+ }
+
+ /// Produces a `Domain::Region` that contains both `head` and `tail` partial
+ /// elements as well as a `body` slice (which may be empty).
+ #[inline]
+ fn major(
+ addr: Address<M, T>,
+ elts: usize,
+ head: BitIdx<T::Mem>,
+ tail: BitEnd<T::Mem>,
+ ) -> Self {
+ let h_elem = addr;
+ let t_elem = unsafe { addr.add(elts - 1) };
+ let body = unsafe {
+ Address::<M, [T::Unalias]>::from_raw_parts(
+ addr.add(1).cast::<T::Unalias>(),
+ elts - 2,
+ )
+ };
+ Self::Region {
+ head: Some(PartialElement::new(h_elem, head, None)),
+ body,
+ tail: Some(PartialElement::new(t_elem, None, tail)),
+ }
+ }
+
+ /// Produces a `Domain::Enclave`.
+ #[inline]
+ fn minor(
+ addr: Address<M, T>,
+ _: usize,
+ head: BitIdx<T::Mem>,
+ tail: BitEnd<T::Mem>,
+ ) -> Self {
+ let elem = addr;
+ Self::Enclave(PartialElement::new(elem, head, tail))
+ }
+
+ /// Produces a `Domain::Region` with a partial `head` and a `body`, but no
+ /// `tail`.
+ #[inline]
+ fn partial_head(
+ addr: Address<M, T>,
+ elts: usize,
+ head: BitIdx<T::Mem>,
+ _: BitEnd<T::Mem>,
+ ) -> Self {
+ let elem = addr;
+ let body = unsafe {
+ Address::<M, [T::Unalias]>::from_raw_parts(
+ addr.add(1).cast::<T::Unalias>(),
+ elts - 1,
+ )
+ };
+ Self::Region {
+ head: Some(PartialElement::new(elem, head, None)),
+ body,
+ tail: None,
+ }
+ }
+
+ /// Produces a `Domain::Region` with a partial `tail` and a `body`, but no
+ /// `head`.
+ #[inline]
+ fn partial_tail(
+ addr: Address<M, T>,
+ elts: usize,
+ _: BitIdx<T::Mem>,
+ tail: BitEnd<T::Mem>,
+ ) -> Self {
+ let elem = unsafe { addr.add(elts - 1) };
+ let body = unsafe {
+ Address::<M, [T::Unalias]>::from_raw_parts(
+ addr.cast::<T::Unalias>(),
+ elts - 1,
+ )
+ };
+ Self::Region {
+ head: None,
+ body,
+ tail: Some(PartialElement::new(elem, None, tail)),
+ }
+ }
+
+ /// Produces a `Domain::Region` with neither `head` nor `tail`, but only a
+ /// `body`.
+ #[inline]
+ fn spanning(
+ addr: Address<M, T>,
+ elts: usize,
+ _: BitIdx<T::Mem>,
+ _: BitEnd<T::Mem>,
+ ) -> Self {
+ Self::Region {
+ head: None,
+ body: unsafe {
+ <Address<M, [T::Unalias]> as SliceReferential>::from_raw_parts(
+ addr.cast::<T::Unalias>(),
+ elts,
+ )
+ },
+ tail: None,
+ }
+ }
+}
+
+impl<'a, M, T, O> Default for Domain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, T>: Referential<'a>,
+ Address<M, [T::Unalias]>: SliceReferential<'a>,
+ Reference<'a, M, [T::Unalias]>: Default,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::Region {
+ head: None,
+ body: Reference::<M, [T::Unalias]>::default(),
+ tail: None,
+ }
+ }
+}
+
+impl<'a, M, T, O> Debug for Domain<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+ Address<M, T>: Referential<'a>,
+ Address<M, [T::Unalias]>: SliceReferential<'a>,
+ Reference<'a, M, [T::Unalias]>: Debug,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "Domain::<{} {}, {}>::",
+ M::RENDER,
+ any::type_name::<T>(),
+ any::type_name::<O>(),
+ )?;
+ match self {
+ Self::Enclave(elem) => {
+ fmt.debug_tuple("Enclave").field(elem).finish()
+ },
+ Self::Region { head, body, tail } => fmt
+ .debug_struct("Region")
+ .field("head", head)
+ .field("body", body)
+ .field("tail", tail)
+ .finish(),
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T, O> Iterator for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Item = T::Mem;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ match self {
+ Self::Enclave(elem) => {
+ elem.load_value().tap(|_| *self = Default::default()).into()
+ },
+ Self::Region { head, body, tail } => {
+ if let Some(elem) = head.take() {
+ return elem.load_value().into();
+ }
+ if let Some((elem, rest)) = body.split_first() {
+ *body = rest;
+ return elem.load_value().into();
+ }
+ if let Some(elem) = tail.take() {
+ return elem.load_value().into();
+ }
+ None
+ },
+ }
+ }
+}
+
+impl<T, O> DoubleEndedIterator for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self {
+ Self::Enclave(elem) => {
+ elem.load_value().tap(|_| *self = Default::default()).into()
+ },
+ Self::Region { head, body, tail } => {
+ if let Some(elem) = tail.take() {
+ return elem.load_value().into();
+ }
+ if let Some((elem, rest)) = body.split_last() {
+ *body = rest;
+ return elem.load_value().into();
+ }
+ if let Some(elem) = head.take() {
+ return elem.load_value().into();
+ }
+ None
+ },
+ }
+ }
+}
+
+impl<T, O> ExactSizeIterator for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ match self {
+ Self::Enclave(_) => 1,
+ Self::Region { head, body, tail } => {
+ head.is_some() as usize + body.len() + tail.is_some() as usize
+ },
+ }
+ }
+}
+
+impl<T, O> FusedIterator for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+impl<T, O> Copy for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// Implements numeric formatting by rendering each element.
+macro_rules! fmt {
+ ($($fmt:ty => $fwd:ident),+ $(,)?) => { $(
+ impl<'a, T, O> $fmt for Domain<'a, Const, T, O>
+ where
+ O: BitOrder,
+ T: BitStore,
+ {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_list()
+ .entries(self.into_iter().map(FmtForward::$fwd))
+ .finish()
+ }
+ }
+ )+ };
+}
+
+fmt! {
+ Binary => fmt_binary,
+ Display => fmt_display,
+ LowerHex => fmt_lower_hex,
+ Octal => fmt_octal,
+ UpperHex => fmt_upper_hex,
+}
+
+#[doc = include_str!("../doc/domain/PartialElement.md")]
+pub struct PartialElement<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The address of the memory element being partially viewed.
+ ///
+ /// This must be stored as a pointer, not a reference, because it must
+ /// retain mutability permissions but cannot have an `&mut` reference to
+ /// a shared element.
+ ///
+ /// Similarly, it must remain typed as `T`, not `T::Access`, to allow the
+ /// `<Const, uN>` case not to inappropriately produce a `<Const, Cell<uN>>`
+ /// even if no write is performed.
+ elem: Address<M, T>,
+ /// Cache the selector mask, so it never needs to be recomputed.
+ mask: BitMask<T::Mem>,
+ /// The starting index.
+ head: BitIdx<T::Mem>,
+ /// The ending index.
+ tail: BitEnd<T::Mem>,
+ /// Preserve the originating bit-order.
+ _ord: PhantomData<O>,
+ /// This type acts as-if it were a shared-mutable reference.
+ _ref: PhantomData<&'a T::Access>,
+}
+
+impl<'a, M, T, O> PartialElement<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// Constructs a new partial-element guarded reference.
+ ///
+ /// ## Parameters
+ ///
+ /// - `elem`: the element to which this partially points.
+ /// - `head`: the index at which the partial region begins.
+ /// - `tail`: the index at which the partial region ends.
+ #[inline]
+ fn new(
+ elem: Address<M, T>,
+ head: impl Into<Option<BitIdx<T::Mem>>>,
+ tail: impl Into<Option<BitEnd<T::Mem>>>,
+ ) -> Self {
+ let (head, tail) = (
+ head.into().unwrap_or(BitIdx::MIN),
+ tail.into().unwrap_or(BitEnd::MAX),
+ );
+ Self {
+ elem,
+ mask: O::mask(head, tail),
+ head,
+ tail,
+ _ord: PhantomData,
+ _ref: PhantomData,
+ }
+ }
+
+ /// Fetches the value stored through `self` and masks away extra bits.
+ ///
+ /// ## Returns
+ ///
+ /// A bit-map containing any bits set to `1` in the governed bits. All other
+ /// bits are cleared to `0`.
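+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch, assuming `Lsb0` ordering over a single byte:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let byte = 0x3Cu8;
+ /// let partial = byte.view_bits::<Lsb0>()[2 .. 6].domain().enclave().unwrap();
+ /// assert_eq!(partial.load_value(), 0b0011_1100);
+ /// ```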
+ #[inline]
+ pub fn load_value(&self) -> T::Mem {
+ self.elem
+ .pipe(|addr| unsafe { &*addr.to_const() })
+ .load_value()
+ & self.mask.into_inner()
+ }
+
+ /// Gets the starting index of the live bits in the element.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn head(&self) -> BitIdx<T::Mem> {
+ self.head
+ }
+
+ /// Gets the ending index of the live bits in the element.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn tail(&self) -> BitEnd<T::Mem> {
+ self.tail
+ }
+
+ /// Gets the semantic head and tail indices that constrain which bits of the
+ /// referent element may be accessed.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn bounds(&self) -> (BitIdx<T::Mem>, BitEnd<T::Mem>) {
+ (self.head, self.tail)
+ }
+
+ /// Gets the bit-mask over all accessible bits.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn mask(&self) -> BitMask<T::Mem> {
+ self.mask
+ }
+
+ /// Converts the partial element into a bit-slice over its governed bits.
+ #[inline]
+ pub fn into_bitslice(self) -> Reference<'a, M, BitSlice<T, O>>
+ where Address<M, BitSlice<T, O>>: Referential<'a> {
+ unsafe {
+ BitSpan::new_unchecked(
+ self.elem,
+ self.head,
+ (self.tail.into_inner() - self.head.into_inner()) as usize,
+ )
+ }
+ .to_bitslice()
+ }
+}
+
+impl<'a, T, O> PartialElement<'a, Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Address<Mut, T>: Referential<'a>,
+{
+ /// Stores a value through `self` after masking away extra bits.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `value`: A bit-map which will be written into the governed bits. This
+ /// is a bit-map store, not an integer store; the value will not be
+ /// shifted into position and will only be masked directly against the
+ /// bits that this partial-element governs.
+ ///
+ /// ## Returns
+ ///
+ /// The previous value of the governed bits.
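+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch, assuming `Lsb0` ordering over a single byte:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut byte = 0u8;
+ /// let bits = byte.view_bits_mut::<Lsb0>();
+ /// let mut partial = bits[2 .. 6].domain_mut().enclave().unwrap();
+ /// partial.store_value(!0);
+ /// assert_eq!(byte, 0b0011_1100);
+ /// ```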
+ #[inline]
+ pub fn store_value(&mut self, value: T::Mem) -> T::Mem {
+ let this = self.access();
+ let prev = this.clear_bits(self.mask);
+ this.set_bits(self.mask & value);
+ prev & self.mask.into_inner()
+ }
+
+ /// Inverts the value of each bit governed by the partial-element.
+ ///
+ /// ## Returns
+ ///
+ /// The previous value of the governed bits.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn invert(&mut self) -> T::Mem {
+ self.access().invert_bits(self.mask) & self.mask.into_inner()
+ }
+
+ /// Clears all bits governed by the partial-element to `0`.
+ ///
+ /// ## Returns
+ ///
+ /// The previous value of the governed bits.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn clear(&mut self) -> T::Mem {
+ self.access().clear_bits(self.mask) & self.mask.into_inner()
+ }
+
+ /// Sets all bits governed by the partial-element to `1`.
+ ///
+ /// ## Returns
+ ///
+ /// The previous value of the governed bits.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn set(&mut self) -> T::Mem {
+ self.access().set_bits(self.mask) & self.mask.into_inner()
+ }
+
+ /// Produces a reference capable of tolerating other handles viewing the
+ /// same *memory element*.
+ #[inline]
+ fn access(&self) -> &T::Access {
+ unsafe { &*self.elem.to_const().cast::<T::Access>() }
+ }
+}
+
+impl<'a, M, T, O> PartialElement<'a, M, T, O>
+where
+ M: Mutability,
+ O: BitOrder,
+ T: 'a + BitStore + radium::Radium,
+{
+ /// Performs a store operation on a partial-element whose bits might be
+ /// observed by another handle.
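+ ///
+ /// ## Examples
+ ///
+ /// A sketch mirroring this module's test of aliased stores:
+ ///
+ /// ```rust
+ /// use core::cell::Cell;
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = Cell::new(0u8);
+ /// let partial = data.view_bits::<Lsb0>()[2 .. 6].domain().enclave().unwrap();
+ /// assert_eq!(partial.store_value_aliased(!0), 0);
+ /// assert_eq!(data.get(), 0b0011_1100);
+ /// ```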
+ #[inline]
+ pub fn store_value_aliased(&self, value: T::Mem) -> T::Mem {
+ let this = unsafe { &*self.elem.to_const().cast::<T::Access>() };
+ let prev = this.clear_bits(self.mask);
+ this.set_bits(self.mask & value);
+ prev & self.mask.into_inner()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> Clone for PartialElement<'a, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Address<Const, T>: Referential<'a>,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<'a, M, T, O> Debug for PartialElement<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "PartialElement<{} {}, {}>",
+ M::RENDER,
+ any::type_name::<T>(),
+ any::type_name::<O>(),
+ )?;
+ fmt.debug_struct("")
+ .field("elem", &self.load_value())
+ .field("mask", &self.mask.fmt_display())
+ .field("head", &self.head.fmt_display())
+ .field("tail", &self.tail.fmt_display())
+ .finish()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, M, T, O> Hash for PartialElement<'a, M, T, O>
+where
+ M: Mutability,
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, hasher: &mut H)
+ where H: Hasher {
+ self.load_value().hash(hasher);
+ self.mask.hash(hasher);
+ self.head.hash(hasher);
+ self.tail.hash(hasher);
+ }
+}
+
+impl<T, O> Copy for PartialElement<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[cfg(test)]
+mod tests {
+ use rand::random;
+
+ use super::*;
+ use crate::prelude::*;
+
+ #[test]
+ fn bit_domain() {
+ let data = BitArray::<[u32; 3], Msb0>::new(random());
+
+ let bd = data.bit_domain();
+ assert!(bd.enclave().is_none());
+ let (head, body, tail) = bd.region().unwrap();
+ assert_eq!(data, body);
+ assert!(head.is_empty());
+ assert!(tail.is_empty());
+
+ let bd = data[2 ..].bit_domain();
+ let (head, body, tail) = bd.region().unwrap();
+ assert_eq!(head, &data[2 .. 32]);
+ assert_eq!(body, &data[32 ..]);
+ assert!(tail.is_empty());
+
+ let bd = data[.. 94].bit_domain();
+ let (head, body, tail) = bd.region().unwrap();
+ assert!(head.is_empty());
+ assert_eq!(body, &data[.. 64]);
+ assert_eq!(tail, &data[64 .. 94]);
+
+ let bd = data[2 .. 94].bit_domain();
+ let (head, body, tail) = bd.region().unwrap();
+ assert_eq!(head, &data[2 .. 32]);
+ assert_eq!(body, &data[32 .. 64]);
+ assert_eq!(tail, &data[64 .. 94]);
+
+ let bd = data[34 .. 62].bit_domain();
+ assert!(bd.region().is_none());
+ assert_eq!(bd.enclave().unwrap(), data[34 .. 62]);
+
+ let (head, body, tail) =
+ BitDomain::<Const, usize, Lsb0>::default().region().unwrap();
+ assert!(head.is_empty());
+ assert!(body.is_empty());
+ assert!(tail.is_empty());
+ }
+
+ #[test]
+ fn domain() {
+ let data: [u32; 3] = random();
+ let bits = data.view_bits::<Msb0>();
+
+ let d = bits.domain();
+ assert!(d.enclave().is_none());
+ let (head, body, tail) = d.region().unwrap();
+ assert!(head.is_none());
+ assert!(tail.is_none());
+ assert_eq!(body, data);
+
+ let d = bits[2 ..].domain();
+ let (head, body, tail) = d.region().unwrap();
+ assert_eq!(head.unwrap().load_value(), (data[0] << 2) >> 2);
+ assert_eq!(body, &data[1 ..]);
+ assert!(tail.is_none());
+
+ let d = bits[.. 94].domain();
+ let (head, body, tail) = d.region().unwrap();
+ assert!(head.is_none());
+ assert_eq!(body, &data[.. 2]);
+ assert_eq!(tail.unwrap().load_value(), (data[2] >> 2) << 2);
+
+ let d = bits[2 .. 94].domain();
+ let (head, body, tail) = d.region().unwrap();
+ assert_eq!(head.unwrap().load_value(), (data[0] << 2) >> 2);
+ assert_eq!(body, &data[1 .. 2]);
+ assert_eq!(tail.unwrap().load_value(), (data[2] >> 2) << 2);
+
+ let d = bits[34 .. 62].domain();
+ assert!(d.region().is_none());
+ assert_eq!(
+ d.enclave().unwrap().load_value(),
+ ((data[1] << 2) >> 4) << 2,
+ );
+
+ assert!(matches!(bits![].domain(), Domain::Region {
+ head: None,
+ body: &[],
+ tail: None,
+ }));
+
+ assert!(matches!(
+ Domain::<Const, usize, Lsb0>::default(),
+ Domain::Region {
+ head: None,
+ body: &[],
+ tail: None,
+ },
+ ));
+
+ let data = core::cell::Cell::new(0u8);
+ let partial =
+ data.view_bits::<Lsb0>()[2 .. 6].domain().enclave().unwrap();
+ assert_eq!(partial.store_value_aliased(!0), 0);
+ assert_eq!(data.get(), 0b00_1111_00);
+ }
+
+ #[test]
+ fn iter() {
+ let bits = [0x12u8, 0x34, 0x56].view_bits::<Lsb0>();
+ let mut domain = bits[4 .. 12].domain();
+ assert_eq!(domain.len(), 2);
+ assert_eq!(domain.next().unwrap(), 0x10);
+ assert_eq!(domain.next_back().unwrap(), 0x04);
+
+ assert!(domain.next().is_none());
+ assert!(domain.next_back().is_none());
+
+ assert_eq!(bits[2 .. 6].domain().len(), 1);
+ assert_eq!(bits[18 .. 22].domain().next_back().unwrap(), 0b00_0101_00);
+
+ let mut domain = bits[4 .. 20].domain();
+ assert_eq!(domain.next_back().unwrap(), 0x06);
+ assert_eq!(domain.next_back().unwrap(), 0x34);
+ assert_eq!(domain.next_back().unwrap(), 0x10);
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn render() {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+
+ let data = BitArray::<u32, Msb0>::new(random());
+
+ let render = format!("{:?}", data.bit_domain());
+ let expected = format!(
+ "BitDomain::<*const u32, {}>::Region {{ head: {:?}, body: {:?}, \
+ tail: {:?} }}",
+ any::type_name::<Msb0>(),
+ BitSlice::<u32, Msb0>::empty(),
+ data.as_bitslice(),
+ BitSlice::<u32, Msb0>::empty(),
+ );
+ assert_eq!(render, expected);
+
+ let render = format!("{:?}", data[2 .. 30].bit_domain());
+ let expected = format!(
+ "BitDomain::<*const u32, {}>::Enclave({:?})",
+ any::type_name::<Msb0>(),
+ &data[2 .. 30],
+ );
+ assert_eq!(render, expected);
+
+ let render = format!("{:?}", data.domain());
+ let expected = format!(
+ "Domain::<*const u32, {}>::Region {{ head: None, body: {:?}, tail: \
+ None }}",
+ any::type_name::<Msb0>(),
+ data.as_raw_slice(),
+ );
+ assert_eq!(render, expected);
+
+ let render = format!("{:?}", data[2 .. 30].domain());
+ let expected = format!(
+ "Domain::<*const u32, {}>::Enclave",
+ any::type_name::<Msb0>(),
+ );
+ assert!(render.starts_with(&expected));
+
+ let partial = 0x3Cu8.view_bits::<Lsb0>()[2 .. 6]
+ .domain()
+ .enclave()
+ .unwrap();
+ let render = format!("{:?}", partial);
+ assert_eq!(
+ render,
+ format!(
+ "PartialElement<*const u8, {}> {{ elem: 60, mask: {}, head: \
+ {}, tail: {} }}",
+ any::type_name::<Lsb0>(),
+ partial.mask,
+ partial.head,
+ partial.tail,
+ ),
+ );
+ }
+}
diff --git a/src/field.rs b/src/field.rs
new file mode 100644
index 0000000..7211821
--- /dev/null
+++ b/src/field.rs
@@ -0,0 +1,638 @@
+#![doc = include_str!("../doc/field.md")]
+
+use core::{
+ mem,
+ ptr,
+};
+
+use funty::Integral;
+use tap::Pipe;
+use wyz::comu::{
+ Const,
+ Mut,
+};
+
+use crate::{
+ array::BitArray,
+ devel as dvl,
+ domain::{
+ Domain,
+ PartialElement,
+ },
+ mem::bits_of,
+ order::{
+ BitOrder,
+ Lsb0,
+ Msb0,
+ },
+ slice::BitSlice,
+ store::BitStore,
+ view::BitViewSized,
+};
+#[cfg(feature = "alloc")]
+use crate::{
+ boxed::BitBox,
+ vec::BitVec,
+};
+
+mod io;
+mod tests;
+
+#[doc = include_str!("../doc/field/BitField.md")]
+pub trait BitField {
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[doc = include_str!("../doc/field/BitField_load.md")]
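+ ///
+ /// A round-trip sketch, assuming the crate's default `Lsb0` ordering:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 0u16;
+ /// data.view_bits_mut::<Lsb0>()[.. 10].store(510u16);
+ /// assert_eq!(data.view_bits::<Lsb0>()[.. 10].load::<u16>(), 510);
+ /// ```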
+ fn load<I>(&self) -> I
+ where I: Integral {
+ if cfg!(target_endian = "little") {
+ self.load_le::<I>()
+ }
+ else if cfg!(target_endian = "big") {
+ self.load_be::<I>()
+ }
+ else {
+ match option_env!("CARGO_PKG_REPOSITORY") {
+ Some(env) => unreachable!(
+ "This architecture is not supported! Please consider \
+ filing an issue at {}",
+ env
+ ),
+ None => unreachable!(
+ "This architecture is not supported! Please consider \
+ filing an issue"
+ ),
+ }
+ }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[doc = include_str!("../doc/field/BitField_store.md")]
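+ ///
+ /// A small sketch, assuming `Msb0` ordering over one byte: the four-bit
+ /// value lands in the byte's high nibble.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut byte = 0u8;
+ /// byte.view_bits_mut::<Msb0>()[.. 4].store(0b1010u8);
+ /// assert_eq!(byte, 0b1010_0000);
+ /// assert_eq!(byte.view_bits::<Msb0>()[.. 4].load::<u8>(), 0b1010);
+ /// ```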
+ fn store<I>(&mut self, value: I)
+ where I: Integral {
+ if cfg!(target_endian = "little") {
+ self.store_le::<I>(value);
+ }
+ else if cfg!(target_endian = "big") {
+ self.store_be::<I>(value);
+ }
+ else {
+ match option_env!("CARGO_PKG_REPOSITORY") {
+ Some(env) => unreachable!(
+ "This architecture is not supported! Please consider \
+ filing an issue at {}",
+ env
+ ),
+ None => unreachable!(
+ "This architecture is not supported! Please consider \
+ filing an issue"
+ ),
+ }
+ }
+ }
+
+ #[doc = include_str!("../doc/field/BitField_load_le.md")]
+ fn load_le<I>(&self) -> I
+ where I: Integral;
+
+ #[doc = include_str!("../doc/field/BitField_load_be.md")]
+ fn load_be<I>(&self) -> I
+ where I: Integral;
+
+ #[doc = include_str!("../doc/field/BitField_store_le.md")]
+ fn store_le<I>(&mut self, value: I)
+ where I: Integral;
+
+ #[doc = include_str!("../doc/field/BitField_store_be.md")]
+ fn store_be<I>(&mut self, value: I)
+ where I: Integral;
+}
+
+#[doc = include_str!("../doc/field/BitField_Lsb0.md")]
+impl<T> BitField for BitSlice<T, Lsb0>
+where T: BitStore
+{
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Lsb0_load_le.md")]
+ fn load_le<I>(&self) -> I
+ where I: Integral {
+ let len = self.len();
+ check::<I>("load", len);
+
+ match self.domain() {
+ // In Lsb0, the head counts distance from LSedge to first live bit.
+ Domain::Enclave(elem) => get(elem, elem.head().into_inner()),
+ Domain::Region { head, body, tail } => {
+ let mut accum = I::ZERO;
+
+ if let Some(elem) = tail {
+ accum = get(elem, 0);
+ }
+
+ for elem in body.iter().rev().map(BitStore::load_value) {
+ maybe_shift_left(&mut accum, bits_of::<T>());
+ accum |= resize::<T::Mem, I>(elem);
+ }
+
+ if let Some(elem) = head {
+ let shamt = elem.head().into_inner();
+ maybe_shift_left(
+ &mut accum,
+ bits_of::<T>() - shamt as usize,
+ );
+ accum |= get::<_, _, I>(elem, shamt);
+ }
+
+ accum
+ },
+ }
+ .pipe(|elem| sign(elem, len))
+ }
+
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Lsb0_load_be.md")]
+ fn load_be<I>(&self) -> I
+ where I: Integral {
+ let len = self.len();
+ check::<I>("load", len);
+
+ match self.domain() {
+ Domain::Enclave(elem) => get(elem, elem.head().into_inner()),
+ Domain::Region { head, body, tail } => {
+ let mut accum = I::ZERO;
+
+ if let Some(elem) = head {
+ accum = get(elem, elem.head().into_inner());
+ }
+
+ for elem in body.iter().map(BitStore::load_value) {
+ maybe_shift_left(&mut accum, bits_of::<T>());
+ accum |= resize::<T::Mem, I>(elem);
+ }
+
+ if let Some(elem) = tail {
+ let shamt = elem.tail().into_inner() as usize;
+ maybe_shift_left(&mut accum, shamt);
+ accum |= get::<_, _, I>(elem, 0);
+ }
+
+ accum
+ },
+ }
+ .pipe(|elem| sign(elem, len))
+ }
+
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Lsb0_store_le.md")]
+ fn store_le<I>(&mut self, mut value: I)
+ where I: Integral {
+ check::<I>("store", self.len());
+
+ match self.domain_mut() {
+ Domain::Enclave(elem) => {
+ let shamt = elem.head().into_inner();
+ set(elem, value, shamt);
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ let shamt = elem.head().into_inner();
+ set(elem, value, shamt);
+ let rshamt = bits_of::<T>() - shamt as usize;
+ maybe_shift_right(&mut value, rshamt);
+ }
+
+ for elem in body.iter_mut() {
+ elem.store_value(resize(value));
+ maybe_shift_right(&mut value, bits_of::<T>());
+ }
+
+ if let Some(elem) = tail {
+ set(elem, value, 0);
+ }
+ },
+ }
+ }
+
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Lsb0_store_be.md")]
+ fn store_be<I>(&mut self, mut value: I)
+ where I: Integral {
+ check::<I>("store", self.len());
+
+ match self.domain_mut() {
+ Domain::Enclave(elem) => {
+ let shamt = elem.head().into_inner();
+ set(elem, value, shamt);
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = tail {
+ let shamt = elem.tail().into_inner() as usize;
+ set(elem, value, 0);
+ maybe_shift_right(&mut value, shamt);
+ }
+
+ for elem in body.iter_mut().rev() {
+ elem.store_value(resize(value));
+ maybe_shift_right(&mut value, bits_of::<T>());
+ }
+
+ if let Some(elem) = head {
+ let shamt = elem.head().into_inner();
+ set(elem, value, shamt);
+ }
+ },
+ }
+ }
+}
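
A sketch of the raw-memory effect of the Lsb0 implementation above, mirroring the `lsb0_u8_any_u5` test later in this patch (the five-bit payload is arbitrary):

    use bitvec::prelude::*;

    fn main() {
        let mut bits = BitArray::<u8, Lsb0>::ZERO;
        let val = 0b1_0110u8; // five-bit payload

        // In Lsb0, bits 2 .. 7 occupy the middle of the byte, so the stored
        // value appears in raw memory shifted up by the head index.
        bits[2 .. 7].store_le(val);
        assert_eq!(bits.as_raw_slice()[0], val << 2);
        assert_eq!(bits[2 .. 7].load_le::<u8>(), val);
    }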
+
+#[doc = include_str!("../doc/field/BitField_Msb0.md")]
+impl<T> BitField for BitSlice<T, Msb0>
+where T: BitStore
+{
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Msb0_load_le.md")]
+ fn load_le<I>(&self) -> I
+ where I: Integral {
+ let len = self.len();
+ check::<I>("load", len);
+
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let shamt = bits_of::<T>() as u8 - elem.tail().into_inner();
+ get(elem, shamt)
+ },
+ Domain::Region { head, body, tail } => {
+ let mut accum = I::ZERO;
+
+ if let Some(elem) = tail {
+ let shamt = bits_of::<T>() as u8 - elem.tail().into_inner();
+ accum = get(elem, shamt);
+ }
+
+ for elem in body.iter().rev().map(BitStore::load_value) {
+ maybe_shift_left(&mut accum, bits_of::<T>());
+ accum |= resize::<T::Mem, I>(elem);
+ }
+
+ if let Some(elem) = head {
+ let shamt =
+ bits_of::<T>() - elem.head().into_inner() as usize;
+ maybe_shift_left(&mut accum, shamt);
+ accum |= get::<_, _, I>(elem, 0);
+ }
+
+ accum
+ },
+ }
+ .pipe(|elem| sign(elem, len))
+ }
+
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Msb0_load_be.md")]
+ fn load_be<I>(&self) -> I
+ where I: Integral {
+ let len = self.len();
+ check::<I>("load", len);
+
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let shamt = bits_of::<T>() as u8 - elem.tail().into_inner();
+ get(elem, shamt)
+ },
+ Domain::Region { head, body, tail } => {
+ let mut accum = I::ZERO;
+
+ if let Some(elem) = head {
+ accum = get(elem, 0);
+ }
+
+ for elem in body.iter().map(BitStore::load_value) {
+ maybe_shift_left(&mut accum, bits_of::<T>());
+ accum |= resize::<T::Mem, I>(elem);
+ }
+
+ if let Some(elem) = tail {
+ let shamt = elem.tail().into_inner();
+ maybe_shift_left(&mut accum, shamt as usize);
+ accum |= get::<_, _, I>(elem, bits_of::<T>() as u8 - shamt);
+ }
+
+ accum
+ },
+ }
+ .pipe(|elem| sign(elem, len))
+ }
+
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Msb0_store_le.md")]
+ fn store_le<I>(&mut self, mut value: I)
+ where I: Integral {
+ check::<I>("store", self.len());
+
+ match self.domain_mut() {
+ Domain::Enclave(elem) => {
+ let shamt = bits_of::<T>() as u8 - elem.tail().into_inner();
+ set(elem, value, shamt);
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ let shamt =
+ bits_of::<T>() - elem.head().into_inner() as usize;
+ set(elem, value, 0);
+ maybe_shift_right(&mut value, shamt);
+ }
+
+ for elem in body.iter_mut() {
+ elem.store_value(resize(value));
+ maybe_shift_right(&mut value, bits_of::<T>());
+ }
+
+ if let Some(elem) = tail {
+ let shamt = bits_of::<T>() as u8 - elem.tail().into_inner();
+ set(elem, value, shamt);
+ }
+ },
+ }
+ }
+
+ #[inline]
+ #[doc = include_str!("../doc/field/BitField_Msb0_store_be.md")]
+ fn store_be<I>(&mut self, mut value: I)
+ where I: Integral {
+ check::<I>("store", self.len());
+
+ match self.domain_mut() {
+ Domain::Enclave(elem) => {
+ let shamt = bits_of::<T>() as u8 - elem.tail().into_inner();
+ set(elem, value, shamt);
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = tail {
+ let tail = elem.tail().into_inner() as usize;
+ let shamt = bits_of::<T>() - tail;
+ set(elem, value, shamt as u8);
+ maybe_shift_right(&mut value, tail);
+ }
+
+ for elem in body.iter_mut().rev() {
+ elem.store_value(resize(value));
+ maybe_shift_right(&mut value, bits_of::<T>());
+ }
+
+ if let Some(elem) = head {
+ set(elem, value, 0);
+ }
+ },
+ }
+ }
+}
+
+#[doc = include_str!("../doc/field/impl_BitArray.md")]
+impl<A, O> BitField for BitArray<A, O>
+where
+ O: BitOrder,
+ A: BitViewSized,
+ BitSlice<A::Store, O>: BitField,
+{
+ #[inline(always)]
+ fn load_le<I>(&self) -> I
+ where I: Integral {
+ let mut accum = I::ZERO;
+
+ for elem in self.as_raw_slice().iter().map(BitStore::load_value).rev() {
+ maybe_shift_left(&mut accum, bits_of::<A::Store>());
+ accum |= resize::<_, I>(elem);
+ }
+
+ sign(accum, self.len())
+ }
+
+ #[inline(always)]
+ fn load_be<I>(&self) -> I
+ where I: Integral {
+ let mut accum = I::ZERO;
+
+ for elem in self.as_raw_slice().iter().map(BitStore::load_value) {
+ maybe_shift_left(&mut accum, bits_of::<A::Store>());
+ accum |= resize::<_, I>(elem);
+ }
+
+ sign(accum, self.len())
+ }
+
+ #[inline(always)]
+ fn store_le<I>(&mut self, mut value: I)
+ where I: Integral {
+ for slot in self.as_raw_mut_slice() {
+ slot.store_value(resize(value));
+ maybe_shift_right(&mut value, bits_of::<A::Store>());
+ }
+ }
+
+ #[inline(always)]
+ fn store_be<I>(&mut self, mut value: I)
+ where I: Integral {
+ for slot in self.as_raw_mut_slice().iter_mut().rev() {
+ slot.store_value(resize(value));
+ maybe_shift_right(&mut value, bits_of::<A::Store>());
+ }
+ }
+}
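
The array implementation above bypasses the bit-slice domain logic and walks whole elements; a sketch of the observable element ordering, mirroring the `lsb0_u8_le_u24`/`lsb0_u8_be_u24` tests below (the 24-bit payload is arbitrary):

    use bitvec::prelude::*;

    fn main() {
        let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO;
        let val = 0x00C0_FFEEu32; // 24-bit payload

        bits.store_le(val);
        // Little-endian element order: least-significant byte at the lowest address.
        assert_eq!(bits.as_raw_slice(), &val.to_le_bytes()[.. 3]);

        bits.store_be(val);
        // Big-endian element order: most-significant live byte first.
        assert_eq!(bits.as_raw_slice(), &val.to_be_bytes()[1 ..]);
        assert_eq!(bits.load_be::<u32>(), val);
    }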
+
+#[cfg(feature = "alloc")]
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitField for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitField,
+{
+ #[inline(always)]
+ fn load_le<I>(&self) -> I
+ where I: Integral {
+ self.as_bitslice().load_le()
+ }
+
+ #[inline(always)]
+ fn load_be<I>(&self) -> I
+ where I: Integral {
+ self.as_bitslice().load_be()
+ }
+
+ #[inline(always)]
+ fn store_le<I>(&mut self, value: I)
+ where I: Integral {
+ self.as_mut_bitslice().store_le(value)
+ }
+
+ #[inline(always)]
+ fn store_be<I>(&mut self, value: I)
+ where I: Integral {
+ self.as_mut_bitslice().store_be(value)
+ }
+}
+
+#[cfg(feature = "alloc")]
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitField for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitField,
+{
+ #[inline(always)]
+ fn load_le<I>(&self) -> I
+ where I: Integral {
+ self.as_bitslice().load_le()
+ }
+
+ #[inline(always)]
+ fn load_be<I>(&self) -> I
+ where I: Integral {
+ self.as_bitslice().load_be()
+ }
+
+ #[inline(always)]
+ fn store_le<I>(&mut self, value: I)
+ where I: Integral {
+ self.as_mut_bitslice().store_le(value)
+ }
+
+ #[inline(always)]
+ fn store_be<I>(&mut self, value: I)
+ where I: Integral {
+ self.as_mut_bitslice().store_be(value)
+ }
+}
+
+/** Asserts that a bit-slice is not empty and no wider than the integer being
+transferred.
+
+## Type Parameters
+
+- `I`: The integer type being stored into or loaded out of a bit-slice.
+
+## Parameters
+
+- `action`: the verb being performed. One of `"load"` or `"store"`.
+- `len`: the length of the bit-slice under test.
+
+## Panics
+
+This panics if `len` is not in `1 ..= I::BITS`.
+**/
+fn check<I>(action: &'static str, len: usize)
+where I: Integral {
+ assert!(
+ (1 ..= bits_of::<I>()).contains(&len),
+ "cannot {} {} bits from a {}-bit region",
+ action,
+ bits_of::<I>(),
+ len,
+ );
+}
+
+/// Shifts a value to the left, if it can support the shift amount.
+fn maybe_shift_left<T: Integral>(elem: &mut T, shamt: usize) {
+ if bits_of::<T>() > shamt {
+ *elem <<= shamt;
+ }
+}
+
+/// Shifts a value to the right, if it can support the shift amount.
+fn maybe_shift_right<T: Integral>(elem: &mut T, shamt: usize) {
+ if bits_of::<T>() > shamt {
+ *elem >>= shamt;
+ }
+}
+
+#[doc = include_str!("../doc/field/get.md")]
+fn get<T, O, I>(elem: PartialElement<Const, T, O>, shamt: u8) -> I
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Integral,
+{
+ resize::<T::Mem, I>(elem.load_value() >> shamt)
+}
+
+#[doc = include_str!("../doc/field/set.md")]
+fn set<T, O, I>(mut elem: PartialElement<Mut, T, O>, value: I, shamt: u8)
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Integral,
+{
+ elem.store_value(resize::<I, T::Mem>(value) << shamt);
+}
+
+#[doc = include_str!("../doc/field/sign.md")]
+fn sign<I>(elem: I, width: usize) -> I
+where I: Integral {
+ if dvl::is_unsigned::<I>() {
+ return elem;
+ }
+ // Find the number of high bits that are not loaded.
+ let shamt = bits_of::<I>() - width;
+ // Shift left, so that the highest loaded bit is now in the sign position.
+ let shl: I = elem << shamt;
+ // Shift right with sign extension back to the original place.
+ shl >> shamt
+}
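
A worked sketch of the shift pair that `sign` performs, specialized by hand to `i8`; this standalone helper is illustrative, not the crate's generic code:

    fn sign_extend_i8(elem: i8, width: u32) -> i8 {
        // Park the highest loaded bit in the sign position, then let the
        // arithmetic right shift copy it back down through the unused bits.
        let shamt = i8::BITS - width;
        (elem << shamt) >> shamt
    }

    fn main() {
        // Five-bit two's-complement 0b1_0110 is -10.
        assert_eq!(sign_extend_i8(0b1_0110, 5), -10);
        // Values with the high loaded bit clear pass through unchanged.
        assert_eq!(sign_extend_i8(0b0_0110, 5), 6);
    }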
+
+#[doc = include_str!("../doc/field/resize.md")]
+fn resize<T, U>(value: T) -> U
+where
+ T: Integral,
+ U: Integral,
+{
+ let mut out = U::ZERO;
+ let size_t = mem::size_of::<T>();
+ let size_u = mem::size_of::<U>();
+
+ unsafe {
+ resize_inner::<T, U>(&value, &mut out, size_t, size_u);
+ }
+
+ out
+}
+
+/// Performs little-endian byte-order register resizing.
+#[cfg(target_endian = "little")]
+unsafe fn resize_inner<T, U>(
+ src: &T,
+ dst: &mut U,
+ size_t: usize,
+ size_u: usize,
+) {
+	// In LE, the least-significant byte sits at the base address, so resizing
+	// is just a `memmove` into a zeroed slot, taking only the lesser width.
+ ptr::copy_nonoverlapping(
+ src as *const T as *const u8,
+ dst as *mut U as *mut u8,
+ size_t.min(size_u),
+ );
+}
+
+/// Performs big-endian byte-order register resizing.
+#[cfg(target_endian = "big")]
+unsafe fn resize_inner<T, U>(
+ src: &T,
+ dst: &mut U,
+ size_t: usize,
+ size_u: usize,
+) {
+ let src = src as *const T as *const u8;
+ let dst = dst as *mut U as *mut u8;
+
+ // In BE, shrinking a value requires moving the source base-pointer up in
+ // memory (to a higher address, lower significance),
+ if size_t > size_u {
+ ptr::copy_nonoverlapping(src.add(size_t - size_u), dst, size_u);
+ }
+ // While expanding a value requires moving the *destination* base-pointer
+ // up (and leaving the lower address, higher significance bytes zeroed).
+ else {
+ ptr::copy_nonoverlapping(src, dst.add(size_u - size_t), size_t);
+ }
+}
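
What `resize` accomplishes, sketched with ordinary integer casts; the crate uses the byte copy above so that it stays generic over `Integral` on both byte orders:

    fn main() {
        // Narrowing keeps the low-order bits, regardless of the host's endianness.
        let wide: u32 = 0x1234_5678;
        let narrow = wide as u16;
        assert_eq!(narrow, 0x5678);

        // Widening zero-extends into the new high-order bits.
        let back = narrow as u32;
        assert_eq!(back, 0x0000_5678);
    }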
diff --git a/src/field/io.rs b/src/field/io.rs
new file mode 100644
index 0000000..4edd8d2
--- /dev/null
+++ b/src/field/io.rs
@@ -0,0 +1,106 @@
+#![cfg(feature = "std")]
+#![doc = include_str!("../../doc/field/io.md")]
+
+use core::mem;
+use std::io::{
+ self,
+ Read,
+ Write,
+};
+
+use super::BitField;
+use crate::{
+ mem::bits_of,
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+ vec::BitVec,
+};
+
+#[doc = include_str!("../../doc/field/io/Read_BitSlice.md")]
+impl<T, O> Read for &BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitField,
+{
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut count = 0;
+ self.chunks_exact(bits_of::<u8>())
+ .zip(buf.iter_mut())
+ .for_each(|(byte, slot)| {
+ *slot = byte.load_be();
+ count += 1;
+ });
+ *self = unsafe { self.get_unchecked(count * bits_of::<u8>() ..) };
+ Ok(count)
+ }
+}
+
+#[doc = include_str!("../../doc/field/io/Write_BitSlice.md")]
+impl<T, O> Write for &mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitField,
+{
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let mut count = 0;
+ unsafe { self.chunks_exact_mut(bits_of::<u8>()).remove_alias() }
+ .zip(buf.iter().copied())
+ .for_each(|(slot, byte)| {
+ slot.store_be(byte);
+ count += 1;
+ });
+ *self = unsafe {
+ mem::take(self).get_unchecked_mut(count * bits_of::<u8>() ..)
+ };
+ Ok(count)
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[doc = include_str!("../../doc/field/io/Read_BitVec.md")]
+impl<T, O> Read for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitField,
+{
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let bytes_read = self.as_bitslice().read(buf)?;
+ let bits = bytes_read * bits_of::<u8>();
+ self.shift_left(bits);
+ self.truncate(self.len() - bits);
+ Ok(bytes_read)
+ }
+}
+
+#[doc = include_str!("../../doc/field/io/Write_BitVec.md")]
+impl<T, O> Write for BitVec<T, O>
+where
+ O: BitOrder,
+ T: BitStore,
+ BitSlice<T, O>: BitField,
+{
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let len = self.len();
+ self.resize(len + buf.len() * bits_of::<u8>(), false);
+ unsafe { self.get_unchecked_mut(len ..) }.write(buf)
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
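
A usage sketch of the `Read`/`Write` adapters defined above, in the spirit of the `read_bits`/`write_bits` tests that follow; the buffers and values are arbitrary:

    use bitvec::prelude::*;
    use std::io::{Read, Write};

    fn main() -> std::io::Result<()> {
        // Reading from a bit-slice hands out whole bytes from its front and
        // advances the slice handle past them.
        let data = [0xA5u8, 0x5A];
        let mut bits = data.view_bits::<Msb0>();
        let mut buf = [0u8; 2];
        assert_eq!(bits.read(&mut buf)?, 2);
        assert_eq!(buf, [0xA5, 0x5A]);
        assert!(bits.is_empty());

        // Writing into a bit-vector appends whole bytes at its back.
        let mut bv = BitVec::<u8, Msb0>::new();
        assert_eq!(bv.write(&[0xF0u8])?, 1);
        assert_eq!(bv.len(), 8);
        assert_eq!(bv.load_be::<u8>(), 0xF0);
        Ok(())
    }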
diff --git a/src/field/tests.rs b/src/field/tests.rs
new file mode 100644
index 0000000..a241503
--- /dev/null
+++ b/src/field/tests.rs
@@ -0,0 +1,315 @@
+#![cfg(test)]
+
+#[cfg(feature = "std")]
+use std::io;
+
+use rand::prelude::*;
+
+use crate::prelude::*;
+
+#[test]
+fn lsb0_u8_any_u5() {
+ let mut bits = BitArray::<u8, Lsb0>::ZERO;
+
+ let val = random::<u8>() & 0x1Fu8;
+ bits[2 .. 7].store_le(val);
+ assert_eq!(
+ bits.as_raw_slice()[0],
+ val << 2,
+ "{:08b} != {:08b}",
+ bits.as_raw_slice()[0],
+ val << 2,
+ );
+ assert_eq!(bits[2 .. 7].load_le::<u8>(), val);
+
+ let neg = val | 0xF0;
+ bits[2 .. 7].store_le(neg);
+ assert_eq!(bits[2 .. 7].load_le::<i8>(), neg as i8);
+
+ let val = random::<u8>() & 0x1Fu8;
+ bits[2 .. 7].store_be(val);
+ assert_eq!(
+ bits.as_raw_slice()[0],
+ val << 2,
+ "{:08b} != {:08b}",
+ bits.as_raw_slice()[0],
+ val << 2,
+ );
+ assert_eq!(bits[2 .. 7].load_be::<u8>(), val);
+
+ let neg = val | 0xF0;
+ bits[2 .. 7].store_be(neg);
+ assert_eq!(bits[2 .. 7].load_be::<i8>(), neg as i8);
+}
+
+#[test]
+fn lsb0_u8_le_u20() {
+ let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_0F_FF_FFu32;
+ let bytes = (val << 2).to_le_bytes();
+ bits[2 .. 22].store_le(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[.. 3]);
+ assert_eq!(bits[2 .. 22].load_le::<u32>(), val);
+
+ let neg = val | 0xFF_F8_00_00u32;
+ bits[2 .. 22].store_le(neg);
+ assert_eq!(
+ bits[2 .. 22].load_le::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+ bits[2 .. 22].load_le::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn lsb0_u8_be_u20() {
+ let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_0F_FF_FFu32;
+ let mut bytes = (val << 2).to_be_bytes();
+ // Lsb0 _be has *weird* effects in raw memory.
+ bytes[1] <<= 2;
+ bytes[3] >>= 2;
+ bits[2 .. 22].store_be(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[1 ..]);
+ assert_eq!(bits[2 .. 22].load_be::<u32>(), val);
+
+ let neg = val | 0xFF_F8_00_00u32;
+ bits[2 .. 22].store_be(neg);
+ assert_eq!(
+ bits[2 .. 22].load_be::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+		bits[2 .. 22].load_be::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn msb0_u8_any_u5() {
+ let mut bits = BitArray::<u8, Msb0>::ZERO;
+
+ let val = random::<u8>() & 0x1Fu8;
+ bits[2 .. 7].store_le(val);
+ assert_eq!(
+ bits.as_raw_slice()[0],
+ val << 1,
+ "{:08b} != {:08b}",
+ bits.as_raw_slice()[0],
+ val << 1,
+ );
+ assert_eq!(bits[2 .. 7].load_le::<u8>(), val);
+
+ let neg = val | 0xF0;
+ bits[2 .. 7].store_le(neg);
+ assert_eq!(bits[2 .. 7].load_le::<i8>(), neg as i8);
+
+ let val = random::<u8>() & 0x1Fu8;
+ bits[2 .. 7].store_be(val);
+ assert_eq!(
+ bits.as_raw_slice()[0],
+ val << 1,
+ "{:08b} != {:08b}",
+ bits.as_raw_slice()[0],
+ val << 1,
+ );
+ assert_eq!(bits[2 .. 7].load_be::<u8>(), val);
+
+ let neg = val | 0xF0;
+ bits[2 .. 7].store_be(neg);
+ assert_eq!(bits[2 .. 7].load_be::<i8>(), neg as i8);
+}
+
+#[test]
+fn msb0_u8_le_u20() {
+ let mut bits = BitArray::<[u8; 3], Msb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_0F_FF_FFu32;
+ let mut bytes = (val << 2).to_le_bytes();
+ // Msb0 _le has *weird* effects in raw memory.
+ bytes[0] >>= 2;
+ bytes[2] <<= 2;
+ bits[2 .. 22].store_le(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[.. 3]);
+ assert_eq!(bits[2 .. 22].load_le::<u32>(), val);
+
+ let neg = val | 0xFF_F8_00_00u32;
+ bits[2 .. 22].store_le(neg);
+ assert_eq!(
+ bits[2 .. 22].load_le::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+ bits[2 .. 22].load_le::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn msb0_u8_be_u20() {
+ let mut bits = BitArray::<[u8; 3], Msb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_0F_FF_FFu32;
+ let bytes = (val << 2).to_be_bytes();
+ bits[2 .. 22].store_be(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[1 ..]);
+ assert_eq!(bits[2 .. 22].load_be::<u32>(), val);
+
+ let neg = val | 0xFF_F8_00_00u32;
+ bits[2 .. 22].store_be(neg);
+ assert_eq!(
+ bits[2 .. 22].load_be::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+		bits[2 .. 22].load_be::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn lsb0_u8_le_u24() {
+ let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_FF_FF_FFu32;
+ let bytes = val.to_le_bytes();
+ bits.store_le(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[.. 3]);
+ assert_eq!(
+ bits.load_le::<u32>(),
+ val,
+ "{:08x} != {:08x}",
+		bits.load_le::<u32>(),
+ val,
+ );
+
+ let neg = val | 0xFF_80_00_00u32;
+ bits.store_le(neg);
+ assert_eq!(
+ bits.load_le::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+ bits.load_le::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn lsb0_u8_be_u24() {
+ let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_FF_FF_FFu32;
+ let bytes = val.to_be_bytes();
+ bits.store_be(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[1 ..]);
+ assert_eq!(bits.load_be::<u32>(), val);
+
+ let neg = val | 0xFF_80_00_00u32;
+ bits.store_be(neg);
+ assert_eq!(
+ bits.load_be::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+ bits.load_be::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn msb0_u8_le_u24() {
+ let mut bits = BitArray::<[u8; 3], Msb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_FF_FF_FFu32;
+ let bytes = val.to_le_bytes();
+ bits.store_le(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[.. 3]);
+ assert_eq!(bits.load_le::<u32>(), val);
+
+ let neg = val | 0xFF_80_00_00u32;
+ bits.store_le(neg);
+ assert_eq!(
+ bits.load_le::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+ bits.load_le::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+fn msb0_u8_be_u24() {
+ let mut bits = BitArray::<[u8; 3], Msb0>::ZERO;
+
+ let val = random::<u32>() & 0x00_FF_FF_FFu32;
+ let bytes = val.to_be_bytes();
+ bits.store_be(val);
+ assert_eq!(bits.as_raw_slice(), &bytes[1 ..]);
+ assert_eq!(bits.load_be::<u32>(), val);
+
+ let neg = val | 0xFF_80_00_00u32;
+ bits.store_be(neg);
+ assert_eq!(
+ bits.load_be::<i32>(),
+ neg as i32,
+ "{:08x} != {:08x}",
+ bits.load_be::<i32>(),
+ neg as i32,
+ );
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn read_bits() {
+ let data = [0x136Cu16, 0x8C63];
+ let base = data.view_bits::<Msb0>().as_bitptr();
+ let mut bits = &data.view_bits::<Msb0>()[4 ..];
+
+ assert_eq!(unsafe { bits.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(bits.len(), 28);
+
+ let mut transfer = [0u8; 4];
+ let last_ptr = &mut transfer[3] as *mut _;
+ let mut transfer_handle = &mut transfer[..];
+
+ assert_eq!(io::copy(&mut bits, &mut transfer_handle).unwrap(), 3);
+ assert_eq!(unsafe { bits.as_bitptr().offset_from(base) }, 28);
+ assert_eq!(transfer_handle.as_mut_ptr() as *mut _, last_ptr);
+ assert_eq!(transfer[.. 3], [0x36, 0xC8, 0xC6][..]);
+
+ let mut bv = data.view_bits::<Msb0>()[4 ..].to_bitvec();
+ let mut transfer = [0u8; 3];
+ assert_eq!(io::copy(&mut bv, &mut &mut transfer[..]).unwrap(), 3);
+ assert_eq!(bv, bits![0, 0, 1, 1]);
+ assert_eq!(transfer, [0x36, 0xC8, 0xC6]);
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn write_bits() {
+ let mut bv = bitvec![usize, Msb0; 0; 4];
+ assert_eq!(
+ io::copy(&mut &[0xC3u8, 0xF0, 0x69][..], &mut bv).unwrap(),
+ 3,
+ );
+
+ assert_eq!(bv, bits![
+ 0, 0, 0, 0, // original
+ 1, 1, 0, 0, 0, 0, 1, 1, // byte 0
+ 1, 1, 1, 1, 0, 0, 0, 0, // byte 1
+ 0, 1, 1, 0, 1, 0, 0, 1, // byte 2
+ ]);
+
+ let mut data = [0u8; 4];
+ let base = data.view_bits_mut::<Lsb0>().as_mut_bitptr();
+ let mut bits = &mut data.view_bits_mut::<Lsb0>()[4 ..];
+ assert_eq!(unsafe { bits.as_mut_bitptr().offset_from(base) }, 4);
+ assert_eq!(bits.len(), 28);
+ assert_eq!(
+ io::copy(&mut &[0xA5u8, 0xB4, 0x3C][..], &mut bits).unwrap(),
+ 3,
+ );
+ assert_eq!(unsafe { bits.as_mut_bitptr().offset_from(base) }, 28);
+ assert_eq!(bits.len(), 4);
+
+ assert_eq!(data, [0b1010_0000, 0b1011_0101, 0b0011_0100, 0b0000_1100]);
+}
diff --git a/src/index.rs b/src/index.rs
new file mode 100644
index 0000000..3a8278c
--- /dev/null
+++ b/src/index.rs
@@ -0,0 +1,1349 @@
+#![doc = include_str!("../doc/index.md")]
+
+use core::{
+ any,
+ fmt::{
+ self,
+ Binary,
+ Debug,
+ Display,
+ Formatter,
+ },
+ iter::{
+ FusedIterator,
+ Sum,
+ },
+ marker::PhantomData,
+ ops::{
+ BitAnd,
+ BitOr,
+ Not,
+ },
+};
+
+use crate::{
+ mem::{
+ bits_of,
+ BitRegister,
+ },
+ order::BitOrder,
+};
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/index/BitIdx.md")]
+#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitIdx<R = usize>
+where R: BitRegister
+{
+ /// Semantic index counter within a register, constrained to `0 .. R::BITS`.
+ idx: u8,
+ /// Marker for the register type.
+ _ty: PhantomData<R>,
+}
+
+impl<R> BitIdx<R>
+where R: BitRegister
+{
+ /// The inclusive maximum index within an `R` element.
+ pub const MAX: Self = Self {
+ idx: R::MASK,
+ _ty: PhantomData,
+ };
+ /// The inclusive minimum index within an `R` element.
+ pub const MIN: Self = Self {
+ idx: 0,
+ _ty: PhantomData,
+ };
+
+ /// Wraps a counter value as a known-good index into an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `idx`: The counter value to mark as an index. This must be in the
+ /// range `0 .. R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This returns `idx`, either marked as a valid `BitIdx` or an invalid
+ /// `BitIdxError` by whether it is within the valid range `0 .. R::BITS`.
+ #[inline]
+ pub fn new(idx: u8) -> Result<Self, BitIdxError<R>> {
+ if idx >= bits_of::<R>() as u8 {
+ return Err(BitIdxError::new(idx));
+ }
+ Ok(unsafe { Self::new_unchecked(idx) })
+ }
+
+ /// Wraps a counter value as an assumed-good index into an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `idx`: The counter value to mark as an index. This must be in the
+ /// range `0 .. R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This unconditionally marks `idx` as a valid bit-index.
+ ///
+ /// ## Safety
+ ///
+ /// If the `idx` value is outside the valid range, then the program is
+ /// incorrect. Debug builds will panic; release builds do not inspect the
+ /// value or specify a behavior.
+ #[inline]
+ pub unsafe fn new_unchecked(idx: u8) -> Self {
+ debug_assert!(
+ idx < bits_of::<R>() as u8,
+ "Bit index {} cannot exceed type width {}",
+ idx,
+ bits_of::<R>(),
+ );
+ Self {
+ idx,
+ _ty: PhantomData,
+ }
+ }
+
+ /// Removes the index wrapper, leaving the internal counter.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_inner(self) -> u8 {
+ self.idx
+ }
+
+ /// Increments an index counter, wrapping at the back edge of the register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`: The index to increment.
+ ///
+ /// ## Returns
+ ///
+ /// - `.0`: The next index after `self`.
+ /// - `.1`: Indicates whether the new index is in the next memory address.
+ #[inline]
+ pub fn next(self) -> (Self, bool) {
+ let next = self.idx + 1;
+ (
+ unsafe { Self::new_unchecked(next & R::MASK) },
+ next == bits_of::<R>() as u8,
+ )
+ }
+
+ /// Decrements an index counter, wrapping at the front edge of the register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`: The index to decrement.
+ ///
+ /// ## Returns
+ ///
+ /// - `.0`: The previous index before `self`.
+ /// - `.1`: Indicates whether the new index is in the previous memory
+ /// address.
+ #[inline]
+ pub fn prev(self) -> (Self, bool) {
+ let prev = self.idx.wrapping_sub(1);
+ (
+ unsafe { Self::new_unchecked(prev & R::MASK) },
+ self.idx == 0,
+ )
+ }
+
+ /// Computes the bit position corresponding to `self` under some ordering.
+ ///
+ /// This forwards to [`O::at::<R>`], which is the only public, safe,
+ /// constructor for a position counter.
+ ///
+ /// [`O::at::<R>`]: crate::order::BitOrder::at
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn position<O>(self) -> BitPos<R>
+ where O: BitOrder {
+ O::at::<R>(self)
+ }
+
+ /// Computes the bit selector corresponding to `self` under an ordering.
+ ///
+ /// This forwards to [`O::select::<R>`], which is the only public, safe,
+ /// constructor for a bit selector.
+ ///
+ /// [`O::select::<R>`]: crate::order::BitOrder::select
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn select<O>(self) -> BitSel<R>
+ where O: BitOrder {
+ O::select::<R>(self)
+ }
+
+ /// Computes the bit selector for `self` as an accessor mask.
+ ///
+ /// This is a type-cast over [`Self::select`].
+ ///
+ /// [`Self::select`]: Self::select
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn mask<O>(self) -> BitMask<R>
+ where O: BitOrder {
+ self.select::<O>().mask()
+ }
+
+ /// Iterates over all indices between an inclusive start and exclusive end
+ /// point.
+ ///
+ /// Because implementation details of the range type family, including the
+ /// [`RangeBounds`] trait, are not yet stable, and heterogeneous ranges are
+ /// not supported, this must be an opaque iterator rather than a direct
+ /// [`Range<BitIdx<R>>`].
+ ///
+ /// # Parameters
+ ///
+ /// - `from`: The inclusive low bound of the range. This will be the first
+ /// index produced by the iterator.
+ /// - `upto`: The exclusive high bound of the range. The iterator will halt
+ /// before yielding an index of this value.
+ ///
+ /// # Returns
+ ///
+ /// An opaque iterator that is equivalent to the range `from .. upto`.
+ ///
+ /// # Requirements
+ ///
+ /// `from` must be no greater than `upto`.
+ ///
+ /// [`RangeBounds`]: core::ops::RangeBounds
+ /// [`Range<BitIdx<R>>`]: core::ops::Range
+ #[inline]
+ pub fn range(
+ self,
+ upto: BitEnd<R>,
+ ) -> impl Iterator<Item = Self>
+ + DoubleEndedIterator
+ + ExactSizeIterator
+ + FusedIterator {
+ let (from, upto) = (self.into_inner(), upto.into_inner());
+ debug_assert!(from <= upto, "Ranges must run from low to high");
+ (from .. upto).map(|val| unsafe { Self::new_unchecked(val) })
+ }
+
+ /// Iterates over all possible index values.
+ #[inline]
+ pub fn range_all() -> impl Iterator<Item = Self>
+ + DoubleEndedIterator
+ + ExactSizeIterator
+ + FusedIterator {
+ BitIdx::MIN.range(BitEnd::MAX)
+ }
+
+ /// Computes the jump distance for some number of bits away from a starting
+ /// index.
+ ///
+ /// This computes the number of elements by which to adjust a base pointer,
+ /// and then the bit index of the destination bit in the new referent
+ /// register element.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: An index within some element, from which the offset is
+ /// computed.
+ /// - `by`: The distance by which to jump. Negative values move lower in the
+ /// index and element-pointer space; positive values move higher.
+ ///
+ /// # Returns
+ ///
+ /// - `.0`: The number of elements `R` by which to adjust a base pointer.
+ /// This value can be passed directly into [`ptr::offset`].
+ /// - `.1`: The index of the destination bit within the destination element.
+ ///
+ /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset
+ pub(crate) fn offset(self, by: isize) -> (isize, Self) {
+ /* Signed-add `self.idx` to the jump distance. This will almost
+ * certainly not wrap (as the crate imposes restrictions well below
+ * `isize::MAX`), but correctness never hurts. The resulting sum is a
+ * bit distance that is then broken into an element distance and final
+ * bit index.
+ */
+ let far = by.wrapping_add(self.into_inner() as isize);
+
+ let (elts, head) = (far >> R::INDX, far as u8 & R::MASK);
+
+ (elts, unsafe { Self::new_unchecked(head) })
+ }
+
+ /// Computes the span information for a region beginning at `self` for `len`
+ /// bits.
+ ///
+ /// The span information is the number of elements in the region that hold
+ /// live bits, and the position of the tail marker after the live bits.
+ ///
+ /// This forwards to [`BitEnd::span`], as the computation is identical for
+ /// the two types. Beginning a span at any `Idx` is equivalent to beginning
+ /// it at the tail of a previous span.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: The start bit of the span.
+ /// - `len`: The number of bits in the span.
+ ///
+ /// # Returns
+ ///
+ /// - `.0`: The number of elements, starting in the element that contains
+ /// `self`, that contain live bits of the span.
+ /// - `.1`: The tail counter of the span’s end point.
+ ///
+ /// [`BitEnd::span`]: crate::index::BitEnd::span
+ pub(crate) fn span(self, len: usize) -> (usize, BitEnd<R>) {
+ unsafe { BitEnd::<R>::new_unchecked(self.into_inner()) }.span(len)
+ }
+}
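
`offset` is crate-internal, so it cannot be exercised from outside; a hand-computed sketch of its arithmetic for `R = u8` (`INDX = 3`, `MASK = 7`), agreeing with the `jumps` test at the bottom of this file:

    fn offset_u8(idx: u8, by: isize) -> (isize, u8) {
        // Fold the starting bit index into the jump distance, then split the
        // total into whole-element strides and a residual bit index.
        let far = by.wrapping_add(idx as isize);
        (far >> 3, far as u8 & 7)
    }

    fn main() {
        assert_eq!(offset_u8(1, 2), (0, 3));   // stays within the element
        assert_eq!(offset_u8(7, 1), (1, 0));   // crosses into the next element
        assert_eq!(offset_u8(2, -3), (-1, 7)); // crosses backwards
    }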
+
+impl<R> Binary for BitIdx<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "{:0>1$b}", self.idx, R::INDX as usize)
+ }
+}
+
+impl<R> Debug for BitIdx<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitIdx<{}>({})", any::type_name::<R>(), self)
+ }
+}
+
+impl<R> Display for BitIdx<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Binary::fmt(self, fmt)
+ }
+}
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/index/BitIdxError.md")]
+#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitIdxError<R = usize>
+where R: BitRegister
+{
+ /// The value that is invalid as a [`BitIdx<R>`].
+ ///
+ /// [`BitIdx<R>`]: crate::index::BitIdx
+ err: u8,
+ /// Marker for the register type.
+ _ty: PhantomData<R>,
+}
+
+impl<R> BitIdxError<R>
+where R: BitRegister
+{
+ /// Marks a counter value as invalid to be an index for an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `err`: The counter value to mark as an error. This must be greater
+ /// than `R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This returns `err`, marked as an invalid index for `R`.
+ ///
+ /// ## Panics
+ ///
+ /// Debug builds panic when `err` is a valid index for `R`.
+ pub(crate) fn new(err: u8) -> Self {
+ debug_assert!(
+ err >= bits_of::<R>() as u8,
+ "Bit index {} is valid for type width {}",
+ err,
+ bits_of::<R>(),
+ );
+ Self {
+ err,
+ _ty: PhantomData,
+ }
+ }
+
+ /// Removes the error wrapper, leaving the internal counter.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_inner(self) -> u8 {
+ self.err
+ }
+}
+
+impl<R> Debug for BitIdxError<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitIdxError<{}>({})", any::type_name::<R>(), self.err)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<R> Display for BitIdxError<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "the value {} is too large to index into {} ({} bits wide)",
+ self.err,
+ any::type_name::<R>(),
+ bits_of::<R>(),
+ )
+ }
+}
+
+#[cfg(feature = "std")]
+impl<R> std::error::Error for BitIdxError<R> where R: BitRegister {}
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/index/BitEnd.md")]
+#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitEnd<R = usize>
+where R: BitRegister
+{
+	/// Semantic tail counter within or after a register, constrained to `0 ..=
+ /// R::BITS`.
+ end: u8,
+ /// Marker for the register type.
+ _ty: PhantomData<R>,
+}
+
+impl<R> BitEnd<R>
+where R: BitRegister
+{
+ /// The inclusive maximum tail within (or after) an `R` element.
+ pub const MAX: Self = Self {
+ end: bits_of::<R>() as u8,
+ _ty: PhantomData,
+ };
+ /// The inclusive minimum tail within (or after) an `R` element.
+ pub const MIN: Self = Self {
+ end: 0,
+ _ty: PhantomData,
+ };
+
+ /// Wraps a counter value as a known-good tail of an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `end`: The counter value to mark as a tail. This must be in the range
+ /// `0 ..= R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This returns `Some(end)` when it is in the valid range `0 ..= R::BITS`,
+ /// and `None` when it is not.
+ #[inline]
+ pub fn new(end: u8) -> Option<Self> {
+ if end > bits_of::<R>() as u8 {
+ return None;
+ }
+ Some(unsafe { Self::new_unchecked(end) })
+ }
+
+ /// Wraps a counter value as an assumed-good tail of an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `end`: The counter value to mark as a tail. This must be in the range
+ /// `0 ..= R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This unconditionally marks `end` as a valid tail index.
+ ///
+ /// ## Safety
+ ///
+ /// If the `end` value is outside the valid range, then the program is
+ /// incorrect. Debug builds will panic; release builds do not inspect the
+ /// value or specify a behavior.
+ pub(crate) unsafe fn new_unchecked(end: u8) -> Self {
+ debug_assert!(
+ end <= bits_of::<R>() as u8,
+ "Bit tail {} cannot exceed type width {}",
+ end,
+ bits_of::<R>(),
+ );
+ Self {
+ end,
+ _ty: PhantomData,
+ }
+ }
+
+ /// Removes the tail wrapper, leaving the internal counter.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_inner(self) -> u8 {
+ self.end
+ }
+
+ /// Iterates over all tail indices at and after an inclusive starting point.
+ ///
+ /// Because implementation details of the range type family, including the
+ /// [`RangeBounds`] trait, are not yet stable, and heterogeneous ranges are
+ /// not yet supported, this must be an opaque iterator rather than a direct
+ /// [`Range<BitEnd<R>>`].
+ ///
+ /// # Parameters
+ ///
+ /// - `from`: The inclusive low bound of the range. This will be the first
+ /// tail produced by the iterator.
+ ///
+ /// # Returns
+ ///
+ /// An opaque iterator that is equivalent to the range `from ..=
+ /// Self::MAX`.
+ ///
+ /// [`RangeBounds`]: core::ops::RangeBounds
+ /// [`Range<BitEnd<R>>`]: core::ops::Range
+ #[inline]
+ pub fn range_from(
+ from: BitIdx<R>,
+ ) -> impl Iterator<Item = Self>
+ + DoubleEndedIterator
+ + ExactSizeIterator
+ + FusedIterator {
+ (from.idx ..= Self::MAX.end)
+ .map(|tail| unsafe { BitEnd::new_unchecked(tail) })
+ }
+
+ /// Computes the span information for a region.
+ ///
+ /// The computed span of `len` bits begins at `self` and extends upwards in
+ /// memory. The return value is the number of memory elements that contain
+ /// bits of the span, and the first dead bit after the span.
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`: A dead bit which is used as the first live bit of the new
+ /// span.
+ /// - `len`: The number of live bits in the span starting at `self`.
+ ///
+ /// ## Returns
+ ///
+ /// - `.0`: The number of `R` elements that contain live bits in the
+ /// computed span.
+ /// - `.1`: The dead-bit tail index ending the computed span.
+ ///
+ /// ## Behavior
+ ///
+ /// If `len` is `0`, this returns `(0, self)`, as the span has no live bits.
+ /// If `self` is [`BitEnd::MAX`], then the new region starts at
+ /// [`BitIdx::MIN`] in the next element.
+ ///
+ /// [`BitEnd::MAX`]: Self::MAX
+	/// [`BitIdx::MIN`]: crate::index::BitIdx::MIN
+ pub(crate) fn span(self, len: usize) -> (usize, Self) {
+ if len == 0 {
+ return (0, self);
+ }
+
+ let head = self.end & R::MASK;
+ let bits_in_head = (bits_of::<R>() as u8 - head) as usize;
+
+ if len <= bits_in_head {
+ return (1, unsafe { Self::new_unchecked(head + len as u8) });
+ }
+
+ let bits_after_head = len - bits_in_head;
+ let elts = bits_after_head >> R::INDX;
+ let tail = bits_after_head as u8 & R::MASK;
+
+ let is_zero = (tail == 0) as u8;
+ let edges = 2 - is_zero as usize;
+ (elts + edges, unsafe {
+ Self::new_unchecked((is_zero << R::INDX) | tail)
+ })
+ }
+}
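
`span` is likewise crate-internal; a hand-computed sketch of both branches for `R = u16` (`INDX = 4`, `MASK = 15`), with the results checked by hand against the arithmetic used in the `jumps` test:

    fn span_u16(end: u8, len: usize) -> (usize, u8) {
        if len == 0 {
            return (0, end);
        }
        let head = end & 15; // a full tail wraps to index 0 of the next element
        let bits_in_head = 16 - head as usize;
        if len <= bits_in_head {
            return (1, head + len as u8);
        }
        let bits_after_head = len - bits_in_head;
        let elts = bits_after_head >> 4;       // fully-occupied middle elements
        let tail = bits_after_head as u8 & 15; // live bits in the final element
        let edges = 2 - (tail == 0) as usize;
        (elts + edges, if tail == 0 { 16 } else { tail })
    }

    fn main() {
        assert_eq!(span_u16(4, 0), (0, 4));   // empty spans occupy nothing
        assert_eq!(span_u16(3, 3), (1, 6));   // fits inside one element
        assert_eq!(span_u16(10, 40), (4, 2)); // 6 + 16 + 16 + 2 bits over 4 elements
    }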
+
+impl<R> Binary for BitEnd<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "{:0>1$b}", self.end, R::INDX as usize + 1)
+ }
+}
+
+impl<R> Debug for BitEnd<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitEnd<{}>({})", any::type_name::<R>(), self)
+ }
+}
+
+impl<R> Display for BitEnd<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Binary::fmt(self, fmt)
+ }
+}
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/index/BitPos.md")]
+// #[rustc_layout_scalar_valid_range_end(R::BITS)]
+#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitPos<R = usize>
+where R: BitRegister
+{
+ /// Electrical position counter within a register, constrained to `0 ..
+ /// R::BITS`.
+ pos: u8,
+ /// Marker for the register type.
+ _ty: PhantomData<R>,
+}
+
+impl<R> BitPos<R>
+where R: BitRegister
+{
+ /// The position value of the most significant bit in an `R` element.
+ pub const MAX: Self = Self {
+ pos: R::MASK as u8,
+ _ty: PhantomData,
+ };
+ /// The position value of the least significant bit in an `R` element.
+ pub const MIN: Self = Self {
+ pos: 0,
+ _ty: PhantomData,
+ };
+
+ /// Wraps a counter value as a known-good position within an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `pos`: The counter value to mark as a position. This must be in the
+ /// range `0 .. R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This returns `Some(pos)` when it is in the valid range `0 .. R::BITS`,
+ /// and `None` when it is not.
+ #[inline]
+ pub fn new(pos: u8) -> Option<Self> {
+ if pos >= bits_of::<R>() as u8 {
+ return None;
+ }
+ Some(unsafe { Self::new_unchecked(pos) })
+ }
+
+ /// Wraps a counter value as an assumed-good position within an `R`
+ /// register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `value`: The counter value to mark as a position. This must be in the
+ /// range `0 .. R::BITS`.
+ ///
+ /// ## Returns
+ ///
+ /// This unconditionally marks `pos` as a valid bit-position.
+ ///
+ /// ## Safety
+ ///
+ /// If the `pos` value is outside the valid range, then the program is
+ /// incorrect. Debug builds will panic; release builds do not inspect the
+ /// value or specify a behavior.
+ #[inline]
+ pub unsafe fn new_unchecked(pos: u8) -> Self {
+ debug_assert!(
+ pos < bits_of::<R>() as u8,
+ "Bit position {} cannot exceed type width {}",
+ pos,
+ bits_of::<R>(),
+ );
+ Self {
+ pos,
+ _ty: PhantomData,
+ }
+ }
+
+ /// Removes the position wrapper, leaving the internal counter.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_inner(self) -> u8 {
+ self.pos
+ }
+
+ /// Computes the bit selector corresponding to `self`.
+ ///
+ /// This is always `1 << self.pos`.
+ #[inline]
+ pub fn select(self) -> BitSel<R> {
+ unsafe { BitSel::new_unchecked(R::ONE << self.pos) }
+ }
+
+ /// Computes the bit selector for `self` as an accessor mask.
+ ///
+ /// This is a type-cast over [`Self::select`].
+ ///
+ /// [`Self::select`]: Self::select
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn mask(self) -> BitMask<R> {
+ self.select().mask()
+ }
+
+ /// Iterates over all possible position values.
+ pub(crate) fn range_all() -> impl Iterator<Item = Self>
+ + DoubleEndedIterator
+ + ExactSizeIterator
+ + FusedIterator {
+ BitIdx::<R>::range_all()
+ .map(|idx| unsafe { Self::new_unchecked(idx.into_inner()) })
+ }
+}
+
+impl<R> Binary for BitPos<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "{:0>1$b}", self.pos, R::INDX as usize)
+ }
+}
+
+impl<R> Debug for BitPos<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitPos<{}>({})", any::type_name::<R>(), self)
+ }
+}
+
+impl<R> Display for BitPos<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Binary::fmt(self, fmt)
+ }
+}
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/index/BitSel.md")]
+// #[rustc_layout_scalar_valid_range_end(R::BITS)]
+#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitSel<R = usize>
+where R: BitRegister
+{
+ /// A one-hot selection mask.
+ sel: R,
+}
+
+impl<R> BitSel<R>
+where R: BitRegister
+{
+ /// Wraps a selector value as a known-good selection in an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `sel`: A one-hot selection mask of a bit in an `R` register.
+ ///
+ /// ## Returns
+ ///
+ /// This returns `Some(sel)` when it is a power of two (exactly one bit set
+ /// and all others cleared), and `None` when it is not.
+ #[inline]
+ pub fn new(sel: R) -> Option<Self> {
+ if sel.count_ones() != 1 {
+ return None;
+ }
+ Some(unsafe { Self::new_unchecked(sel) })
+ }
+
+ /// Wraps a selector value as an assumed-good selection in an `R` register.
+ ///
+ /// ## Parameters
+ ///
+ /// - `sel`: A one-hot selection mask of a bit in an `R` register.
+ ///
+ /// ## Returns
+ ///
+ /// This unconditionally marks `sel` as a one-hot bit selector.
+ ///
+ /// ## Safety
+ ///
+ /// If the `sel` value has zero or multiple bits set, then it is invalid to
+ /// be used as a `BitSel` and the program is incorrect. Debug builds will
+ /// panic; release builds do not inspect the value or specify a behavior.
+ #[inline]
+ pub unsafe fn new_unchecked(sel: R) -> Self {
+ debug_assert!(
+ sel.count_ones() == 1,
+ "Selections are required to have exactly one bit set: {:0>1$b}",
+ sel,
+ bits_of::<R>() as usize,
+ );
+ Self { sel }
+ }
+
+ /// Removes the one-hot selection wrapper, leaving the internal mask.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_inner(self) -> R {
+ self.sel
+ }
+
+ /// Computes a bit-mask for `self`. This is a type-cast.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn mask(self) -> BitMask<R> {
+ BitMask::new(self.sel)
+ }
+
+ /// Iterates over all possible selector values.
+ #[inline]
+ pub fn range_all() -> impl Iterator<Item = Self>
+ + DoubleEndedIterator
+ + ExactSizeIterator
+ + FusedIterator {
+ BitPos::<R>::range_all().map(BitPos::select)
+ }
+}
+
+impl<R> Binary for BitSel<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "{:0>1$b}", self.sel, bits_of::<R>() as usize)
+ }
+}
+
+impl<R> Debug for BitSel<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitSel<{}>({})", any::type_name::<R>(), self)
+ }
+}
+
+impl<R> Display for BitSel<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Binary::fmt(self, fmt)
+ }
+}
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/index/BitMask.md")]
+#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitMask<R = usize>
+where R: BitRegister
+{
+ /// A mask of any number of bits to select.
+ mask: R,
+}
+
+impl<R> BitMask<R>
+where R: BitRegister
+{
+ /// A full bit-mask with every bit set.
+ pub const ALL: Self = Self { mask: R::ALL };
+ /// An empty bit-mask with every bit cleared.
+ pub const ZERO: Self = Self { mask: R::ZERO };
+
+ /// Wraps any `R` value as a bit-mask.
+ ///
+ /// This constructor is provided to explicitly declare that an operation is
+ /// discarding the numeric value of an integer and instead using it only as
+ /// a bit-mask.
+ ///
+ /// ## Parameters
+ ///
+ /// - `mask`: Some integer to use as a bit-mask.
+ ///
+ /// ## Returns
+ ///
+ /// The `mask` value wrapped as a bit-mask, with its numeric context
+ /// discarded.
+ ///
+ /// Prefer accumulating [`BitSel`] values using its `Sum` implementation.
+ ///
+ /// ## Safety
+ ///
+ /// The `mask` value must be computed from a set of valid bit positions in
+ /// the caller’s context.
+ ///
+ /// [`BitSel`]: crate::index::BitSel
+ #[inline]
+ pub fn new(mask: R) -> Self {
+ Self { mask }
+ }
+
+ /// Removes the mask wrapper, leaving the internal value.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_inner(self) -> R {
+ self.mask
+ }
+
+ /// Tests if a mask contains a given selector bit.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&self`: The mask being tested.
+ /// - `sel`: A selector bit to test in `self`.
+ ///
+ /// ## Returns
+ ///
+ /// Whether `self` has set the bit that `sel` indicates.
+ #[inline]
+ pub fn test(&self, sel: BitSel<R>) -> bool {
+ self.mask & sel.sel != R::ZERO
+ }
+
+ /// Inserts a selector bit into a mask.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`: The mask being modified.
+ /// - `sel`: A selector bit to insert into `self`.
+ ///
+ /// ## Effects
+ ///
+ /// The `sel` bit is set in the mask.
+ #[inline]
+ pub fn insert(&mut self, sel: BitSel<R>) {
+ self.mask |= sel.sel;
+ }
+
+ /// Creates a new mask with a selector bit activated.
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`: The original mask.
+ /// - `sel`: The selector bit being added into the mask.
+ ///
+ /// ## Returns
+ ///
+ /// A new bit-mask with `sel` activated.
+ #[inline]
+ pub fn combine(self, sel: BitSel<R>) -> Self {
+ Self {
+ mask: self.mask | sel.sel,
+ }
+ }
+}
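
A usage sketch of accumulating selectors into a mask, mirroring the `mask_operators` test below:

    use bitvec::index::{BitEnd, BitIdx, BitMask, BitSel};
    use bitvec::order::Lsb0;

    fn main() {
        // Bits 2, 3, and 4 of a u8, selected under Lsb0, sum to 0b0001_1100.
        let mask: BitMask<u8> = BitIdx::<u8>::new(2)
            .unwrap()
            .range(BitEnd::new(5).unwrap())
            .map(BitIdx::select::<Lsb0>)
            .sum();
        assert_eq!(mask, BitMask::new(0b0001_1100));
        assert!(mask.test(BitSel::new(0b0000_0100).unwrap()));
        assert!(!mask.test(BitSel::new(0b0010_0000).unwrap()));
    }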
+
+impl<R> Binary for BitMask<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "{:0>1$b}", self.mask, bits_of::<R>() as usize)
+ }
+}
+
+impl<R> Debug for BitMask<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitMask<{}>({})", any::type_name::<R>(), self)
+ }
+}
+
+impl<R> Display for BitMask<R>
+where R: BitRegister
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Binary::fmt(self, fmt)
+ }
+}
+
+impl<R> Sum<BitSel<R>> for BitMask<R>
+where R: BitRegister
+{
+ #[inline]
+ fn sum<I>(iter: I) -> Self
+ where I: Iterator<Item = BitSel<R>> {
+ iter.fold(Self::ZERO, Self::combine)
+ }
+}
+
+impl<R> BitAnd<R> for BitMask<R>
+where R: BitRegister
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitand(self, rhs: R) -> Self::Output {
+ Self {
+ mask: self.mask & rhs,
+ }
+ }
+}
+
+impl<R> BitOr<R> for BitMask<R>
+where R: BitRegister
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitor(self, rhs: R) -> Self::Output {
+ Self {
+ mask: self.mask | rhs,
+ }
+ }
+}
+
+impl<R> Not for BitMask<R>
+where R: BitRegister
+{
+ type Output = Self;
+
+ #[inline]
+ fn not(self) -> Self::Output {
+ Self { mask: !self.mask }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::order::Lsb0;
+
+ #[test]
+ fn index_ctors() {
+ for n in 0 .. 8 {
+ assert!(BitIdx::<u8>::new(n).is_ok());
+ }
+ assert!(BitIdx::<u8>::new(8).is_err());
+
+ for n in 0 .. 16 {
+ assert!(BitIdx::<u16>::new(n).is_ok());
+ }
+ assert!(BitIdx::<u16>::new(16).is_err());
+
+ for n in 0 .. 32 {
+ assert!(BitIdx::<u32>::new(n).is_ok());
+ }
+ assert!(BitIdx::<u32>::new(32).is_err());
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ for n in 0 .. 64 {
+ assert!(BitIdx::<u64>::new(n).is_ok());
+ }
+ assert!(BitIdx::<u64>::new(64).is_err());
+ }
+
+ for n in 0 .. bits_of::<usize>() as u8 {
+ assert!(BitIdx::<usize>::new(n).is_ok());
+ }
+ assert!(BitIdx::<usize>::new(bits_of::<usize>() as u8).is_err());
+ }
+
+ #[test]
+ fn tail_ctors() {
+ for n in 0 ..= 8 {
+ assert!(BitEnd::<u8>::new(n).is_some());
+ }
+ assert!(BitEnd::<u8>::new(9).is_none());
+
+ for n in 0 ..= 16 {
+ assert!(BitEnd::<u16>::new(n).is_some());
+ }
+ assert!(BitEnd::<u16>::new(17).is_none());
+
+ for n in 0 ..= 32 {
+ assert!(BitEnd::<u32>::new(n).is_some());
+ }
+ assert!(BitEnd::<u32>::new(33).is_none());
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ for n in 0 ..= 64 {
+ assert!(BitEnd::<u64>::new(n).is_some());
+ }
+ assert!(BitEnd::<u64>::new(65).is_none());
+ }
+
+ for n in 0 ..= bits_of::<usize>() as u8 {
+ assert!(BitEnd::<usize>::new(n).is_some());
+ }
+ assert!(BitEnd::<usize>::new(bits_of::<usize>() as u8 + 1).is_none());
+ }
+
+ #[test]
+ fn position_ctors() {
+ for n in 0 .. 8 {
+ assert!(BitPos::<u8>::new(n).is_some());
+ }
+ assert!(BitPos::<u8>::new(8).is_none());
+
+ for n in 0 .. 16 {
+ assert!(BitPos::<u16>::new(n).is_some());
+ }
+ assert!(BitPos::<u16>::new(16).is_none());
+
+ for n in 0 .. 32 {
+ assert!(BitPos::<u32>::new(n).is_some());
+ }
+ assert!(BitPos::<u32>::new(32).is_none());
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ for n in 0 .. 64 {
+ assert!(BitPos::<u64>::new(n).is_some());
+ }
+ assert!(BitPos::<u64>::new(64).is_none());
+ }
+
+ for n in 0 .. bits_of::<usize>() as u8 {
+ assert!(BitPos::<usize>::new(n).is_some());
+ }
+ assert!(BitPos::<usize>::new(bits_of::<usize>() as u8).is_none());
+ }
+
+ #[test]
+ fn select_ctors() {
+ for n in 0 .. 8 {
+ assert!(BitSel::<u8>::new(1 << n).is_some());
+ }
+ assert!(BitSel::<u8>::new(0).is_none());
+ assert!(BitSel::<u8>::new(3).is_none());
+
+ for n in 0 .. 16 {
+ assert!(BitSel::<u16>::new(1 << n).is_some());
+ }
+ assert!(BitSel::<u16>::new(0).is_none());
+ assert!(BitSel::<u16>::new(3).is_none());
+
+ for n in 0 .. 32 {
+ assert!(BitSel::<u32>::new(1 << n).is_some());
+ }
+ assert!(BitSel::<u32>::new(0).is_none());
+ assert!(BitSel::<u32>::new(3).is_none());
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ for n in 0 .. 64 {
+ assert!(BitSel::<u64>::new(1 << n).is_some());
+ }
+ assert!(BitSel::<u64>::new(0).is_none());
+ assert!(BitSel::<u64>::new(3).is_none());
+ }
+
+ for n in 0 .. bits_of::<usize>() as u8 {
+ assert!(BitSel::<usize>::new(1 << n).is_some());
+ }
+ assert!(BitSel::<usize>::new(0).is_none());
+ assert!(BitSel::<usize>::new(3).is_none());
+ }
+
+ #[test]
+ fn ranges() {
+ let mut range = BitIdx::<u16>::range_all();
+ assert_eq!(range.next(), BitIdx::new(0).ok());
+ assert_eq!(range.next_back(), BitIdx::new(15).ok());
+ assert_eq!(range.count(), 14);
+
+ let mut range = BitEnd::<u8>::range_from(BitIdx::new(1).unwrap());
+ assert_eq!(range.next(), BitEnd::new(1));
+ assert_eq!(range.next_back(), BitEnd::new(8));
+ assert_eq!(range.count(), 6);
+
+ let mut range = BitPos::<u8>::range_all();
+ assert_eq!(range.next(), BitPos::new(0));
+ assert_eq!(range.next_back(), BitPos::new(7));
+ assert_eq!(range.count(), 6);
+
+ let mut range = BitSel::<u8>::range_all();
+ assert_eq!(range.next(), BitSel::new(1));
+ assert_eq!(range.next_back(), BitSel::new(128));
+ assert_eq!(range.count(), 6);
+ }
+
+ #[test]
+ fn index_cycle() {
+ let six = BitIdx::<u8>::new(6).unwrap();
+ let (seven, step) = six.next();
+ assert_eq!(seven, BitIdx::new(7).unwrap());
+ assert!(!step);
+
+ let (zero, step) = seven.next();
+ assert_eq!(zero, BitIdx::MIN);
+ assert!(step);
+
+ let (seven, step) = zero.prev();
+ assert_eq!(seven, BitIdx::new(7).unwrap());
+ assert!(step);
+
+ let (six, step) = seven.prev();
+ assert_eq!(six, BitIdx::new(6).unwrap());
+ assert!(!step);
+
+ let fourteen = BitIdx::<u16>::new(14).unwrap();
+ let (fifteen, step) = fourteen.next();
+ assert_eq!(fifteen, BitIdx::new(15).unwrap());
+ assert!(!step);
+ let (zero, step) = fifteen.next();
+ assert_eq!(zero, BitIdx::MIN);
+ assert!(step);
+ let (fifteen, step) = zero.prev();
+ assert_eq!(fifteen, BitIdx::new(15).unwrap());
+ assert!(step);
+ let (fourteen, step) = fifteen.prev();
+ assert_eq!(fourteen, BitIdx::new(14).unwrap());
+ assert!(!step);
+ }
+
+ #[test]
+ fn jumps() {
+ let (jump, head) = BitIdx::<u8>::new(1).unwrap().offset(2);
+ assert_eq!(jump, 0);
+ assert_eq!(head, BitIdx::new(3).unwrap());
+
+ let (jump, head) = BitIdx::<u8>::MAX.offset(1);
+ assert_eq!(jump, 1);
+ assert_eq!(head, BitIdx::MIN);
+
+ let (jump, head) = BitIdx::<u16>::new(10).unwrap().offset(40);
+ // 10 is in 0..16; 10+40 is in 48..64
+ assert_eq!(jump, 3);
+ assert_eq!(head, BitIdx::new(2).unwrap());
+
+ // .offset() wraps at the `isize` boundary
+ let (jump, head) = BitIdx::<u8>::MAX.offset(isize::MAX);
+ assert_eq!(jump, -(((isize::MAX as usize + 1) >> 3) as isize));
+ assert_eq!(head, BitIdx::MAX.prev().0);
+
+ let (elts, tail) = BitIdx::<u8>::new(4).unwrap().span(0);
+ assert_eq!(elts, 0);
+ assert_eq!(tail, BitEnd::new(4).unwrap());
+
+ let (elts, tail) = BitIdx::<u8>::new(3).unwrap().span(3);
+ assert_eq!(elts, 1);
+ assert_eq!(tail, BitEnd::new(6).unwrap());
+
+ let (elts, tail) = BitIdx::<u16>::new(10).unwrap().span(40);
+ assert_eq!(elts, 4);
+ assert_eq!(tail, BitEnd::new(2).unwrap());
+ }
+
+ #[test]
+ fn mask_operators() {
+ let mut mask = BitIdx::<u8>::new(2)
+ .unwrap()
+ .range(BitEnd::new(5).unwrap())
+ .map(BitIdx::select::<Lsb0>)
+ .sum::<BitMask<u8>>();
+ assert_eq!(mask, BitMask::new(28));
+ assert_eq!(mask & 25, BitMask::new(24));
+ assert_eq!(mask | 32, BitMask::new(60));
+ assert_eq!(!mask, BitMask::new(!28));
+ let yes = BitSel::<u8>::new(16).unwrap();
+ let no = BitSel::<u8>::new(64).unwrap();
+ assert!(mask.test(yes));
+ assert!(!mask.test(no));
+ mask.insert(no);
+ assert!(mask.test(no));
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn render() {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+
+ assert_eq!(format!("{:?}", BitIdx::<u8>::MAX), "BitIdx<u8>(111)");
+ assert_eq!(format!("{:?}", BitIdx::<u16>::MAX), "BitIdx<u16>(1111)");
+ assert_eq!(format!("{:?}", BitIdx::<u32>::MAX), "BitIdx<u32>(11111)");
+
+ assert_eq!(
+ format!("{:?}", BitIdx::<u8>::new(8).unwrap_err()),
+ "BitIdxError<u8>(8)"
+ );
+ assert_eq!(
+ format!("{:?}", BitIdx::<u16>::new(16).unwrap_err()),
+ "BitIdxError<u16>(16)"
+ );
+ assert_eq!(
+ format!("{:?}", BitIdx::<u32>::new(32).unwrap_err()),
+ "BitIdxError<u32>(32)"
+ );
+
+ assert_eq!(format!("{:?}", BitEnd::<u8>::MAX), "BitEnd<u8>(1000)");
+ assert_eq!(format!("{:?}", BitEnd::<u16>::MAX), "BitEnd<u16>(10000)");
+ assert_eq!(format!("{:?}", BitEnd::<u32>::MAX), "BitEnd<u32>(100000)");
+
+ assert_eq!(format!("{:?}", BitPos::<u8>::MAX), "BitPos<u8>(111)");
+ assert_eq!(format!("{:?}", BitPos::<u16>::MAX), "BitPos<u16>(1111)");
+ assert_eq!(format!("{:?}", BitPos::<u32>::MAX), "BitPos<u32>(11111)");
+
+ assert_eq!(
+ format!("{:?}", BitSel::<u8>::new(1).unwrap()),
+ "BitSel<u8>(00000001)",
+ );
+ assert_eq!(
+ format!("{:?}", BitSel::<u16>::new(1).unwrap()),
+ "BitSel<u16>(0000000000000001)",
+ );
+ assert_eq!(
+ format!("{:?}", BitSel::<u32>::new(1).unwrap()),
+ "BitSel<u32>(00000000000000000000000000000001)",
+ );
+
+ assert_eq!(
+ format!("{:?}", BitMask::<u8>::new(1 | 4 | 32)),
+ "BitMask<u8>(00100101)",
+ );
+ assert_eq!(
+ format!("{:?}", BitMask::<u16>::new(1 | 4 | 32)),
+ "BitMask<u16>(0000000000100101)",
+ );
+ assert_eq!(
+ format!("{:?}", BitMask::<u32>::new(1 | 4 | 32)),
+ "BitMask<u32>(00000000000000000000000000100101)",
+ );
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ assert_eq!(
+ format!("{:?}", BitIdx::<u64>::MAX),
+ "BitIdx<u64>(111111)",
+ );
+ assert_eq!(
+ format!("{:?}", BitIdx::<u64>::new(64).unwrap_err()),
+ "BitIdxError<u64>(64)",
+ );
+ assert_eq!(
+ format!("{:?}", BitEnd::<u64>::MAX),
+ "BitEnd<u64>(1000000)",
+ );
+ assert_eq!(
+ format!("{:?}", BitPos::<u64>::MAX),
+ "BitPos<u64>(111111)",
+ );
+ assert_eq!(
+ format!("{:?}", BitSel::<u64>::new(1).unwrap()),
+ "BitSel<u64>(0000000000000000000000000000000000000000000000000000000000000001)",
+ );
+ assert_eq!(
+ format!("{:?}", BitMask::<u64>::new(1 | 4 | 32)),
+ "BitMask<u64>(0000000000000000000000000000000000000000000000000000000000100101)",
+ );
+ }
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..462ec46
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,77 @@
+#![doc = include_str!("../README.md")]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(
+ debug_assertions,
+ warn(missing_docs, clippy::missing_docs_in_private_items)
+)]
+#![cfg_attr(
+ not(debug_assertions),
+ deny(missing_docs, clippy::missing_docs_in_private_items)
+)]
+#![deny(unconditional_recursion)]
+#![allow(
+ clippy::declare_interior_mutable_const,
+ clippy::type_complexity,
+ unknown_lints
+)]
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+#[macro_use]
+mod devel;
+
+#[macro_use]
+pub mod macros;
+
+pub mod access;
+pub mod array;
+pub mod boxed;
+pub mod domain;
+pub mod field;
+pub mod index;
+pub mod mem;
+pub mod order;
+pub mod ptr;
+mod serdes;
+pub mod slice;
+pub mod store;
+pub mod vec;
+pub mod view;
+
+#[doc = include_str!("../doc/prelude.md")]
+pub mod prelude {
+ pub use crate::{
+ array::BitArray,
+ bitarr,
+ bits,
+ field::BitField as _,
+ order::{
+ BitOrder,
+ LocalBits,
+ Lsb0,
+ Msb0,
+ },
+ ptr::{
+ BitPtr,
+ BitPtrRange,
+ BitRef,
+ },
+ slice::BitSlice,
+ store::BitStore,
+ view::{
+ AsBits,
+ AsMutBits,
+ BitView as _,
+ BitViewSized as _,
+ },
+ BitArr,
+ };
+ #[cfg(feature = "alloc")]
+ pub use crate::{
+ bitbox,
+ bitvec,
+ boxed::BitBox,
+ vec::BitVec,
+ };
+}
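
A sketch of the intended import pattern: the prelude glob brings in the types, the extension traits (anonymously), and the constructor macros at once. The values here are arbitrary, and the second half assumes the default `alloc` feature:

    use bitvec::prelude::*;

    fn main() {
        let arr = bitarr![u8, Msb0; 1, 0, 1, 1];
        assert_eq!(arr[.. 4].count_ones(), 3);

        let mut bv = bitvec![u8, Msb0; 0; 4];
        bv.set(1, true);
        assert_eq!(bv, bits![0, 1, 0, 0]);
    }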
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 0000000..04c5198
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,365 @@
+#![allow(deprecated)]
+#![doc = include_str!("../doc/macros.md")]
+
+#[macro_use]
+#[doc(hidden)]
+pub mod internal;
+
+mod tests;
+
+#[macro_export]
+#[doc = include_str!("../doc/macros/BitArr_type.md")]
+macro_rules! BitArr {
+ (for $len:expr, in $store:ty, $order:ty $(,)?) => {
+ $crate::array::BitArray::<
+ [$store; $crate::mem::elts::<$store>($len)], $order
+ >
+ };
+
+ (for $len:expr, in $store:ty $(,)?) => {
+ $crate::BitArr!(for $len, in $store, $crate::order::Lsb0)
+ };
+
+ (for $len:expr) => {
+ $crate::BitArr!(for $len, in usize)
+ };
+}
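
A usage sketch of the type-level macro above: `BitArr!` names an array type sized for the requested bit count, which the value-level `bitarr!` below then constructs. The `Flags` alias and the bit count are arbitrary:

    use bitvec::prelude::*;

    // Twenty bits of u8 storage round up to three elements.
    type Flags = BitArr!(for 20, in u8, Msb0);

    fn main() {
        let mut flags = Flags::ZERO;
        assert_eq!(core::mem::size_of::<Flags>(), 3);
        flags.set(19, true);
        assert!(flags[19]);
    }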
+
+#[macro_export]
+#[doc = include_str!("../doc/macros/bitarr_value.md")]
+macro_rules! bitarr {
+ /* `const`-expression constructors.
+ *
+ * These arms expand to expressions which are guaranteed to be valid in
+ * `const` position: initializing `static` or `const`, or arguments to
+ * `const fn`.
+ *
+ * > Other arms *may* be valid in `const`s, but do not guarantee it.
+ *
+ * They are more restricted than the general variants below, because the
+ * trait system is not yet usable in `const` contexts and thus these
+ * expansions can only use codepaths defined in this module, and cannot use
+ * the rest of `bitvec`’s systems.
+ *
+ * All valid invocations with a leading `const` will remain valid if the
+ * `const` is removed, though their expansion may change to no longer be
+ * valid in `const` contexts.
+ */
+
+ // Bit-sequencing requires detecting `Cell` separately from other types.
+ // See below.
+
+ (const Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
+ const ELTS: usize = $crate::__count_elts!($store; $($val),*);
+ type Data = [Cell<$store>; ELTS];
+ const DATA: Data = $crate::__encode_bits!(Cell<$store>, $order; $($val),*);
+
+ type This = $crate::array::BitArray<Data, $order>;
+ This { data: DATA, ..This::ZERO }
+ }};
+ (const $store:ident, $order:ident; $($val:expr),* $(,)?) => {{
+ const ELTS: usize = $crate::__count_elts!($store; $($val),*);
+ type Data = [$store; ELTS];
+ const DATA: Data = $crate::__encode_bits!($store, $order; $($val),*);
+
+ type This = $crate::array::BitArray<Data, $order>;
+ This { data: DATA, ..This::ZERO }
+ }};
+
+ // Bit-repetition is agnostic to types, so it only needs two arms.
+
+ (const $store:ty, $order:ty; $val:expr; $len:expr) => {{
+ use $crate::macros::internal::core;
+ type Mem = <$store as $crate::store::BitStore>::Mem;
+
+ const ELTS: usize = $crate::mem::elts::<$store>($len);
+ const ELEM: Mem = $crate::__extend_bool!($val, $store);
+ const DATA: [Mem; ELTS] = [ELEM; ELTS];
+
+ type This = $crate::array::BitArray<[$store; ELTS], $order>;
+ unsafe { core::mem::transmute::<_, This>(DATA) }
+ }};
+ (const $val:expr; $len:expr) => {{
+ $crate::bitarr!(const usize, $crate::order::Lsb0; $val; $len)
+ }};
+
+ (const $($val:expr),* $(,)?) => {{
+ $crate::bitarr!(const usize, Lsb0; $($val),*)
+ }};
+
+ /* Non-`const` constructors.
+ *
+ * These expansions are allowed to produce code that does not run in `const`
+ * contexts. While it is *likely* that the expansions will be evaluated at
+ * compile-time, they won’t do so while the `const` engine is active.
+ */
+
+ /* Bit-sequence encoding.
+ *
+ * This requires four arms to the `const` section’s one, because of how both
+ * the ordering and storage arguments may be provided. As macros operate
+ * syntactically, before the type system begins, they have to accept any
+ * syntax that could later be accepted as the name of a satisfying type.
+ *
+ * The `$order:ident` matcher uses the fact that `:ident` matches remain
+ * matchable across deeper macro invocations, so that the bottom of the
+ * macro stack can detect the magic tokens `LocalBits`, `Lsb0`, and `Msb0`,
+ * and operate accordingly. The `$order:path` matcher is always opaque, and
+ * serves as a fallback for complex type-names.
+ *
+ * `Cell<$store>` uses literal detection to extract the interior type width.
+ * This cannot be done by `:ty` or `:path`, as these are opaque, and
+ * `:ident` does not match `Cell<_>`.
+ */
+
+ (Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
+ use $crate::macros::internal::core;
+ type Celled = core::cell::Cell<$store>;
+
+ const ELTS: usize = $crate::__count_elts!($store; $($val),*);
+ type Data = [Celled; ELTS];
+ type This = $crate::array::BitArray<Data, $order>;
+
+ This::new($crate::__encode_bits!(Cell<$store>, $order; $($val),*))
+ }};
+ (Cell<$store:ident>, $order:path; $($val:expr),* $(,)?) => {{
+ use $crate::macros::internal::core;
+ type Celled = core::cell::Cell<$store>;
+
+ const ELTS: usize = $crate::__count_elts!($store; $($val),*);
+ type This = $crate::array::BitArray<[Celled; ELTS], $order>;
+
+ This::new($crate::__encode_bits!(Cell<$store>, $order; $($val),*))
+ }};
+
+ ($store:ident, $order:ident; $($val:expr),* $(,)?) => {{
+ const ELTS: usize = $crate::__count_elts!($store; $($val),*);
+ type This = $crate::array::BitArray<[$store; ELTS], $order>;
+
+ This::new($crate::__encode_bits!($store, $order; $($val),*))
+ }};
+ ($store:ident, $order:path; $($val:expr),* $(,)?) => {{
+ const ELTS: usize = $crate::__count_elts!($store; $($val),*);
+ type This = $crate::array::BitArray<[$store; ELTS], $order>;
+
+ This::new($crate::__encode_bits!($store, $order; $($val),*))
+ }};
+
+
+ ($store:ty, $order:ty; $val:expr; $len:expr) => {{
+ $crate::bitarr!(const $store, $order; $val; $len)
+ }};
+ ($val:expr; $len:expr) => {{
+ $crate::bitarr!(const $val; $len)
+ }};
+ ($($val:expr),* $(,)?) => {
+ $crate::bitarr!(usize, Lsb0; $($val),*)
+ };
+}
+
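A short sketch contrasting the two `bitarr!` families defined above: the `const` arms can initialise `static` items, while the plain arms are ordinary runtime expressions (the `LUT` name is illustrative):

    use bitvec::prelude::*;

    // `const` arm: valid as a `static` initialiser.
    static LUT: BitArr!(for 12, in u8, Msb0) = bitarr!(const u8, Msb0; 0; 12);

    // Runtime arm: mutable once bound.
    let mut arr = bitarr!(u8, Msb0; 0; 12);
    arr.set(3, true);
    assert_eq!(arr.count_ones(), 1);
    assert_eq!(LUT.count_ones(), 0);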
+#[macro_export]
+#[doc = include_str!("../doc/macros/bits.md")]
+macro_rules! bits {
+ /* `&'static` constructors.
+ *
+ * Like the `bitarr!(const …)` arms, these arms must expand to code that is
+ * valid in `const` contexts. As such, they can only accept `$order`
+ * arguments that are one of the `LocalBits`, `Lsb0`, or `Msb0` literals.
+ * Once the underlying `static BitArray` is created, it is sliced down to
+ * the requested bit-length and borrowed for the `'static` lifetime.
+ */
+ (static mut Cell<$store:ident>, $order:ty; $val:expr; $len:expr) => {{
+ use $crate::macros::internal::core;
+ type Celled = core::cell::Cell<$store>;
+ static mut DATA: $crate::BitArr!(for $len, in Celled, $order) =
+ $crate::bitarr!(const Cell<$store>, $order; $val; $len);
+ &mut DATA[.. $len]
+ }};
+ (static mut $store:ident, $order:ident; $val:expr; $len:expr) => {{
+ static mut DATA: $crate::BitArr!(for $len, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $val; $len);
+ DATA.get_unchecked_mut(.. $len)
+ }};
+
+ (static mut Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
+ use $crate::macros::internal::core;
+ type Celled = core::cell::Cell<$store>;
+ const BITS: usize = $crate::__count!($($val),*);
+
+ static mut DATA: $crate::BitArr!(for BITS, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $($val),*);
+ &mut *(
+ DATA.get_unchecked_mut(.. BITS)
+ as *mut $crate::slice::BitSlice<$store, $order>
+ as *mut $crate::slice::BitSlice<Celled, $order>
+ )
+ }};
+ (static mut $store:ident, $order:ident; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ static mut DATA: $crate::BitArr!(for BITS, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $($val),*);
+ DATA.get_unchecked_mut(.. BITS)
+ }};
+
+ (static mut $val:expr; $len:expr) => {{
+ static mut DATA: $crate::BitArr!(for $len) =
+ $crate::bitarr!(const usize, $crate::order::Lsb0; $val; $len);
+ DATA.get_unchecked_mut(.. $len)
+ }};
+ (static mut $($val:expr),* $(,)?) => {{
+ $crate::bits!(static mut usize, Lsb0; $($val),*)
+ }};
+
+ (static Cell<$store:ident>, $order:ty; $val:expr; $len:expr) => {{
+ use $crate::macros::internal::core;
+ type Celled = core::cell::Cell<$store>;
+ static DATA: $crate::BitArr!(for $len, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $val; $len);
+ unsafe {
+ &*(
+ DATA.get_unchecked(.. $len)
+ as *const $crate::slice::BitSlice<$store, $order>
+ as *const $crate::slice::BitSlice<Celled, $order>
+ )
+ }
+ }};
+ (static Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
+ use $crate::macros::internal::core;
+ type Celled = core::cell::Cell<$store>;
+ const BITS: usize = $crate::__count!($($val),*);
+
+ static DATA: $crate::BitArr!(for BITS, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $($val),*);
+ unsafe {
+ &*(
+ DATA.get_unchecked(.. BITS)
+ as *const $crate::slice::BitSlice<$store, $order>
+ as *const $crate::slice::BitSlice<Celled, $order>
+ )
+ }
+ }};
+
+ (static $store:ident, $order:ident; $val:expr; $len:expr) => {{
+ static DATA: $crate::BitArr!(for $len, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $val; $len);
+ unsafe { DATA.get_unchecked(.. $len) }
+ }};
+ (static $val:expr; $len:expr) => {{
+ static DATA: $crate::BitArr!(for $len) =
+ $crate::bitarr!(const usize, $crate::order::Lsb0; $val; $len);
+ unsafe { DATA.get_unchecked(.. $len) }
+ }};
+
+ (static $store:ident, $order:ident; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ static DATA: $crate::BitArr!(for BITS, in $store, $order) =
+ $crate::bitarr!(const $store, $order; $($val),*);
+ unsafe { DATA.get_unchecked(.. BITS) }
+ }};
+ (static $($val:expr),* $(,)?) => {{
+ $crate::bits!(static usize, Lsb0; $($val),*)
+ }};
+
+ // Repetition syntax `[bit ; count]`.
+ // NOTE: `count` must be a `const`, as this is a non-allocating macro.
+
+ // Sequence syntax `[bit (, bit)*]` or `[(bit ,)*]`.
+
+ // Explicit order and store.
+
+ (mut Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &mut $crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS]
+ }};
+ (mut Cell<$store:ident>, $order:path; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &mut $crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS]
+ }};
+
+ (mut $store:ident, $order:ident; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &mut $crate::bitarr!($store, $order; $($val),*)[.. BITS]
+ }};
+ (mut $store:ident, $order:path; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &mut $crate::bitarr!($store, $order; $($val),*)[.. BITS]
+ }};
+
+ // Explicit order and store.
+ (mut $store:ty, $order:ty; $val:expr; $len:expr) => {{
+ &mut $crate::bitarr!($store, $order; $val; $len)[.. $len]
+ }};
+ // Default order and store.
+ (mut $val:expr; $len:expr) => {
+ $crate::bits!(mut usize, $crate::order::Lsb0; $val; $len)
+ };
+
+ // Default order and store.
+ (mut $($val:expr),* $(,)?) => {
+ $crate::bits!(mut usize, Lsb0; $($val),*)
+ };
+
+ // Repeat everything from above, but now immutable.
+
+ ($store:ty, $order:ty; $val:expr; $len:expr) => {{
+ &$crate::bitarr!($store, $order; $val; $len)[.. $len]
+ }};
+
+ (Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &$crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS]
+ }};
+ ($store:ident, $order:ident; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &$crate::bitarr!($store, $order; $($val),*)[.. BITS]
+ }};
+
+ (Cell<$store:ident>, $order:path; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &$crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS]
+ }};
+ ($store:ident, $order:path; $($val:expr),* $(,)?) => {{
+ const BITS: usize = $crate::__count!($($val),*);
+ &$crate::bitarr!($store, $order; $($val),*)[.. BITS]
+ }};
+
+ // Default order and store.
+ ($val:expr; $len:expr) => {
+ $crate::bits!(usize, $crate::order::Lsb0; $val; $len)
+ };
+ ($($val:expr),* $(,)?) => {
+ $crate::bits!(usize, Lsb0; $($val),*)
+ };
+}
+
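A hedged sketch of the three `bits!` families above, as downstream code would invoke them (none of this allocates; the non-`static` forms borrow a hidden local `BitArray`):

    use bitvec::prelude::*;

    let ro = bits![u16, Lsb0; 0, 1, 0, 1];
    let rw = bits![mut u8, Msb0; 0; 8];
    rw.set(0, true);
    assert!(ro[1] && rw[0]);

    // The `static` arms hand out `&'static BitSlice` backed by a hidden `static`.
    let fixed: &'static BitSlice<u8, Msb0> = bits![static u8, Msb0; 1, 0, 1];
    assert_eq!(fixed.len(), 3);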
+#[macro_export]
+#[cfg(feature = "alloc")]
+#[doc = include_str!("../doc/macros/bitvec.md")]
+macro_rules! bitvec {
+ // First, capture the repetition syntax, as it is permitted to use runtime
+ // values for the repetition count.
+ ($store:ty, $order:ty; $val:expr; $len:expr) => {
+ $crate::vec::BitVec::<$store, $order>::repeat($val != 0, $len)
+ };
+ // Capture `Cell<T>` patterns and prevent them from being parsed as
+ // comparisons. Guess we didn't escape Most Vexing Parse after all.
+ (Cell<$store:ident>, $order:ident $($rest:tt)*) => {
+ $crate::vec::BitVec::from_bitslice($crate::bits!(Cell<$store>, $order $($rest)*))
+ };
+ ($val:expr; $len:expr) => {
+ $crate::bitvec!(usize, $crate::order::Lsb0; $val; $len)
+ };
+
+ // Delegate all others to the `bits!` macro.
+ ($($arg:tt)*) => {
+ $crate::vec::BitVec::from_bitslice($crate::bits!($($arg)*))
+ };
+}
+
+#[macro_export]
+#[cfg(feature = "alloc")]
+#[doc = include_str!("../doc/macros/bitbox.md")]
+macro_rules! bitbox {
+ ($($arg:tt)*) => {
+ $crate::bitvec!($($arg)*).into_boxed_bitslice()
+ };
+}
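Usage sketch for the two allocating macros above; note that only the repetition arm accepts a runtime length, because it routes to `BitVec::repeat` rather than to `bits!`:

    use bitvec::prelude::*;

    let v: BitVec<u8, Msb0> = bitvec![u8, Msb0; 0, 1, 0, 0, 1];
    assert_eq!(v.len(), 5);

    let n = 20; // a runtime value is fine here
    let b: BitBox = bitbox![1; n];
    assert_eq!(b.count_ones(), 20);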
diff --git a/src/macros/internal.rs b/src/macros/internal.rs
new file mode 100644
index 0000000..e7ba840
--- /dev/null
+++ b/src/macros/internal.rs
@@ -0,0 +1,414 @@
+#![doc(hidden)]
+#![doc = include_str!("../../doc/macros/internal.md")]
+
+// Provide known mount-points of dependency crates.
+
+#[doc(hidden)]
+pub use core;
+
+#[doc(hidden)]
+pub use funty;
+
+#[doc(hidden)]
+#[macro_export]
+#[doc = include_str!("../../doc/macros/encode_bits.md")]
+macro_rules! __encode_bits {
+ /* ENTRY POINTS
+ *
+ * These arms match the syntax provided by the public macros, and dispatch
+ * by storage type width.
+ */
+
+ (u8, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(u8 as u8, $ord; $($val),*)
+ };
+ (Cell<u8>, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(Cell<u8> as u8, $ord; $($val),*)
+ };
+ (AtomicU8, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(AtomicU8 as u8, $ord; $($val),*)
+ };
+ (RadiumU8, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(RadiumU8 as u8, $ord; $($val),*)
+ };
+
+ (u16, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(u16 as u16, $ord; $($val),*)
+ };
+ (Cell<u16>, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(Cell<u16> as u16, $ord; $($val),*)
+ };
+ (AtomicU16, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(AtomicU16 as u16, $ord; $($val),*)
+ };
+ (RadiumU16, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(RadiumU16 as u16, $ord; $($val),*)
+ };
+
+ (u32, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(u32 as u32, $ord; $($val),*)
+ };
+ (Cell<u32>, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(Cell<u32> as u32, $ord; $($val),*)
+ };
+ (AtomicU32, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(AtomicU32 as u32, $ord; $($val),*)
+ };
+ (RadiumU32, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(RadiumU32 as u32, $ord; $($val),*)
+ };
+
+ (u64, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(u64 as u64, $ord; $($val),*)
+ };
+ (Cell<u64>, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(Cell<u64> as u64, $ord; $($val),*)
+ };
+ (AtomicU64, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(AtomicU64 as u64, $ord; $($val),*)
+ };
+ (RadiumU64, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(RadiumU64 as u64, $ord; $($val),*)
+ };
+
+ (usize, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(usize as usize, $ord; $($val),*)
+ };
+ (Cell<usize>, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(Cell<usize> as usize, $ord; $($val),*)
+ };
+ (AtomicUsize, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(AtomicUsize as usize, $ord; $($val),*)
+ };
+ (RadiumUsize, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(RadiumUsize as usize, $ord; $($val),*)
+ };
+
+ // This arm routes `usize` into `u32` or `u64`, depending on target, and
+ // marks them to return to `usize` after chunking.
+ ($typ:ty as usize, $ord:tt; $($val:expr),*) => {{
+ const LEN: usize = $crate::__count_elts!(usize; $($val),*);
+
+ let out: [$typ; LEN];
+
+ #[cfg(target_pointer_width = "32")]
+ {
+ out = $crate::__encode_bits!($typ as u32 as usize, $ord; $($val),*);
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ out = $crate::__encode_bits!($typ as u64 as usize, $ord; $($val),*);
+ }
+
+ out
+ }};
+
+ // ZERO EXTENSION: Supply literal `0, ` tokens to ensure that elements can
+ // be completely filled with bits.
+ ($typ:ty as $uint:ident $(as $usz:ident)?, $ord:tt; $($val:expr),*) => {
+ $crate::__encode_bits!(
+ $typ as $uint $(as $usz)?, $ord; []; $($val,)*
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 32
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 48
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 64
+ )
+ };
+
+ /* EXIT POINT.
+ *
+ * This arm enters once the only remaining bit-expression tokens are the
+ * literal `0, `s provided above. It does not enter while any opaque
+ * user-provided bit expressions remain, and matching falls through to the
+ * chunkers, below.
+ *
+ * Once entered, this converts each chunk of bit expressions into the
+ * requested storage element, then emits an array of the encoded elements.
+ * This array is the final value of the originally-invoked macro. The
+ * invoker is responsible for turning the array into a `bitvec` type.
+ */
+ (
+ $typ:ty as $uint:ident as usize, $ord:tt;
+ [$([$($bit:tt,)+],)*]; $(0,)*
+ ) => {
+ [$($crate::__make_elem!($typ as $uint as usize, $ord; $($bit,)+),)*]
+ };
+ (
+ $typ:ty as $uint:ident, $ord:tt;
+ [$([$($bit:tt,)+],)*]; $(0,)*
+ ) => {
+ [$($crate::__make_elem!($typ as $uint, $ord; $($bit,)+),)*]
+ };
+
+ /* CHUNKERS
+ *
+ * These arms munch through the token stream, creating a sequence of chunks
+ * of bits. Each chunk contains bits to exactly fill one element, and gets
+ * passed into `__make_elem!` for final encoding.
+ */
+
+ (
+ $typ:ty as u8, $ord:tt; [$($elem:tt)*];
+ $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
+ $($t:tt)*
+ ) => {
+ $crate::__encode_bits!(
+ $typ as u8, $ord; [$($elem)* [
+ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
+ ],]; $($t)*
+ )
+ };
+
+ (
+ $typ:ty as u16, $ord:tt; [$($elem:tt)*];
+ $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
+ $a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt,
+ $($t:tt)*
+ ) => {
+ $crate::__encode_bits!(
+ $typ as u16, $ord; [$($elem)* [
+ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
+ $a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1,
+ ],]; $($t)*
+ )
+ };
+
+ (
+ $typ:ty as u32 $(as $usz:ident)?, $ord:tt; [$($elem:tt)*];
+ $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
+ $a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt,
+ $a2:tt, $b2:tt, $c2:tt, $d2:tt, $e2:tt, $f2:tt, $g2:tt, $h2:tt,
+ $a3:tt, $b3:tt, $c3:tt, $d3:tt, $e3:tt, $f3:tt, $g3:tt, $h3:tt,
+ $($t:tt)*
+ ) => {
+ $crate::__encode_bits!(
+ $typ as u32 $(as $usz)?, $ord; [$($elem)* [
+ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
+ $a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1,
+ $a2, $b2, $c2, $d2, $e2, $f2, $g2, $h2,
+ $a3, $b3, $c3, $d3, $e3, $f3, $g3, $h3,
+ ],]; $($t)*
+ )
+ };
+
+ (
+ $typ:ty as u64 $(as $usz:ident)?, $ord:tt; [$($elem:tt)*];
+ $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
+ $a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt,
+ $a2:tt, $b2:tt, $c2:tt, $d2:tt, $e2:tt, $f2:tt, $g2:tt, $h2:tt,
+ $a3:tt, $b3:tt, $c3:tt, $d3:tt, $e3:tt, $f3:tt, $g3:tt, $h3:tt,
+ $a4:tt, $b4:tt, $c4:tt, $d4:tt, $e4:tt, $f4:tt, $g4:tt, $h4:tt,
+ $a5:tt, $b5:tt, $c5:tt, $d5:tt, $e5:tt, $f5:tt, $g5:tt, $h5:tt,
+ $a6:tt, $b6:tt, $c6:tt, $d6:tt, $e6:tt, $f6:tt, $g6:tt, $h6:tt,
+ $a7:tt, $b7:tt, $c7:tt, $d7:tt, $e7:tt, $f7:tt, $g7:tt, $h7:tt,
+ $($t:tt)*
+ ) => {
+ $crate::__encode_bits!(
+ $typ as u64 $(as $usz)?, $ord; [$($elem)* [
+ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
+ $a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1,
+ $a2, $b2, $c2, $d2, $e2, $f2, $g2, $h2,
+ $a3, $b3, $c3, $d3, $e3, $f3, $g3, $h3,
+ $a4, $b4, $c4, $d4, $e4, $f4, $g4, $h4,
+ $a5, $b5, $c5, $d5, $e5, $f5, $g5, $h5,
+ $a6, $b6, $c6, $d6, $e6, $f6, $g6, $h6,
+ $a7, $b7, $c7, $d7, $e7, $f7, $g7, $h7,
+ ],]; $($t)*
+ )
+ };
+}
+
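A hedged illustration of the pipeline above: eight `Lsb0` bit-expressions are zero-extended, chunked, and folded into one `u8` element (the macro is `#[doc(hidden)]`; real code should prefer the public constructors):

    // The first listed bit becomes the least significant bit under `Lsb0`.
    let elem: [u8; 1] = bitvec::__encode_bits!(u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0);
    assert_eq!(elem, [0b0011_0101]); // == 53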
+/// Counts the number of expression tokens in a repetition sequence.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __count {
+ (@ $val:expr) => { 1 };
+ ($($val:expr),* $(,)?) => {{
+ const LEN: usize = 0 $(+ $crate::__count!(@ $val))*;
+ LEN
+ }};
+}
+
+/// Counts the number of storage elements needed to store a bit sequence.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __count_elts {
+ ($t:ty; $($val:expr),*) => {
+ $crate::mem::elts::<$t>($crate::__count!($($val),*))
+ };
+}
+
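Both counters are `const`-evaluable, which is what lets the public macros size their backing arrays; a small sketch:

    const BITS: usize = bitvec::__count!(1, 0, 1, 1);           // 4
    const ELTS: usize = bitvec::__count_elts!(u16; 1, 0, 1, 1); // elts::<u16>(4) == 1
    assert_eq!((BITS, ELTS), (4, 1));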
+#[doc(hidden)]
+#[macro_export]
+#[doc = include_str!("../../doc/macros/make_elem.md")]
+macro_rules! __make_elem {
+ // Token-matching ordering names can use specialized work.
+ ($typ:ty as $uint:ident $(as $usz:ident)?, Lsb0; $(
+ $a:expr, $b:expr, $c:expr, $d:expr,
+ $e:expr, $f:expr, $g:expr, $h:expr,
+ )*) => {{
+ const ELEM: $uint = $crate::__ty_from_bytes!(
+ $uint, Lsb0, [$($crate::macros::internal::u8_from_le_bits(
+ $a != 0, $b != 0, $c != 0, $d != 0,
+ $e != 0, $f != 0, $g != 0, $h != 0,
+ )),*]
+ );
+ $crate::mem::BitElement::<$typ>::new(ELEM $(as $usz)?).elem
+ }};
+ ($typ:ty as $uint:ident $(as $usz:ident)?, Msb0; $(
+ $a:expr, $b:expr, $c:expr, $d:expr,
+ $e:expr, $f:expr, $g:expr, $h:expr,
+ )*) => {{
+ const ELEM: $uint = $crate::__ty_from_bytes!(
+ $uint, Msb0, [$($crate::macros::internal::u8_from_be_bits(
+ $a != 0, $b != 0, $c != 0, $d != 0,
+ $e != 0, $f != 0, $g != 0, $h != 0,
+ )),*]
+ );
+ $crate::mem::BitElement::<$typ>::new(ELEM $(as $usz)?).elem
+ }};
+ ($typ:ty as $uint:ident $(as $usz:ident)?, LocalBits; $(
+ $a:expr, $b:expr, $c:expr, $d:expr,
+ $e:expr, $f:expr, $g:expr, $h:expr,
+ )*) => {{
+ const ELEM: $uint = $crate::__ty_from_bytes!(
+ $uint, LocalBits, [$($crate::macros::internal::u8_from_ne_bits(
+ $a != 0, $b != 0, $c != 0, $d != 0,
+ $e != 0, $f != 0, $g != 0, $h != 0,
+ )),*]
+ );
+ $crate::mem::BitElement::<$typ>::new(ELEM $(as $usz)?).elem
+ }};
+ // Otherwise, invoke `BitOrder` for each bit and accumulate.
+ ($typ:ty as $uint:ident $(as $usz:ident)?, $ord:tt; $($bit:expr),* $(,)?) => {{
+ let mut tmp: $uint = 0;
+ let _bits = $crate::slice::BitSlice::<$uint, $ord>::from_element_mut(
+ &mut tmp
+ );
+ let mut _idx = 0;
+ $( _bits.set(_idx, $bit != 0); _idx += 1; )*
+ $crate::mem::BitElement::<$typ>::new(tmp $(as $usz)?).elem
+ }};
+}
+
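A hedged sketch of the specialized `Msb0` arm (note the trailing comma, which the token-matcher requires in order to take the compile-time path rather than the runtime fallback):

    // The first bit-expression lands in the most significant position.
    let byte: u8 = bitvec::__make_elem!(u8 as u8, Msb0; 1, 0, 0, 0, 0, 0, 0, 1,);
    assert_eq!(byte, 0b1000_0001);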
+/// Translates `false` into `0` and `true` into `!0`.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __extend_bool {
+ ($val:expr, $typ:tt) => {{
+ type Mem = <$typ as $crate::store::BitStore>::Mem;
+ if $val != 0 {
+ <Mem as $crate::mem::BitRegister>::ALL
+ }
+ else {
+ <Mem as $crate::macros::internal::funty::Integral>::ZERO
+ }
+ }};
+}
+
+/// Constructs an unsigned integer from a list of *bytes*.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __ty_from_bytes {
+ (u8, Msb0, [$($byte:expr),*]) => {
+ u8::from_be_bytes([$($byte),*])
+ };
+ (u8, Lsb0, [$($byte:expr),*]) => {
+ u8::from_le_bytes([$($byte),*])
+ };
+ (u8, LocalBits, [$($byte:expr),*]) => {
+ u8::from_ne_bytes([$($byte),*])
+ };
+ (u16, Msb0, [$($byte:expr),*]) => {
+ u16::from_be_bytes([$($byte),*])
+ };
+ (u16, Lsb0, [$($byte:expr),*]) => {
+ u16::from_le_bytes([$($byte),*])
+ };
+ (u16, LocalBits, [$($byte:expr),*]) => {
+ u16::from_ne_bytes([$($byte),*])
+ };
+ (u32, Msb0, [$($byte:expr),*]) => {
+ u32::from_be_bytes([$($byte),*])
+ };
+ (u32, Lsb0, [$($byte:expr),*]) => {
+ u32::from_le_bytes([$($byte),*])
+ };
+ (u32, LocalBits, [$($byte:expr),*]) => {
+ u32::from_ne_bytes([$($byte),*])
+ };
+ (u64, Msb0, [$($byte:expr),*]) => {
+ u64::from_be_bytes([$($byte),*])
+ };
+ (u64, Lsb0, [$($byte:expr),*]) => {
+ u64::from_le_bytes([$($byte),*])
+ };
+ (u64, LocalBits, [$($byte:expr),*]) => {
+ u64::from_ne_bytes([$($byte),*])
+ };
+ (usize, Msb0, [$($byte:expr),*]) => {
+ usize::from_be_bytes([$($byte),*])
+ };
+ (usize, Lsb0, [$($byte:expr),*]) => {
+ usize::from_le_bytes([$($byte),*])
+ };
+ (usize, LocalBits, [$($byte:expr),*]) => {
+ usize::from_ne_bytes([$($byte),*])
+ };
+}
+
+/// Constructs a `u8` from bits applied in `Lsb0` order (`a` low, `h` high).
+#[doc(hidden)]
+#[inline(always)]
+#[cfg(not(tarpaulin_include))]
+pub const fn u8_from_le_bits(
+ a: bool,
+ b: bool,
+ c: bool,
+ d: bool,
+ e: bool,
+ f: bool,
+ g: bool,
+ h: bool,
+) -> u8 {
+ (a as u8)
+ | ((b as u8) << 1)
+ | ((c as u8) << 2)
+ | ((d as u8) << 3)
+ | ((e as u8) << 4)
+ | ((f as u8) << 5)
+ | ((g as u8) << 6)
+ | ((h as u8) << 7)
+}
+
+/// Constructs a `u8` from bits applied in `Msb0` order (`a` high, `h` low).
+#[doc(hidden)]
+#[inline(always)]
+#[cfg(not(tarpaulin_include))]
+pub const fn u8_from_be_bits(
+ a: bool,
+ b: bool,
+ c: bool,
+ d: bool,
+ e: bool,
+ f: bool,
+ g: bool,
+ h: bool,
+) -> u8 {
+ (h as u8)
+ | ((g as u8) << 1)
+ | ((f as u8) << 2)
+ | ((e as u8) << 3)
+ | ((d as u8) << 4)
+ | ((c as u8) << 5)
+ | ((b as u8) << 6)
+ | ((a as u8) << 7)
+}
+
+#[doc(hidden)]
+#[cfg(target_endian = "big")]
+pub use self::u8_from_be_bits as u8_from_ne_bits;
+#[doc(hidden)]
+#[cfg(target_endian = "little")]
+pub use self::u8_from_le_bits as u8_from_ne_bits;
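A quick check of the two helpers above (internal API, shown only for illustration): the same argument pattern lands at opposite ends of the byte.

    use bitvec::macros::internal::{u8_from_be_bits, u8_from_le_bits};

    let le = u8_from_le_bits(true, false, false, false, false, false, false, false);
    let be = u8_from_be_bits(true, false, false, false, false, false, false, false);
    assert_eq!((le, be), (0b0000_0001, 0b1000_0000));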
diff --git a/src/macros/tests.rs b/src/macros/tests.rs
new file mode 100644
index 0000000..4e921a9
--- /dev/null
+++ b/src/macros/tests.rs
@@ -0,0 +1,587 @@
+//! Invocation tests of each supported constructor-macro syntax.
+
+#![cfg(test)]
+
+use core::{
+ cell::Cell,
+ sync::atomic::*,
+};
+
+use radium::types::*;
+
+use crate::{
+ mem::bits_of,
+ prelude::*,
+};
+
+#[test]
+fn compile_bitarr_typedef() {
+ #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+ struct Slots {
+ all: BitArr!(for 10, in u8, Msb0),
+ typ: BitArr!(for 10, in u8),
+ def: BitArr!(for 10),
+ }
+
+ static SLOTS: Slots = Slots {
+ all: bitarr!(const u8, Msb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ typ: bitarr!(const u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ def: bitarr!(const 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ };
+
+ let slots = Slots {
+ all: bitarr!(u8, Msb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ typ: bitarr!(u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ def: bitarr!(1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ };
+
+ assert_eq!(SLOTS, slots);
+
+ assert_eq!(slots.all.into_inner(), [!0u8, 192]);
+ assert_eq!(slots.typ.into_inner(), [!0u8, 3]);
+ let def: [usize; 1] = slots.def.into_inner();
+ assert_eq!(def[0].count_ones(), 10);
+}
+
+#[test]
+fn constexpr_macros() {
+ const A: BitArr!(for 20, in Cell<u8>, Lsb0) =
+ bitarr!(const Cell<u8>, Lsb0; 1; 20);
+ let a = A;
+ assert_eq!(a.len(), 24);
+ assert!(a.all());
+
+ const B: BitArr!(for 20) = bitarr!(const 1; 20);
+ let b = B;
+ assert_eq!(b.len(), bits_of::<usize>());
+ assert!(b.all());
+
+ const C: BitArr!(for 5, in Cell<u16>, Msb0) =
+ bitarr!(const Cell<u16>, Msb0; 1, 0, 1, 1, 0);
+ let c = C;
+ assert_eq!(c[.. 5], bits![1, 0, 1, 1, 0]);
+
+ const D: BitArr!(for 5, in u32, Lsb0) =
+ bitarr!(const u32, Lsb0; 1, 0, 1, 1, 0);
+ let d = D;
+ assert_eq!(d[.. 5], bits![1, 0, 1, 1, 0]);
+
+ let _: &'static mut BitSlice<Cell<u16>, Msb0> =
+ unsafe { bits!(static mut Cell<u16>, Msb0; 1; 20) };
+ let _: &'static mut BitSlice<u32, Lsb0> =
+ unsafe { bits!(static mut u32, Lsb0; 1; 20) };
+ let _: &'static mut BitSlice = unsafe { bits!(static mut 1; 20) };
+
+ let _: &'static mut BitSlice<Cell<u16>, Msb0> =
+ unsafe { bits!(static mut Cell<u16>, Msb0; 1, 0, 1, 1, 0) };
+ let _: &'static mut BitSlice<Cell<u32>, Msb0> =
+ unsafe { bits!(static mut Cell<u32>, Msb0; 1, 0, 1, 1, 0) };
+ let _: &'static mut BitSlice = unsafe { bits!(static mut 1, 0, 1, 1, 0) };
+
+ let _: &'static BitSlice<Cell<u16>, Msb0> =
+ bits!(static Cell<u16>, Msb0; 1; 20);
+ let _: &'static BitSlice<u32, Lsb0> = bits!(static u32, Lsb0; 1, 0, 1, 1, 0);
+ let _: &'static BitSlice = bits!(static 1; 20);
+
+ let _: &'static BitSlice<Cell<u16>, Msb0> =
+ bits!(static Cell<u16>, Msb0; 1, 0, 1, 1, 0);
+ let _: &'static BitSlice<u32, Msb0> = bits!(static u32, Msb0; 1, 0, 1, 1, 0);
+ let _: &'static BitSlice = bits!(static 1, 0, 1, 1, 0);
+}
+
+#[test]
+fn compile_bitarr() {
+ let uint: BitArray<[u8; 1], Lsb0> = bitarr![u8, Lsb0; 1, 0, 1, 0];
+ assert_eq!(uint.into_inner(), [5u8]);
+ let cell: BitArray<[Cell<u8>; 1], Lsb0> =
+ bitarr![Cell<u8>, Lsb0; 1, 0, 1, 0];
+ assert_eq!(cell.into_inner()[0].get(), 5u8);
+
+ let uint: BitArray<[u16; 2], Msb0> = bitarr![u16, Msb0;
+ 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 1, 1, 1, 0, 1, 0, 0,
+ ];
+ assert_eq!(uint.into_inner(), [0x5569, 0x6E74]);
+ let cell: BitArray<[Cell<u16>; 2], Msb0> = bitarr![Cell<u16>, Msb0;
+ 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 1, 1, 0,
+ 0, 1, 1, 1, 0, 1, 0, 0,
+ ];
+ let cells = cell.into_inner();
+ assert_eq!(cells[0].get(), 0x5569);
+ assert_eq!(cells[1].get(), 0x6E74);
+
+ let uint: BitArray<[u32; 1], Lsb0> =
+ bitarr![u32, crate::order::Lsb0; 1, 0, 1, 1];
+ assert_eq!(uint.into_inner(), [13u32]);
+ let cell: BitArray<[Cell<u32>; 1], Lsb0> =
+ bitarr![Cell<u32>, crate::order::Lsb0; 1, 0, 1, 1];
+ assert_eq!(cell.into_inner()[0].get(), 13u32);
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let uint: BitArray<[u64; 2], LocalBits> = bitarr![u64, LocalBits; 1; 70];
+ assert_eq!(uint.into_inner(), [!0u64; 2]);
+
+ let cell: BitArray<[Cell<u64>; 2], LocalBits> =
+ bitarr![Cell<u64>, LocalBits; 1; 70];
+ assert_eq!(cell.clone().into_inner()[0].get(), !0u64);
+ assert_eq!(cell.into_inner()[1].get(), !0u64);
+ }
+
+ let uint: BitArray<[usize; 1], Lsb0> = bitarr![1, 0, 1];
+ assert_eq!(uint.into_inner(), [5usize]);
+ let uint: BitArray<[usize; 1], Lsb0> = bitarr![1;30];
+ assert_eq!(uint.into_inner(), [!0usize]);
+}
+
+#[test]
+#[allow(clippy::many_single_char_names)]
+fn compile_bits() {
+ let a: &mut BitSlice<Cell<u8>, Lsb0> = bits![mut Cell<u8>, Lsb0; 1, 0, 1];
+ let b: &mut BitSlice<u8, Lsb0> = bits![mut u8, Lsb0; 1, 0, 1];
+ let c: &mut BitSlice<Cell<u8>, Msb0> =
+ bits![mut Cell<u8>, crate::order::Msb0; 1, 0, 1];
+ let d: &mut BitSlice<u8, Msb0> = bits![mut u8, crate::order::Msb0; 1, 0, 1];
+ assert_eq!(a, c);
+ assert_eq!(b, d);
+
+ let e: &mut BitSlice<Cell<u8>, Lsb0> = bits![mut Cell<u8>, Lsb0; 1; 100];
+ let f: &mut BitSlice<u8, Lsb0> = bits![mut u8, Lsb0; 1; 100];
+ let g: &mut BitSlice<Cell<u8>, Msb0> =
+ bits![mut Cell<u8>, crate::order::Msb0; 1; 100];
+ let h: &mut BitSlice<u8, Msb0> = bits![mut u8, crate::order::Msb0; 1; 100];
+ assert_eq!(e, g);
+ assert_eq!(f, h);
+ assert!(h.domain().take(12).all(|e| e == !0));
+ assert_eq!(h.domain().next_back().unwrap(), 0b1111_0000);
+ assert_eq!(h.domain().len(), 13);
+
+ let i: &mut BitSlice<usize, Lsb0> = bits![mut 1, 0, 1];
+ let j: &mut BitSlice<usize, Lsb0> = bits![mut 1; 3];
+ j.set(1, false);
+ assert_eq!(i, j);
+
+ let _: &BitSlice<Cell<u8>, Lsb0> = bits![Cell<u8>, Lsb0; 1, 0, 1];
+ let _: &BitSlice<u8, Lsb0> = bits![u8, Lsb0; 1, 0, 1];
+ let _: &BitSlice<Cell<u8>, Msb0> =
+ bits![Cell<u8>, crate::order::Msb0; 1, 0, 1];
+ let _: &BitSlice<u8, Msb0> = bits![u8, crate::order::Msb0; 1, 0, 1];
+
+ let _: &BitSlice<Cell<u8>, Lsb0> = bits![Cell<u8>, Lsb0; 1; 100];
+ let _: &BitSlice<u8, Lsb0> = bits![u8, Lsb0; 1; 100];
+ let _: &BitSlice<Cell<u8>, Msb0> =
+ bits![Cell<u8>, crate::order::Msb0; 1; 100];
+ let _: &BitSlice<u8, Msb0> = bits![u8, crate::order::Msb0; 1; 100];
+
+ let _: &BitSlice<usize, Lsb0> = bits![1, 0, 1];
+ let _: &BitSlice<usize, Lsb0> = bits![1; 100];
+
+ let _: &BitSlice<Cell<u16>, Lsb0> = bits![Cell<u16>, Lsb0; 1, 0, 1];
+ let _: &BitSlice<u16, Lsb0> = bits![u16, Lsb0; 1, 0, 1];
+ let _: &BitSlice<Cell<u16>, Msb0> =
+ bits![Cell<u16>, crate::order::Msb0; 1, 0, 1];
+ let _: &BitSlice<u16, Msb0> = bits![u16, crate::order::Msb0; 1, 0, 1];
+
+ let _: &BitSlice<Cell<u16>, Lsb0> = bits![Cell<u16>, Lsb0; 1; 100];
+ let _: &BitSlice<u16, Lsb0> = bits![u16, Lsb0; 1; 100];
+ let _: &BitSlice<Cell<u16>, Msb0> =
+ bits![Cell<u16>, crate::order::Msb0; 1; 100];
+ let _: &BitSlice<u16, Msb0> = bits![u16, crate::order::Msb0; 1; 100];
+
+ let _: &BitSlice<Cell<u32>, Lsb0> = bits![Cell<u32>, Lsb0; 1, 0, 1];
+ let _: &BitSlice<u32, Lsb0> = bits![u32, Lsb0; 1, 0, 1];
+ let _: &BitSlice<Cell<u32>, Msb0> =
+ bits![Cell<u32>, crate::order::Msb0; 1, 0, 1];
+ let _: &BitSlice<u32, Msb0> = bits![u32, crate::order::Msb0; 1, 0, 1];
+
+ let _: &BitSlice<Cell<u32>, Lsb0> = bits![Cell<u32>, Lsb0; 1; 100];
+ let _: &BitSlice<u32, Lsb0> = bits![u32, Lsb0; 1; 100];
+ let _: &BitSlice<Cell<u32>, Msb0> =
+ bits![Cell<u32>, crate::order::Msb0; 1; 100];
+ let _: &BitSlice<u32, Msb0> = bits![u32, crate::order::Msb0; 1; 100];
+
+ let _: &BitSlice<Cell<usize>, Lsb0> = bits![Cell<usize>, Lsb0; 1, 0, 1];
+ let _: &BitSlice<usize, Lsb0> = bits![usize, Lsb0; 1, 0, 1];
+ let _: &BitSlice<Cell<usize>, Msb0> =
+ bits![Cell<usize>, crate::order::Msb0; 1, 0, 1];
+ let _: &BitSlice<usize, Msb0> = bits![usize, crate::order::Msb0; 1, 0, 1];
+
+ let _: &BitSlice<Cell<usize>, Lsb0> = bits![Cell<usize>, Lsb0; 1; 100];
+ let _: &BitSlice<usize, Lsb0> = bits![usize, Lsb0; 1; 100];
+ let _: &BitSlice<Cell<usize>, Msb0> =
+ bits![Cell<usize>, crate::order::Msb0; 1; 100];
+ let _: &BitSlice<usize, Msb0> = bits![usize, crate::order::Msb0; 1; 100];
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let _: &BitSlice<Cell<u64>, Lsb0> = bits![Cell<u64>, Lsb0; 1, 0, 1];
+ let _: &BitSlice<u64, Lsb0> = bits![u64, Lsb0; 1, 0, 1];
+ let _: &BitSlice<Cell<u64>, Msb0> =
+ bits![Cell<u64>, crate::order::Msb0; 1, 0, 1];
+ let _: &BitSlice<u64, Msb0> = bits![u64, crate::order::Msb0; 1, 0, 1];
+
+ let _: &BitSlice<Cell<u64>, Lsb0> = bits![Cell<u64>, Lsb0; 1; 100];
+ let _: &BitSlice<u64, Lsb0> = bits![u64, Lsb0; 1; 100];
+ let _: &BitSlice<Cell<u64>, Msb0> =
+ bits![Cell<u64>, crate::order::Msb0; 1; 100];
+ let _: &BitSlice<u64, Msb0> = bits![u64, crate::order::Msb0; 1; 100];
+ }
+
+ radium::if_atomic! {
+ if atomic(8) {
+ let _: &BitSlice<AtomicU8, LocalBits> = bits![AtomicU8, LocalBits; 0, 1];
+ let _: &BitSlice<AtomicU8, Lsb0> = bits![AtomicU8, Lsb0; 0, 1];
+ let _: &BitSlice<AtomicU8, Msb0> = bits![AtomicU8, Msb0; 0, 1];
+ let _: &BitSlice<RadiumU8, LocalBits> = bits![RadiumU8, LocalBits; 1; 100];
+ let _: &BitSlice<RadiumU8, Lsb0> = bits![RadiumU8, Lsb0; 1; 100];
+ let _: &BitSlice<RadiumU8, Msb0> = bits![RadiumU8, Msb0; 1; 100];
+ }
+ if atomic(16) {
+ let _: &BitSlice<AtomicU16, LocalBits> = bits![AtomicU16, LocalBits; 0, 1];
+ let _: &BitSlice<AtomicU16, Lsb0> = bits![AtomicU16, Lsb0; 0, 1];
+ let _: &BitSlice<AtomicU16, Msb0> = bits![AtomicU16, Msb0; 0, 1];
+ let _: &BitSlice<RadiumU16, LocalBits> = bits![RadiumU16, LocalBits; 1; 100];
+ let _: &BitSlice<RadiumU16, Lsb0> = bits![RadiumU16, Lsb0; 1; 100];
+ let _: &BitSlice<RadiumU16, Msb0> = bits![RadiumU16, Msb0; 1; 100];
+ }
+ if atomic(32) {
+ let _: &BitSlice<AtomicU32, LocalBits> = bits![AtomicU32, LocalBits; 0, 1];
+ let _: &BitSlice<AtomicU32, Lsb0> = bits![AtomicU32, Lsb0; 0, 1];
+ let _: &BitSlice<AtomicU32, Msb0> = bits![AtomicU32, Msb0; 0, 1];
+ let _: &BitSlice<RadiumU32, LocalBits> = bits![RadiumU32, LocalBits; 1; 100];
+ let _: &BitSlice<RadiumU32, Lsb0> = bits![RadiumU32, Lsb0; 1; 100];
+ let _: &BitSlice<RadiumU32, Msb0> = bits![RadiumU32, Msb0; 1; 100];
+ }
+ if atomic(size) {
+ let _: &BitSlice<AtomicUsize, LocalBits> = bits![AtomicUsize, LocalBits; 0, 1];
+ let _: &BitSlice<AtomicUsize, Lsb0> = bits![AtomicUsize, Lsb0; 0, 1];
+ let _: &BitSlice<AtomicUsize, Msb0> = bits![AtomicUsize, Msb0; 0, 1];
+ let _: &BitSlice<RadiumUsize, LocalBits> = bits![RadiumUsize, LocalBits; 1; 100];
+ let _: &BitSlice<RadiumUsize, Lsb0> = bits![RadiumUsize, Lsb0; 1; 100];
+ let _: &BitSlice<RadiumUsize, Msb0> = bits![RadiumUsize, Msb0; 1; 100];
+ }
+ }
+ #[cfg(target_pointer_width = "64")]
+ radium::if_atomic! {
+ if atomic(64) {
+ let _: &BitSlice<AtomicU64, LocalBits> = bits![AtomicU64, LocalBits; 0, 1];
+ let _: &BitSlice<AtomicU64, Lsb0> = bits![AtomicU64, Lsb0; 0, 1];
+ let _: &BitSlice<AtomicU64, Msb0> = bits![AtomicU64, Msb0; 0, 1];
+ let _: &BitSlice<RadiumU64, LocalBits> = bits![RadiumU64, LocalBits; 1; 100];
+ let _: &BitSlice<RadiumU64, Lsb0> = bits![RadiumU64, Lsb0; 1; 100];
+ let _: &BitSlice<RadiumU64, Msb0> = bits![RadiumU64, Msb0; 1; 100];
+ }
+ }
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn compile_bitvec() {
+ let _: BitVec<Cell<u8>, Lsb0> = bitvec![Cell<u8>, Lsb0; 1, 0, 1];
+ let _: BitVec<u8, Lsb0> = bitvec![u8, Lsb0; 1, 0, 1];
+ let _: BitVec<Cell<u8>, Msb0> =
+ bitvec![Cell<u8>, crate::order::Msb0; 1, 0, 1];
+ let _: BitVec<u8, Msb0> = bitvec![u8, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitVec<Cell<u8>, Lsb0> = bitvec![Cell<u8>, Lsb0; 1; 100];
+ let _: BitVec<u8, Lsb0> = bitvec![u8, Lsb0; 1; 100];
+ let _: BitVec<Cell<u8>, Msb0> =
+ bitvec![Cell<u8>, crate::order::Msb0; 1; 100];
+ let _: BitVec<u8, Msb0> = bitvec![u8, crate::order::Msb0; 1; 100];
+
+ let _: BitVec<usize, Lsb0> = bitvec![1, 0, 1];
+ let _: BitVec<usize, Lsb0> = bitvec![1; 100];
+
+ let _: BitVec<Cell<u16>, Lsb0> = bitvec![Cell<u16>, Lsb0; 1, 0, 1];
+ let _: BitVec<u16, Lsb0> = bitvec![u16, Lsb0; 1, 0, 1];
+ let _: BitVec<Cell<u16>, Msb0> =
+ bitvec![Cell<u16>, crate::order::Msb0; 1, 0, 1];
+ let _: BitVec<u16, Msb0> = bitvec![u16, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitVec<Cell<u16>, Lsb0> = bitvec![Cell<u16>, Lsb0; 1; 100];
+ let _: BitVec<u16, Lsb0> = bitvec![u16, Lsb0; 1; 100];
+ let _: BitVec<Cell<u16>, Msb0> =
+ bitvec![Cell<u16>, crate::order::Msb0; 1; 100];
+ let _: BitVec<u16, Msb0> = bitvec![u16, crate::order::Msb0; 1; 100];
+
+ let _: BitVec<Cell<u32>, Lsb0> = bitvec![Cell<u32>, Lsb0; 1, 0, 1];
+ let _: BitVec<u32, Lsb0> = bitvec![u32, Lsb0; 1, 0, 1];
+ let _: BitVec<Cell<u32>, Msb0> =
+ bitvec![Cell<u32>, crate::order::Msb0; 1, 0, 1];
+ let _: BitVec<u32, Msb0> = bitvec![u32, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitVec<Cell<u32>, Lsb0> = bitvec![Cell<u32>, Lsb0; 1; 100];
+ let _: BitVec<u32, Lsb0> = bitvec![u32, Lsb0; 1; 100];
+ let _: BitVec<Cell<u32>, Msb0> =
+ bitvec![Cell<u32>, crate::order::Msb0; 1; 100];
+ let _: BitVec<u32, Msb0> = bitvec![u32, crate::order::Msb0; 1; 100];
+
+ let _: BitVec<Cell<usize>, Lsb0> = bitvec![Cell<usize>, Lsb0; 1, 0, 1];
+ let _: BitVec<usize, Lsb0> = bitvec![usize, Lsb0; 1, 0, 1];
+ let _: BitVec<Cell<usize>, Msb0> =
+ bitvec![Cell<usize>, crate::order::Msb0; 1, 0, 1];
+ let _: BitVec<usize, Msb0> = bitvec![usize, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitVec<Cell<usize>, Lsb0> = bitvec![Cell<usize>, Lsb0; 1; 100];
+ let _: BitVec<usize, Lsb0> = bitvec![usize, Lsb0; 1; 100];
+ let _: BitVec<Cell<usize>, Msb0> =
+ bitvec![Cell<usize>, crate::order::Msb0; 1; 100];
+ let _: BitVec<usize, Msb0> = bitvec![usize, crate::order::Msb0; 1; 100];
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let _: BitVec<Cell<u64>, Lsb0> = bitvec![Cell<u64>, Lsb0; 1, 0, 1];
+ let _: BitVec<u64, Lsb0> = bitvec![u64, Lsb0; 1, 0, 1];
+ let _: BitVec<Cell<u64>, Msb0> =
+ bitvec![Cell<u64>, crate::order::Msb0; 1, 0, 1];
+ let _: BitVec<u64, Msb0> = bitvec![u64, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitVec<Cell<u64>, Lsb0> = bitvec![Cell<u64>, Lsb0; 1; 100];
+ let _: BitVec<u64, Lsb0> = bitvec![u64, Lsb0; 1; 100];
+ let _: BitVec<Cell<u64>, Msb0> =
+ bitvec![Cell<u64>, crate::order::Msb0; 1; 100];
+ let _: BitVec<u64, Msb0> = bitvec![u64, crate::order::Msb0; 1; 100];
+ }
+ radium::if_atomic! {
+ if atomic(8) {
+ let _: BitVec<AtomicU8, LocalBits> = bitvec![AtomicU8, LocalBits; 0, 1];
+ let _: BitVec<AtomicU8, Lsb0> = bitvec![AtomicU8, Lsb0; 0, 1];
+ let _: BitVec<AtomicU8, Msb0> = bitvec![AtomicU8, Msb0; 0, 1];
+ let _: BitVec<RadiumU8, LocalBits> = bitvec![RadiumU8, LocalBits; 1; 100];
+ let _: BitVec<RadiumU8, Lsb0> = bitvec![RadiumU8, Lsb0; 1; 100];
+ let _: BitVec<RadiumU8, Msb0> = bitvec![RadiumU8, Msb0; 1; 100];
+ }
+ if atomic(16) {
+ let _: BitVec<AtomicU16, LocalBits> = bitvec![AtomicU16, LocalBits; 0, 1];
+ let _: BitVec<AtomicU16, Lsb0> = bitvec![AtomicU16, Lsb0; 0, 1];
+ let _: BitVec<AtomicU16, Msb0> = bitvec![AtomicU16, Msb0; 0, 1];
+ let _: BitVec<RadiumU16, LocalBits> = bitvec![RadiumU16, LocalBits; 1; 100];
+ let _: BitVec<RadiumU16, Lsb0> = bitvec![RadiumU16, Lsb0; 1; 100];
+ let _: BitVec<RadiumU16, Msb0> = bitvec![RadiumU16, Msb0; 1; 100];
+ }
+ if atomic(32) {
+ let _: BitVec<AtomicU32, LocalBits> = bitvec![AtomicU32, LocalBits; 0, 1];
+ let _: BitVec<AtomicU32, Lsb0> = bitvec![AtomicU32, Lsb0; 0, 1];
+ let _: BitVec<AtomicU32, Msb0> = bitvec![AtomicU32, Msb0; 0, 1];
+ let _: BitVec<RadiumU32, LocalBits> = bitvec![RadiumU32, LocalBits; 1; 100];
+ let _: BitVec<RadiumU32, Lsb0> = bitvec![RadiumU32, Lsb0; 1; 100];
+ let _: BitVec<RadiumU32, Msb0> = bitvec![RadiumU32, Msb0; 1; 100];
+ }
+ if atomic(size) {
+ let _: BitVec<AtomicUsize, LocalBits> = bitvec![AtomicUsize, LocalBits; 0, 1];
+ let _: BitVec<AtomicUsize, Lsb0> = bitvec![AtomicUsize, Lsb0; 0, 1];
+ let _: BitVec<AtomicUsize, Msb0> = bitvec![AtomicUsize, Msb0; 0, 1];
+ let _: BitVec<RadiumUsize, LocalBits> = bitvec![RadiumUsize, LocalBits; 1; 100];
+ let _: BitVec<RadiumUsize, Lsb0> = bitvec![RadiumUsize, Lsb0; 1; 100];
+ let _: BitVec<RadiumUsize, Msb0> = bitvec![RadiumUsize, Msb0; 1; 100];
+ }
+ }
+ #[cfg(target_pointer_width = "64")]
+ radium::if_atomic! {
+ if atomic(64) {
+ let _: BitVec<AtomicU64, LocalBits> = bitvec![AtomicU64, LocalBits; 0, 1];
+ let _: BitVec<AtomicU64, Lsb0> = bitvec![AtomicU64, Lsb0; 0, 1];
+ let _: BitVec<AtomicU64, Msb0> = bitvec![AtomicU64, Msb0; 0, 1];
+ let _: BitVec<RadiumU64, LocalBits> = bitvec![RadiumU64, LocalBits; 1; 100];
+ let _: BitVec<RadiumU64, Lsb0> = bitvec![RadiumU64, Lsb0; 1; 100];
+ let _: BitVec<RadiumU64, Msb0> = bitvec![RadiumU64, Msb0; 1; 100];
+ }
+ }
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn compile_bitbox() {
+ let _: BitBox<Cell<u8>, Lsb0> = bitbox![Cell<u8>, Lsb0; 1, 0, 1];
+ let _: BitBox<u8, Lsb0> = bitbox![u8, Lsb0; 1, 0, 1];
+ let _: BitBox<Cell<u8>, Msb0> =
+ bitbox![Cell<u8>, crate::order::Msb0; 1, 0, 1];
+ let _: BitBox<u8, Msb0> = bitbox![u8, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitBox<Cell<u8>, Lsb0> = bitbox![Cell<u8>, Lsb0; 1; 100];
+ let _: BitBox<u8, Lsb0> = bitbox![u8, Lsb0; 1; 100];
+ let _: BitBox<Cell<u8>, Msb0> =
+ bitbox![Cell<u8>, crate::order::Msb0; 1; 100];
+ let _: BitBox<u8, Msb0> = bitbox![u8, crate::order::Msb0; 1; 100];
+
+ let _: BitBox<usize, Lsb0> = bitbox![1, 0, 1];
+ let _: BitBox<usize, Lsb0> = bitbox![1; 100];
+
+ let _: BitBox<Cell<u16>, Lsb0> = bitbox![Cell<u16>, Lsb0; 1, 0, 1];
+ let _: BitBox<u16, Lsb0> = bitbox![u16, Lsb0; 1, 0, 1];
+ let _: BitBox<Cell<u16>, Msb0> =
+ bitbox![Cell<u16>, crate::order::Msb0; 1, 0, 1];
+ let _: BitBox<u16, Msb0> = bitbox![u16, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitBox<Cell<u16>, Lsb0> = bitbox![Cell<u16>, Lsb0; 1; 100];
+ let _: BitBox<u16, Lsb0> = bitbox![u16, Lsb0; 1; 100];
+ let _: BitBox<Cell<u16>, Msb0> =
+ bitbox![Cell<u16>, crate::order::Msb0; 1; 100];
+ let _: BitBox<u16, Msb0> = bitbox![u16, crate::order::Msb0; 1; 100];
+
+ let _: BitBox<Cell<u32>, Lsb0> = bitbox![Cell<u32>, Lsb0; 1, 0, 1];
+ let _: BitBox<u32, Lsb0> = bitbox![u32, Lsb0; 1, 0, 1];
+ let _: BitBox<Cell<u32>, Msb0> =
+ bitbox![Cell<u32>, crate::order::Msb0; 1, 0, 1];
+ let _: BitBox<u32, Msb0> = bitbox![u32, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitBox<Cell<u32>, Lsb0> = bitbox![Cell<u32>, Lsb0; 1; 100];
+ let _: BitBox<u32, Lsb0> = bitbox![u32, Lsb0; 1; 100];
+ let _: BitBox<Cell<u32>, Msb0> =
+ bitbox![Cell<u32>, crate::order::Msb0; 1; 100];
+ let _: BitBox<u32, Msb0> = bitbox![u32, crate::order::Msb0; 1; 100];
+
+ let _: BitBox<Cell<usize>, Lsb0> = bitbox![Cell<usize>, Lsb0; 1, 0, 1];
+ let _: BitBox<usize, Lsb0> = bitbox![usize, Lsb0; 1, 0, 1];
+ let _: BitBox<Cell<usize>, Msb0> =
+ bitbox![Cell<usize>, crate::order::Msb0; 1, 0, 1];
+ let _: BitBox<usize, Msb0> = bitbox![usize, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitBox<Cell<usize>, Lsb0> = bitbox![Cell<usize>, Lsb0; 1; 100];
+ let _: BitBox<usize, Lsb0> = bitbox![usize, Lsb0; 1; 100];
+ let _: BitBox<Cell<usize>, Msb0> =
+ bitbox![Cell<usize>, crate::order::Msb0; 1; 100];
+ let _: BitBox<usize, Msb0> = bitbox![usize, crate::order::Msb0; 1; 100];
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ let _: BitBox<Cell<u64>, Lsb0> = bitbox![Cell<u64>, Lsb0; 1, 0, 1];
+ let _: BitBox<u64, Lsb0> = bitbox![u64, Lsb0; 1, 0, 1];
+ let _: BitBox<Cell<u64>, Msb0> =
+ bitbox![Cell<u64>, crate::order::Msb0; 1, 0, 1];
+ let _: BitBox<u64, Msb0> = bitbox![u64, crate::order::Msb0; 1, 0, 1];
+
+ let _: BitBox<Cell<u64>, Lsb0> = bitbox![Cell<u64>, Lsb0; 1; 100];
+ let _: BitBox<u64, Lsb0> = bitbox![u64, Lsb0; 1; 100];
+ let _: BitBox<Cell<u64>, Msb0> =
+ bitbox![Cell<u64>, crate::order::Msb0; 1; 100];
+ let _: BitBox<u64, Msb0> = bitbox![u64, crate::order::Msb0; 1; 100];
+ }
+ radium::if_atomic! {
+ if atomic(8) {
+ let _: BitBox<AtomicU8, LocalBits> = bitbox![AtomicU8, LocalBits; 0, 1];
+ let _: BitBox<AtomicU8, Lsb0> = bitbox![AtomicU8, Lsb0; 0, 1];
+ let _: BitBox<AtomicU8, Msb0> = bitbox![AtomicU8, Msb0; 0, 1];
+ let _: BitBox<RadiumU8, LocalBits> = bitbox![RadiumU8, LocalBits; 1; 100];
+ let _: BitBox<RadiumU8, Lsb0> = bitbox![RadiumU8, Lsb0; 1; 100];
+ let _: BitBox<RadiumU8, Msb0> = bitbox![RadiumU8, Msb0; 1; 100];
+ }
+ if atomic(16) {
+ let _: BitBox<AtomicU16, LocalBits> = bitbox![AtomicU16, LocalBits; 0, 1];
+ let _: BitBox<AtomicU16, Lsb0> = bitbox![AtomicU16, Lsb0; 0, 1];
+ let _: BitBox<AtomicU16, Msb0> = bitbox![AtomicU16, Msb0; 0, 1];
+ let _: BitBox<RadiumU16, LocalBits> = bitbox![RadiumU16, LocalBits; 1; 100];
+ let _: BitBox<RadiumU16, Lsb0> = bitbox![RadiumU16, Lsb0; 1; 100];
+ let _: BitBox<RadiumU16, Msb0> = bitbox![RadiumU16, Msb0; 1; 100];
+ }
+ if atomic(32) {
+ let _: BitBox<AtomicU32, LocalBits> = bitbox![AtomicU32, LocalBits; 0, 1];
+ let _: BitBox<AtomicU32, Lsb0> = bitbox![AtomicU32, Lsb0; 0, 1];
+ let _: BitBox<AtomicU32, Msb0> = bitbox![AtomicU32, Msb0; 0, 1];
+ let _: BitBox<RadiumU32, LocalBits> = bitbox![RadiumU32, LocalBits; 1; 100];
+ let _: BitBox<RadiumU32, Lsb0> = bitbox![RadiumU32, Lsb0; 1; 100];
+ let _: BitBox<RadiumU32, Msb0> = bitbox![RadiumU32, Msb0; 1; 100];
+ }
+ if atomic(size) {
+ let _: BitBox<AtomicUsize, LocalBits> = bitbox![AtomicUsize, LocalBits; 0, 1];
+ let _: BitBox<AtomicUsize, Lsb0> = bitbox![AtomicUsize, Lsb0; 0, 1];
+ let _: BitBox<AtomicUsize, Msb0> = bitbox![AtomicUsize, Msb0; 0, 1];
+ let _: BitBox<RadiumUsize, LocalBits> = bitbox![RadiumUsize, LocalBits; 1; 100];
+ let _: BitBox<RadiumUsize, Lsb0> = bitbox![RadiumUsize, Lsb0; 1; 100];
+ let _: BitBox<RadiumUsize, Msb0> = bitbox![RadiumUsize, Msb0; 1; 100];
+ }
+ }
+ #[cfg(target_pointer_width = "64")]
+ radium::if_atomic! {
+ if atomic(64) {
+ let _: BitBox<AtomicU64, LocalBits> = bitbox![AtomicU64, LocalBits; 0, 1];
+ let _: BitBox<AtomicU64, Lsb0> = bitbox![AtomicU64, Lsb0; 0, 1];
+ let _: BitBox<AtomicU64, Msb0> = bitbox![AtomicU64, Msb0; 0, 1];
+ let _: BitBox<RadiumU64, LocalBits> = bitbox![RadiumU64, LocalBits; 1; 100];
+ let _: BitBox<RadiumU64, Lsb0> = bitbox![RadiumU64, Lsb0; 1; 100];
+ let _: BitBox<RadiumU64, Msb0> = bitbox![RadiumU64, Msb0; 1; 100];
+ }
+ }
+}
+
+#[test]
+fn encode_bits() {
+ let uint: [u8; 1] = __encode_bits!(u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0);
+ assert_eq!(uint, [53]);
+
+ let cell: [Cell<u8>; 1] =
+ __encode_bits!(Cell<u8>, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0);
+ assert_eq!(cell[0].get(), 53);
+
+ let uint: [u16; 1] = __encode_bits!(u16, Msb0;
+ 0, 1, 0, 0, 1, 0, 0, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1
+ );
+ assert_eq!(uint, [0x4869]);
+
+ let cell: [Cell<u16>; 1] = __encode_bits!(Cell<u16>, Msb0;
+ 0, 1, 0, 0, 1, 0, 0, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1
+ );
+ assert_eq!(cell[0].get(), 0x4869);
+
+ let uint: [u32; 1] = __encode_bits!(u32, LocalBits; 1, 0, 1);
+ assert_eq!(uint.view_bits::<LocalBits>()[.. 3], bits![1, 0, 1]);
+
+ let cell: [Cell<u32>; 1] = __encode_bits!(Cell<u32>, LocalBits; 1, 0, 1);
+ assert_eq!(cell.view_bits::<LocalBits>()[.. 3], bits![1, 0, 1]);
+}
+
+#[test]
+fn make_elem() {
+ let uint: u8 = __make_elem!(u8 as u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0);
+ assert_eq!(uint, 53);
+
+ let cell: Cell<u8> =
+ __make_elem!(Cell<u8> as u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0);
+ assert_eq!(cell.get(), 53);
+
+ let uint: u16 = __make_elem!(u16 as u16, Msb0;
+ 0, 1, 0, 0, 1, 0, 0, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1
+ );
+ assert_eq!(uint, 0x4869);
+
+ let cell: Cell<u16> = __make_elem!(Cell<u16> as u16, Msb0;
+ 0, 1, 0, 0, 1, 0, 0, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1
+ );
+ assert_eq!(cell.get(), 0x4869);
+
+ let uint: u32 = __make_elem!(u32 as u32, LocalBits; 1, 0, 1);
+ assert_eq!(uint.view_bits::<LocalBits>()[.. 3], bits![1, 0, 1]);
+
+ let cell: Cell<u32> = __make_elem!(Cell<u32> as u32, LocalBits; 1, 0, 1);
+ assert_eq!(cell.view_bits::<LocalBits>()[.. 3], bits![1, 0, 1]);
+
+ /* `__make_elem!` is only invoked after `$ord` has already been made
+ * opaque to matchers as a single `:tt`. Invoking it directly with a path
+ * will fail the `:tt`, so this macro wraps it as one and forwards the
+ * rest.
+ */
+ macro_rules! invoke_make_elem {
+ (Cell<$typ:ident> as $sto:ident, $ord:path; $($rest:tt)*) => {
+ __make_elem!(Cell<$typ> as $sto, $ord; $($rest)*)
+ };
+ ($typ:ident as $sto:ident, $ord:path; $($rest:tt)*) => {
+ __make_elem!($typ as $sto, $ord; $($rest)*)
+ };
+ }
+
+ let uint: usize =
+ invoke_make_elem!(usize as usize, crate::order::Lsb0; 0, 0, 1, 1);
+ assert_eq!(uint, 12);
+
+ let cell: Cell<usize> =
+ invoke_make_elem!(Cell<usize> as usize, crate::order::Lsb0; 0, 0, 1, 1);
+ assert_eq!(cell.get(), 12);
+}
diff --git a/src/mem.rs b/src/mem.rs
new file mode 100644
index 0000000..b10f9fc
--- /dev/null
+++ b/src/mem.rs
@@ -0,0 +1,159 @@
+#![doc = include_str!("../doc/mem.md")]
+
+use core::{
+ cell::Cell,
+ mem,
+};
+
+use funty::Unsigned;
+use radium::marker::BitOps;
+
+#[doc = include_str!("../doc/mem/BitRegister.md")]
+pub trait BitRegister: Unsigned + BitOps {
+ /// The number of bits required to store an index in the range `0 .. BITS`.
+ const INDX: u8 = bits_of::<Self>().trailing_zeros() as u8;
+ /// A mask over all bits that can be used as an index within the element.
+ /// This is the value with the least significant `INDX`-many bits set high.
+ const MASK: u8 = bits_of::<Self>() as u8 - 1;
+ /// The literal `!0`.
+ const ALL: Self;
+}
+
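Sketch of what the associated constants work out to for a couple of register types:

    use bitvec::mem::BitRegister;

    assert_eq!(<u8 as BitRegister>::INDX, 3); // 8 == 1 << 3
    assert_eq!(<u8 as BitRegister>::MASK, 7); // 0b111 covers every index in 0 .. 8
    assert_eq!(<u32 as BitRegister>::ALL, u32::MAX);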
+/// Marks certain fundamentals as processor registers.
+macro_rules! register {
+ ($($t:ty),+ $(,)?) => { $(
+ impl BitRegister for $t {
+ const ALL: Self = !0;
+ }
+ )+ };
+}
+
+register!(u8, u16, u32);
+
+/** `u64` can only be used as a register on processors whose word size is at
+least 64 bits.
+
+This implementation is not present on targets with 32-bit processor words.
+**/
+#[cfg(target_pointer_width = "64")]
+impl BitRegister for u64 {
+ const ALL: Self = !0;
+}
+
+register!(usize);
+
+/// Counts the number of bits in a value of type `T`.
+pub const fn bits_of<T>() -> usize {
+ core::mem::size_of::<T>().saturating_mul(<u8>::BITS as usize)
+}
+
+#[doc = include_str!("../doc/mem/elts.md")]
+pub const fn elts<T>(bits: usize) -> usize {
+ let width = bits_of::<T>();
+ if width == 0 {
+ return 0;
+ }
+ bits / width + (bits % width != 0) as usize
+}
+
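Usage sketch: `elts` is the ceiling division that the `BitArr!` macro uses to size its backing array.

    use bitvec::mem::{bits_of, elts};

    assert_eq!(bits_of::<u16>(), 16);
    assert_eq!(elts::<u8>(10), 2); // ten bits spill into a second byte
    assert_eq!(elts::<u8>(16), 2);
    assert_eq!(elts::<u8>(0), 0);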
+/// Tests if a type has alignment equal to its size.
+#[doc(hidden)]
+#[cfg(not(tarpaulin_include))]
+pub const fn aligned_to_size<T>() -> bool {
+ mem::align_of::<T>() == mem::size_of::<T>()
+}
+
+/// Tests if two types have identical layouts (size and alignment are equal).
+#[doc(hidden)]
+#[cfg(not(tarpaulin_include))]
+pub const fn layout_eq<T, U>() -> bool {
+ mem::align_of::<T>() == mem::align_of::<U>()
+ && mem::size_of::<T>() == mem::size_of::<U>()
+}
+
+#[doc(hidden)]
+#[repr(transparent)]
+#[doc = include_str!("../doc/mem/BitElement.md")]
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitElement<T = usize> {
+ pub elem: T,
+}
+
+/// Creates a `BitElement` implementation for an integer and its atomic/cell
+/// variants.
+macro_rules! element {
+ ($($size:tt, $bare:ty => $atom:ident);+ $(;)?) => { $(
+ impl BitElement<$bare> {
+ /// Creates a new element wrapper from a raw integer.
+ pub const fn new(elem: $bare) -> Self {
+ Self {
+ elem,
+ }
+ }
+ }
+
+ impl BitElement<Cell<$bare>> {
+ /// Creates a new element wrapper from a raw integer.
+ pub const fn new(elem: $bare) -> Self {
+ Self {
+ elem: Cell::new(elem),
+ }
+ }
+ }
+
+ radium::if_atomic!( if atomic($size) {
+ use core::sync::atomic::$atom;
+ impl BitElement<$atom> {
+ /// Creates a new element wrapper from a raw integer.
+ pub const fn new(elem: $bare) -> Self {
+ Self {
+ elem: <$atom>::new(elem),
+ }
+ }
+ }
+ });
+ )+ };
+}
+
+element! {
+ 8, u8 => AtomicU8;
+ 16, u16 => AtomicU16;
+ 32, u32 => AtomicU32;
+}
+
+#[cfg(target_pointer_width = "64")]
+element!(64, u64 => AtomicU64);
+
+element!(size, usize => AtomicUsize);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::access::*;
+
+ #[test]
+ fn integer_properties() {
+ assert!(aligned_to_size::<u8>());
+ assert!(aligned_to_size::<BitSafeU8>());
+ assert!(layout_eq::<u8, BitSafeU8>());
+
+ assert!(aligned_to_size::<u16>());
+ assert!(aligned_to_size::<BitSafeU16>());
+ assert!(layout_eq::<u16, BitSafeU16>());
+
+ assert!(aligned_to_size::<u32>());
+ assert!(aligned_to_size::<BitSafeU32>());
+ assert!(layout_eq::<u32, BitSafeU32>());
+
+ assert!(aligned_to_size::<usize>());
+ assert!(aligned_to_size::<BitSafeUsize>());
+ assert!(layout_eq::<usize, BitSafeUsize>());
+
+ #[cfg(target_pointer_width = "64")]
+ {
+ assert!(aligned_to_size::<u64>());
+ assert!(aligned_to_size::<BitSafeU64>());
+ assert!(layout_eq::<u64, BitSafeU64>());
+ }
+ }
+}
diff --git a/src/order.rs b/src/order.rs
new file mode 100644
index 0000000..b70b916
--- /dev/null
+++ b/src/order.rs
@@ -0,0 +1,531 @@
+#![doc = include_str!("../doc/order.md")]
+
+use crate::{
+ index::{
+ BitEnd,
+ BitIdx,
+ BitMask,
+ BitPos,
+ BitSel,
+ },
+ mem::{
+ bits_of,
+ BitRegister,
+ },
+};
+
+#[doc = include_str!("../doc/order/BitOrder.md")]
+pub unsafe trait BitOrder: 'static {
+ /// Translates a semantic bit index into a real bit position.
+ ///
+ /// This function is the basis of the trait, and must adhere to a number of
+ /// requirements in order for an implementation to be correct.
+ ///
+ /// ## Type Parameters
+ ///
+ /// - `R`: The memory element type that the index and position govern.
+ ///
+ /// ## Parameters
+ ///
+ /// - `index`: A semantic bit-index within some `R` element.
+ ///
+ /// ## Returns
+ ///
+ /// The real position of the indexed bit within an `R` element. See the
+ /// `BitPos` documentation for what these positions are considered to mean.
+ ///
+ /// ## Requirements
+ ///
+ /// This function must satisfy the following requirements for all possible
+ /// input and output values, for all possible `R` type parameters:
+ ///
+ /// - Totality: The implementation must be able to accept every input in
+ /// [`BitIdx::<R>::range_all()`], and produce some `BitPos` value for
+ /// each.
+ /// - Bijection: There must be an exactly one-to-one correspondence between
+ /// input and output values. No input index may choose its output from a
+ /// set of more than one position, and no output position may be produced
+ /// by more than one input index.
+ /// - Purity: The translation from index to position must be consistent for
+ /// the lifetime of *at least* all data structures in the program. This
+ /// function *may* refer to global state, but that state **must** be
+ /// immutable while any `bitvec` data structures exist, and must not be
+ /// used to violate the totality or bijection requirements.
+ /// - Validity: The produced `BitPos` value must be within the valid range
+ /// of its type. This is enforced by [`BitPos::new`], but not by the
+ /// unsafe constructor [`BitPos::new_unchecked`].
+ ///
+ /// [`BitIdx::<R>::range_all()`]: crate::index::BitIdx::range_all
+ /// [`BitPos::new`]: crate::index::BitPos::new
+ /// [`BitPos::new_unchecked`]: crate::index::BitPos::new_unchecked
+ fn at<R>(index: BitIdx<R>) -> BitPos<R>
+ where R: BitRegister;
+
+ /// Produces a single-bit selection mask from a bit-index.
+ ///
+ /// This is an optional function: it is implemented as, and must always be
+ /// exactly identical to, `BitOrder::at(index).select()`. If your ordering
+ /// has a faster implementation, you may provide it, but it must be exactly
+ /// numerically equivalent.
+ #[inline]
+ fn select<R>(index: BitIdx<R>) -> BitSel<R>
+ where R: BitRegister {
+ Self::at::<R>(index).select()
+ }
+
+ /// Produces a multi-bit selection mask from a range of bit-indices.
+ ///
+ /// This is an optional function: it is implemented as, and must always be
+ /// exactly identical to,
+ /// `BitIdx::range(from, upto).map(BitOrder::select).sum()`. If your
+ /// ordering has a faster implementation, you may provide it, but it must be
+ /// exactly numerically equivalent.
+ ///
+ /// ## Parameters
+ ///
+ /// - `from`: The inclusive starting value of the indices being selected.
+ /// Defaults to [`BitIdx::MIN`].
+ /// - `upto`: The exclusive ending value of the indices being selected.
+ /// Defaults to [`BitEnd::MAX`].
+ ///
+ /// ## Returns
+ ///
+ /// A selection mask with all bit-positions corresponding to `from .. upto`
+ /// selected.
+ ///
+ /// [`BitEnd::MAX`]: crate::index::BitEnd::MAX
+ /// [`BitIdx::MIN`]: crate::index::BitIdx::MIN
+ #[inline]
+ fn mask<R>(
+ from: impl Into<Option<BitIdx<R>>>,
+ upto: impl Into<Option<BitEnd<R>>>,
+ ) -> BitMask<R>
+ where
+ R: BitRegister,
+ {
+ let (from, upto) = match (from.into(), upto.into()) {
+ (None, None) => return BitMask::ALL,
+ (Some(from), None) => (from, BitEnd::MAX),
+ (None, Some(upto)) => (BitIdx::MIN, upto),
+ (Some(from), Some(upto)) => (from, upto),
+ };
+ from.range(upto).map(Self::select::<R>).sum()
+ }
+}
+
+#[doc = include_str!("../doc/order/Lsb0.md")]
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Lsb0;
+
+#[doc = include_str!("../doc/order/Msb0.md")]
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Msb0;
+
+unsafe impl BitOrder for Lsb0 {
+ #[inline]
+ fn at<R>(index: BitIdx<R>) -> BitPos<R>
+ where R: BitRegister {
+ unsafe { BitPos::new_unchecked(index.into_inner()) }
+ }
+
+ #[inline]
+ fn select<R>(index: BitIdx<R>) -> BitSel<R>
+ where R: BitRegister {
+ unsafe { BitSel::new_unchecked(R::ONE << index.into_inner()) }
+ }
+
+ #[inline]
+ fn mask<R>(
+ from: impl Into<Option<BitIdx<R>>>,
+ upto: impl Into<Option<BitEnd<R>>>,
+ ) -> BitMask<R>
+ where
+ R: BitRegister,
+ {
+ let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
+ let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
+ debug_assert!(
+ from <= upto,
+ "Ranges must run from low index ({}) to high ({})",
+ from,
+ upto,
+ );
+ let ct = upto - from;
+ if ct == bits_of::<R>() as u8 {
+ return BitMask::ALL;
+ }
+ /* This expression does the following work:
+ * 1. Set all bits in the mask to `1`.
+ * 2. Shift left by the number of bits in the mask. The mask bits are
+ * now at LSedge and `0`.
+ * 3. Invert the mask. The mask bits are now at LSedge and `1`; all
+ * else are `0`.
+ * 4. Shift left by the `from` distance from LSedge. The mask bits now
+ * begin at `from` left of LSedge and extend to `upto` left of
+ * LSedge.
+ */
+ BitMask::new(!(R::ALL << ct) << from)
+ }
+}
+
+unsafe impl BitOrder for Msb0 {
+ #[inline]
+ fn at<R>(index: BitIdx<R>) -> BitPos<R>
+ where R: BitRegister {
+ unsafe { BitPos::new_unchecked(R::MASK - index.into_inner()) }
+ }
+
+ #[inline]
+ fn select<R>(index: BitIdx<R>) -> BitSel<R>
+ where R: BitRegister {
+ /* Shift the MSbit down by the index count. This is not equivalent to
+ * the expression `1 << (mask - index)`, because that expression must
+ * perform a runtime subtraction before the shift, while this shifts a
+ * pre-computed constant.
+ */
+ let msbit: R = R::ONE << R::MASK;
+ unsafe { BitSel::new_unchecked(msbit >> index.into_inner()) }
+ }
+
+ #[inline]
+ fn mask<R>(
+ from: impl Into<Option<BitIdx<R>>>,
+ upto: impl Into<Option<BitEnd<R>>>,
+ ) -> BitMask<R>
+ where
+ R: BitRegister,
+ {
+ let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
+ let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
+ debug_assert!(
+ from <= upto,
+ "ranges must run from low index ({}) to high ({})",
+ from,
+ upto,
+ );
+ let ct = upto - from;
+ if ct == bits_of::<R>() as u8 {
+ return BitMask::ALL;
+ }
+ /* This expression does the following work:
+ * 1. Set all bits in the mask to `1`.
+ * 2. Shift right by the number of bits in the mask. The mask bits are
+ * now at MSedge and `0`.
+ * 3. Invert the mask. The mask bits are now at MSedge and `1`; all
+ * else are `0`.
+ * 4. Shift right by the `from` distance from MSedge. The mask bits
+ * now begin at `from` right of MSedge and extend to `upto` right
+ * of MSedge.
+ */
+ BitMask::new(!(R::ALL >> ct) >> from)
+ }
+}
+
+#[cfg(target_endian = "little")]
+#[doc = include_str!("../doc/order/LocalBits.md")]
+pub use self::Lsb0 as LocalBits;
+#[cfg(target_endian = "big")]
+#[doc = include_str!("../doc/order/LocalBits.md")]
+pub use self::Msb0 as LocalBits;
+
+#[cfg(not(any(target_endian = "big", target_endian = "little")))]
+compile_error!(
+ "This architecture is not supported! Please consider filing an issue"
+);
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/order/verify.md")]
+pub fn verify<O>(verbose: bool)
+where O: BitOrder {
+ verify_for_type::<u8, O>(verbose);
+ verify_for_type::<u16, O>(verbose);
+ verify_for_type::<u32, O>(verbose);
+ verify_for_type::<usize, O>(verbose);
+
+ #[cfg(target_pointer_width = "64")]
+ verify_for_type::<u64, O>(verbose);
+}
+
+/// Verification does not access memory, and is both useless and slow in Miri.
+#[cfg(miri)]
+pub fn verify_for_type<R, O>(_: bool)
+where
+ R: BitRegister,
+ O: BitOrder,
+{
+}
+
+#[cfg(not(miri))]
+#[doc = include_str!("../doc/order/verify_for_type.md")]
+pub fn verify_for_type<R, O>(verbose: bool)
+where
+ R: BitRegister,
+ O: BitOrder,
+{
+ use core::any::type_name;
+ let mut accum = BitMask::<R>::ZERO;
+
+ let ord_name = type_name::<O>();
+ let reg_name = type_name::<R>();
+
+ for n in 0 .. bits_of::<R>() as u8 {
+ // Wrap the counter as an index.
+ let idx = unsafe { BitIdx::<R>::new_unchecked(n) };
+
+ // Compute the bit position for the index.
+ let pos = O::at::<R>(idx);
+ if verbose {
+ #[cfg(feature = "std")]
+ println!(
+ "`<{} as BitOrder>::at::<{}>({})` produces {}",
+ ord_name,
+ reg_name,
+ n,
+ pos.into_inner(),
+ );
+ }
+
+ // If the computed position exceeds the valid range, fail.
+ assert!(
+ pos.into_inner() < bits_of::<R>() as u8,
+ "Error when verifying the implementation of `BitOrder` for `{}`: \
+ Index {} produces a bit position ({}) that exceeds the type width \
+ {}",
+ ord_name,
+ n,
+ pos.into_inner(),
+ bits_of::<R>(),
+ );
+
+ // Check `O`’s implementation of `select`
+ let sel = O::select::<R>(idx);
+ if verbose {
+ #[cfg(feature = "std")]
+ println!(
+ "`<{} as BitOrder>::select::<{}>({})` produces {:b}",
+ ord_name, reg_name, n, sel,
+ );
+ }
+
+ // If the selector bit is not one-hot, fail.
+ assert_eq!(
+ sel.into_inner().count_ones(),
+ 1,
+ "Error when verifying the implementation of `BitOrder` for `{}`: \
+ Index {} produces a bit selector ({:b}) that is not a one-hot mask",
+ ord_name,
+ n,
+ sel,
+ );
+
+ // Check that the selection computed from the index matches the
+ // selection computed from the position.
+ let shl = pos.select();
+ // If `O::select(idx)` does not produce `1 << pos`, fail.
+ assert_eq!(
+ sel,
+ shl,
+ "Error when verifying the implementation of `BitOrder` for `{}`: \
+ Index {} produces a bit selector ({:b}) that is not equal to `1 \
+ << {}` ({:b})",
+ ord_name,
+ n,
+ sel,
+ pos.into_inner(),
+ shl,
+ );
+
+ // Check that the produced selector bit has not already been added to
+ // the accumulator.
+ assert!(
+ !accum.test(sel),
+ "Error when verifying the implementation of `BitOrder` for `{}`: \
+ Index {} produces a bit position ({}) that has already been \
+ produced by a prior index",
+ ord_name,
+ n,
+ pos.into_inner(),
+ );
+ accum.insert(sel);
+ if verbose {
+ #[cfg(feature = "std")]
+ println!(
+ "`<{} as BitOrder>::at::<{}>({})` accumulates {:b}",
+ ord_name, reg_name, n, accum,
+ );
+ }
+ }
+
+ // Check that all indices produced all positions.
+ assert_eq!(
+ accum,
+ BitMask::ALL,
+ "Error when verifying the implementation of `BitOrder` for `{}`: The \
+ bit positions marked with a `0` here were never produced from an \
+ index, despite all possible indices being passed in for translation: \
+ {:b}",
+ ord_name,
+ accum,
+ );
+
+ // Check that `O::mask` is correct for all range combinations.
+ for from in BitIdx::<R>::range_all() {
+ for upto in BitEnd::<R>::range_from(from) {
+ let mask = O::mask(from, upto);
+ let check = from
+ .range(upto)
+ .map(O::at)
+ .map(BitPos::select)
+ .sum::<BitMask<R>>();
+ assert_eq!(
+ mask,
+ check,
+ "Error when verifying the implementation of `BitOrder` for \
+ `{o}`: `{o}::mask::<{m}>({f}, {u})` produced {bad:b}, but \
+ expected {good:b}",
+ o = ord_name,
+ m = reg_name,
+ f = from,
+ u = upto,
+ bad = mask,
+ good = check,
+ );
+ }
+ }
+}
+
+/// An ordering that does not provide a contiguous index map or `BitField`
+/// acceleration.
+#[cfg(test)]
+pub struct HiLo;
+
+#[cfg(test)]
+unsafe impl BitOrder for HiLo {
+ fn at<R>(index: BitIdx<R>) -> BitPos<R>
+ where R: BitRegister {
+ BitPos::new(index.into_inner() ^ 4).unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn default_impl() {
+ assert_eq!(Lsb0::mask(None, None), BitMask::<u8>::ALL);
+ assert_eq!(Msb0::mask(None, None), BitMask::<u8>::ALL);
+ assert_eq!(HiLo::mask(None, None), BitMask::<u8>::ALL);
+
+ assert_eq!(
+ HiLo::mask(None, BitEnd::<u8>::new(3).unwrap()),
+ BitMask::new(0b0111_0000),
+ );
+ assert_eq!(
+ HiLo::mask(BitIdx::<u8>::new(3).unwrap(), None),
+ BitMask::new(0b1000_1111),
+ );
+ }
+
+ // Split these out into individual test functions so they can parallelize.
+
+ mod lsb0 {
+ use super::*;
+
+ #[test]
+ fn verify_u8() {
+ verify_for_type::<u8, Lsb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_u16() {
+ verify_for_type::<u16, Lsb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_u32() {
+ verify_for_type::<u32, Lsb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
+ fn verify_u64() {
+ verify_for_type::<u64, Lsb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_usize() {
+ verify_for_type::<usize, Lsb0>(cfg!(feature = "verbose"));
+ }
+ }
+
+ mod msb0 {
+ use super::*;
+
+ #[test]
+ fn verify_u8() {
+ verify_for_type::<u8, Msb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_u16() {
+ verify_for_type::<u16, Msb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_u32() {
+ verify_for_type::<u32, Msb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
+ fn verify_u64() {
+ verify_for_type::<u64, Msb0>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_usize() {
+ verify_for_type::<usize, Msb0>(cfg!(feature = "verbose"));
+ }
+ }
+
+ mod hilo {
+ use super::*;
+
+ #[test]
+ fn verify_u8() {
+ verify_for_type::<u8, HiLo>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_u16() {
+ verify_for_type::<u16, HiLo>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_u32() {
+ verify_for_type::<u32, HiLo>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
+ fn verify_u64() {
+ verify_for_type::<u64, HiLo>(cfg!(feature = "verbose"));
+ }
+
+ #[test]
+ #[cfg(not(tarpaulin))]
+ fn verify_usize() {
+ verify_for_type::<usize, HiLo>(cfg!(feature = "verbose"));
+ }
+ }
+}
diff --git a/src/ptr.rs b/src/ptr.rs
new file mode 100644
index 0000000..8cc4ec1
--- /dev/null
+++ b/src/ptr.rs
@@ -0,0 +1,349 @@
+#![doc = include_str!("../doc/ptr.md")]
+
+use core::hash::{
+ Hash,
+ Hasher,
+};
+
+use wyz::bidi::BidiIterator;
+
+use crate::{
+ devel as dvl,
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+mod addr;
+mod proxy;
+mod range;
+mod single;
+mod span;
+mod tests;
+
+pub use wyz::comu::{
+ Const,
+ Mut,
+ Mutability,
+};
+
+pub(crate) use self::{
+ addr::AddressExt,
+ span::BitSpan,
+};
+pub use self::{
+ addr::{
+ check_alignment,
+ MisalignError,
+ },
+ proxy::BitRef,
+ range::BitPtrRange,
+ single::{
+ BitPtr,
+ BitPtrError,
+ },
+ span::BitSpanError,
+};
+
+#[inline]
+#[doc = include_str!("../doc/ptr/copy.md")]
+pub unsafe fn copy<T1, T2, O1, O2>(
+ src: BitPtr<Const, T1, O1>,
+ dst: BitPtr<Mut, T2, O2>,
+ count: usize,
+) where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ // Overlap is only defined if the orderings are identical.
+ if dvl::match_order::<O1, O2>() {
+ let (addr, head) = dst.raw_parts();
+ let dst = BitPtr::<Mut, T2, O1>::new_unchecked(addr, head);
+ let src_pair = src.range(count);
+
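+ // If `dst` begins inside the source range, a forward copy would
+ // overwrite source bits before they are read, so `.bidi(rev)` walks
+ // the ranges back-to-front in that case.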
+ let rev = src_pair.contains(&dst);
+ for (from, to) in src_pair.zip(dst.range(count)).bidi(rev) {
+ to.write(from.read());
+ }
+ }
+ else {
+ copy_nonoverlapping(src, dst, count);
+ }
+}
+
+#[inline]
+#[doc = include_str!("../doc/ptr/copy_nonoverlapping.md")]
+pub unsafe fn copy_nonoverlapping<T1, T2, O1, O2>(
+ src: BitPtr<Const, T1, O1>,
+ dst: BitPtr<Mut, T2, O2>,
+ count: usize,
+) where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ for (from, to) in src.range(count).zip(dst.range(count)) {
+ to.write(from.read());
+ }
+}
+
+#[inline]
+#[doc = include_str!("../doc/ptr/drop_in_place.md")]
+#[deprecated = "this has no effect, and should not be called"]
+pub unsafe fn drop_in_place<T, O>(_: BitPtr<Mut, T, O>)
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[doc = include_str!("../doc/ptr/eq.md")]
+#[inline]
+pub fn eq<T1, T2, O>(
+ this: BitPtr<Const, T1, O>,
+ that: BitPtr<Const, T2, O>,
+) -> bool
+where
+ T1: BitStore,
+ T2: BitStore,
+ O: BitOrder,
+{
+ this == that
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/hash.md")]
+pub fn hash<T, O, S>(ptr: BitPtr<Const, T, O>, into: &mut S)
+where
+ T: BitStore,
+ O: BitOrder,
+ S: Hasher,
+{
+ ptr.hash(into);
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/null.md")]
+pub fn null<T, O>() -> BitPtr<Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ BitPtr::DANGLING
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/null_mut.md")]
+pub fn null_mut<T, O>() -> BitPtr<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ BitPtr::DANGLING
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/read.md")]
+pub unsafe fn read<T, O>(src: BitPtr<Const, T, O>) -> bool
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ src.read()
+}
+
+#[inline]
+#[allow(deprecated)]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/read_unaligned.md")]
+#[deprecated = "`BitPtr` does not have unaligned addresses"]
+pub unsafe fn read_unaligned<T, O>(src: BitPtr<Const, T, O>) -> bool
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ src.read_unaligned()
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/read_volatile.md")]
+pub unsafe fn read_volatile<T, O>(src: BitPtr<Const, T, O>) -> bool
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ src.read_volatile()
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/replace.md")]
+pub unsafe fn replace<T, O>(dst: BitPtr<Mut, T, O>, src: bool) -> bool
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ dst.replace(src)
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/slice_from_raw_parts.md")]
+pub fn slice_from_raw_parts<T, O>(
+ ptr: BitPtr<Const, T, O>,
+ len: usize,
+) -> *const BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ bitslice_from_raw_parts(ptr, len)
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/slice_from_raw_parts_mut.md")]
+pub fn slice_from_raw_parts_mut<T, O>(
+ ptr: BitPtr<Mut, T, O>,
+ len: usize,
+) -> *mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ bitslice_from_raw_parts_mut(ptr, len)
+}
+
+#[inline]
+#[doc = include_str!("../doc/ptr/swap.md")]
+pub unsafe fn swap<T1, T2, O1, O2>(
+ one: BitPtr<Mut, T1, O1>,
+ two: BitPtr<Mut, T2, O2>,
+) where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ one.write(two.replace(one.read()));
+}
+
+#[inline]
+#[doc = include_str!("../doc/ptr/swap_nonoverlapping.md")]
+pub unsafe fn swap_nonoverlapping<T1, T2, O1, O2>(
+ mut one: BitPtr<Mut, T1, O1>,
+ mut two: BitPtr<Mut, T2, O2>,
+ count: usize,
+) where
+ O1: BitOrder,
+ O2: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ // Note: compare codegen with `one.range(count).zip(two.range(count))`.
+ for _ in 0 .. count {
+ swap(one, two);
+ one = one.add(1);
+ two = two.add(1);
+ }
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/write.md")]
+pub unsafe fn write<T, O>(dst: BitPtr<Mut, T, O>, value: bool)
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ dst.write(value);
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[deprecated = "use `write_bits()` instead"]
+#[doc = include_str!("../doc/ptr/write_bytes.md")]
+pub unsafe fn write_bytes<T, O>(
+ dst: BitPtr<Mut, T, O>,
+ value: bool,
+ count: usize,
+) where
+ T: BitStore,
+ O: BitOrder,
+{
+ write_bits(dst, value, count)
+}
+
+#[inline]
+#[allow(deprecated)]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/write_unaligned.md")]
+#[deprecated = "`BitPtr` does not have unaligned addresses"]
+pub unsafe fn write_unaligned<T, O>(dst: BitPtr<Mut, T, O>, value: bool)
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ dst.write_unaligned(value);
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/write_volatile.md")]
+pub unsafe fn write_volatile<T, O>(dst: BitPtr<Mut, T, O>, value: bool)
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ dst.write_volatile(value);
+}
+
+// Renamed variants.
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/bitslice_from_raw_parts.md")]
+pub fn bitslice_from_raw_parts<T, O>(
+ ptr: BitPtr<Const, T, O>,
+ len: usize,
+) -> *const BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ ptr.span(len).unwrap().into_bitslice_ptr()
+}
+
+#[inline]
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../doc/ptr/bitslice_from_raw_parts_mut.md")]
+pub fn bitslice_from_raw_parts_mut<T, O>(
+ ptr: BitPtr<Mut, T, O>,
+ len: usize,
+) -> *mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ ptr.span(len).unwrap().into_bitslice_ptr_mut()
+}
+
+#[inline]
+#[doc = include_str!("../doc/ptr/write_bits.md")]
+pub unsafe fn write_bits<T, O>(dst: BitPtr<Mut, T, O>, value: bool, count: usize)
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ for bit in dst.range(count) {
+ bit.write(value);
+ }
+}
diff --git a/src/ptr/addr.rs b/src/ptr/addr.rs
new file mode 100644
index 0000000..90b1e92
--- /dev/null
+++ b/src/ptr/addr.rs
@@ -0,0 +1,170 @@
+#![doc = include_str!("../../doc/ptr/addr.md")]
+
+use core::{
+ any,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ Pointer,
+ },
+ mem,
+ ptr::NonNull,
+};
+
+use tap::{
+ Pipe,
+ TryConv,
+};
+use wyz::{
+ comu::{
+ Address,
+ Const,
+ Mut,
+ Mutability,
+ },
+ fmt::FmtForward,
+};
+
+/// Ensures that an address is well-aligned for its referent type.
+#[inline]
+pub fn check_alignment<M, T>(
+ addr: Address<M, T>,
+) -> Result<Address<M, T>, MisalignError<T>>
+where M: Mutability {
+ let ptr = addr.to_const();
+ let mask = mem::align_of::<T>() - 1;
+ if ptr as usize & mask != 0 {
+ Err(MisalignError { ptr })
+ }
+ else {
+ Ok(addr)
+ }
+}
+
+/// Extension methods for raw pointers.
+pub(crate) trait AddressExt {
+ /// Tracks the original mutation capability of the source pointer.
+ type Permission: Mutability;
+ /// The type to which the pointer points.
+ type Referent: Sized;
+
+ /// Forcibly wraps a raw pointer as an `Address`, without handling errors.
+ ///
+ /// In debug builds, this panics on null or misaligned pointers. In release
+ /// builds, it is permitted to remove the error-handling codepaths and
+ /// assume these invariants are upheld by the caller.
+ ///
+ /// ## Safety
+ ///
+ /// The caller must ensure that this is only called on non-null,
+ /// well-aligned pointers. Pointers derived from Rust references or calls to
+ /// the Rust allocator API will always satisfy this.
+ unsafe fn into_address(self) -> Address<Self::Permission, Self::Referent>;
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> AddressExt for *const T {
+ type Permission = Const;
+ type Referent = T;
+
+ unsafe fn into_address(self) -> Address<Const, T> {
+ if cfg!(debug_assertions) {
+ self.try_conv::<Address<_, _>>()
+ .unwrap_or_else(|err| panic!("{}", err))
+ .pipe(check_alignment)
+ .unwrap_or_else(|err| panic!("{}", err))
+ }
+ else {
+ Address::new(NonNull::new_unchecked(self as *mut T))
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> AddressExt for *mut T {
+ type Permission = Mut;
+ type Referent = T;
+
+ unsafe fn into_address(self) -> Address<Mut, T> {
+ if cfg!(debug_assertions) {
+ self.try_conv::<Address<_, _>>()
+ .unwrap_or_else(|err| panic!("{}", err))
+ .pipe(check_alignment)
+ .unwrap_or_else(|err| panic!("{}", err))
+ }
+ else {
+ Address::new(NonNull::new_unchecked(self))
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> AddressExt for &T {
+ type Permission = Const;
+ type Referent = T;
+
+ unsafe fn into_address(self) -> Address<Self::Permission, Self::Referent> {
+ self.into()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> AddressExt for &mut T {
+ type Permission = Mut;
+ type Referent = T;
+
+ unsafe fn into_address(self) -> Address<Self::Permission, Self::Referent> {
+ self.into()
+ }
+}
+
+/// The error produced when an address is not sufficiently aligned for its
+/// referent type.
+#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct MisalignError<T> {
+ /// The misaligned pointer.
+ ptr: *const T,
+}
+
+impl<T> MisalignError<T> {
+ /// The minimum address alignment of `T` values.
+ const ALIGN: usize = mem::align_of::<T>();
+ /// The number of least-significant bits of an address that must be `0` in
+ /// order for it to be validly aligned for `T`.
+ const CTTZ: usize = Self::ALIGN.trailing_zeros() as usize;
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> Debug for MisalignError<T> {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("MisalignError")
+ .field(&self.ptr.fmt_pointer())
+ .field(&Self::ALIGN)
+ .finish()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> Display for MisalignError<T> {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "Type {} requires {}-byte alignment: address ",
+ any::type_name::<T>(),
+ Self::ALIGN,
+ )?;
+ Pointer::fmt(&self.ptr, fmt)?;
+ write!(fmt, " must clear its least {} bits", Self::CTTZ)
+ }
+}
+
+unsafe impl<T> Send for MisalignError<T> {}
+
+unsafe impl<T> Sync for MisalignError<T> {}
+
+#[cfg(feature = "std")]
+impl<T> std::error::Error for MisalignError<T> {}
diff --git a/src/ptr/proxy.rs b/src/ptr/proxy.rs
new file mode 100644
index 0000000..fccfd9f
--- /dev/null
+++ b/src/ptr/proxy.rs
@@ -0,0 +1,475 @@
+#![doc = include_str!("../../doc/ptr/proxy.md")]
+
+use core::{
+ cell::UnsafeCell,
+ cmp,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ Pointer,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ marker::PhantomData,
+ mem,
+ ops::{
+ Deref,
+ DerefMut,
+ Not,
+ },
+};
+
+use wyz::comu::{
+ Const,
+ Mut,
+ Mutability,
+};
+
+use super::BitPtr;
+use crate::{
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ store::BitStore,
+};
+
+#[doc = include_str!("../../doc/ptr/BitRef.md")]
+// Restore alignment and sizing properties, as `BitPtr` lacks them.
+#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
+#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
+#[cfg_attr(
+ not(any(target_pointer_width = "32", target_pointer_width = "64")),
+ repr(C)
+)]
+pub struct BitRef<'a, M = Const, T = usize, O = Lsb0>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The proxied bit-address.
+ bitptr: BitPtr<M, T, O>,
+ /// A local cache of the proxied bit that can be referenced.
+ data: bool,
+ /// Attach the lifetime and reflect the possibility of mutation.
+ _ref: PhantomData<&'a UnsafeCell<bool>>,
+}
+
+impl<M, T, O> BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Converts a bit-pointer into a proxy bit-reference.
+ ///
+ /// This reads through the pointer in order to cache the current bit value
+ /// in the proxy.
+ ///
+ /// ## Original
+ ///
+ /// The syntax `unsafe { &* ptr }`.
+ ///
+ /// ## Safety
+ ///
+ /// This is equivalent to (and is!) dereferencing a raw pointer. The pointer
+ /// must be well-constructed, refer to a live memory location in the program
+ /// context, and not be aliased beyond its typing indicators.
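+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch of reading through a proxy built from a shared
+ /// bit-pointer:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::ptr::BitRef;
+ ///
+ /// let data = 1u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// // Reads the pointed-to bit into the proxy’s cache.
+ /// let proxy = unsafe { BitRef::from_bitptr(ptr) };
+ /// assert!(*proxy);
+ /// ```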
+ #[inline]
+ pub unsafe fn from_bitptr(bitptr: BitPtr<M, T, O>) -> Self {
+ let data = bitptr.read();
+ Self {
+ bitptr,
+ data,
+ _ref: PhantomData,
+ }
+ }
+
+ /// Decays the bit-reference to an ordinary bit-pointer.
+ ///
+ /// ## Original
+ ///
+ /// The syntax `&val as *const T` (or `&mut val as *mut T`).
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_bitptr(self) -> BitPtr<M, T, O> {
+ self.bitptr
+ }
+
+ /// Removes a layer of `::Alias` marking from a bit-reference.
+ ///
+ /// ## Safety
+ ///
+ /// The caller must ensure that no element-level aliasing *by `bitvec`*
+ /// occurs in the scope for which the produced de-aliased proxy is alive.
+ #[cfg(not(tarpaulin_include))]
+ pub(crate) unsafe fn remove_alias(this: BitRef<M, T::Alias, O>) -> Self {
+ Self {
+ bitptr: this.bitptr.cast::<T>(),
+ data: this.data,
+ _ref: PhantomData,
+ }
+ }
+}
+
+impl<T, O> BitRef<'_, Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Moves `src` into the referenced bit, returning the previous value.
+ ///
+ /// ## Original
+ ///
+ /// [`mem::replace`](core::mem::replace)
+ #[inline]
+ pub fn replace(&mut self, src: bool) -> bool {
+ mem::replace(&mut self.data, src)
+ }
+
+ /// Swaps the bit values of two proxies.
+ ///
+ /// ## Original
+ ///
+ /// [`mem::swap`](core::mem::swap)
+ #[inline]
+ pub fn swap<T2, O2>(&mut self, other: &mut BitRef<Mut, T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ mem::swap(&mut self.data, &mut other.data)
+ }
+
+ /// Commits a bit into the proxied location.
+ ///
+ /// This function writes `value` directly into the proxied location,
+ /// bypassing the cache and destroying the proxy. This eliminates the second
+ /// write done in the destructor, and allows code to be slightly faster.
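+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch, assuming a freshly created mutable proxy:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::ptr::BitRef;
+ ///
+ /// let mut data = 0u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+ /// let proxy = unsafe { BitRef::from_bitptr(ptr) };
+ /// // Writes straight to memory and consumes the proxy without running
+ /// // its destructor.
+ /// proxy.commit(true);
+ /// assert_eq!(data, 1);
+ /// ```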
+ #[inline]
+ pub fn commit(self, value: bool) {
+ unsafe {
+ self.bitptr.write(value);
+ }
+ mem::forget(self);
+ }
+
+ /// Writes `value` into the proxy.
+ ///
+ /// This does not write into the proxied location; that is deferred until
+ /// the proxy destructor runs.
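+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch showing that the memory write is deferred until the
+ /// proxy drops:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::ptr::BitRef;
+ ///
+ /// let mut data = 0u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+ /// let mut proxy = unsafe { BitRef::from_bitptr(ptr) };
+ /// proxy.set(true);
+ /// // The cached bit is committed to `data` only when `proxy` drops.
+ /// drop(proxy);
+ /// assert_eq!(data, 1);
+ /// ```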
+ #[inline]
+ pub fn set(&mut self, value: bool) {
+ self.data = value;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for BitRef<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self { ..*self }
+ }
+}
+
+impl<M, T, O> Eq for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Ord for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.data.cmp(&other.data)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M1, M2, O1, O2, T1, T2> PartialEq<BitRef<'_, M2, T2, O2>>
+ for BitRef<'_, M1, T1, O1>
+where
+ M1: Mutability,
+ M2: Mutability,
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline(always)]
+ fn eq(&self, other: &BitRef<'_, M2, T2, O2>) -> bool {
+ self.data == other.data
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> PartialEq<bool> for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline(always)]
+ fn eq(&self, other: &bool) -> bool {
+ self.data == *other
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> PartialEq<BitRef<'_, M, T, O>> for bool
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn eq(&self, other: &BitRef<'_, M, T, O>) -> bool {
+ other == self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> PartialEq<&bool> for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline(always)]
+ fn eq(&self, other: &&bool) -> bool {
+ self.data == **other
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> PartialEq<BitRef<'_, M, T, O>> for &bool
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn eq(&self, other: &BitRef<'_, M, T, O>) -> bool {
+ other == *self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M1, M2, O1, O2, T1, T2> PartialOrd<BitRef<'_, M2, T2, O2>>
+ for BitRef<'_, M1, T1, O1>
+where
+ M1: Mutability,
+ M2: Mutability,
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(
+ &self,
+ other: &BitRef<'_, M2, T2, O2>,
+ ) -> Option<cmp::Ordering> {
+ self.data.partial_cmp(&other.data)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> PartialOrd<bool> for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &bool) -> Option<cmp::Ordering> {
+ self.data.partial_cmp(other)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> PartialOrd<&bool> for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &&bool) -> Option<cmp::Ordering> {
+ self.data.partial_cmp(*other)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> AsRef<bool> for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &bool {
+ &self.data
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsMut<bool> for BitRef<'_, Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut bool {
+ &mut self.data
+ }
+}
+
+impl<M, T, O> Debug for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ unsafe { self.bitptr.span_unchecked(1) }
+ .render(fmt, "Ref", &[("bit", &self.data as &dyn Debug)])
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Display for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Display::fmt(&self.data, fmt)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Pointer for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Pointer::fmt(&self.bitptr, fmt)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Hash for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, state: &mut H)
+ where H: Hasher {
+ self.bitptr.hash(state);
+ }
+}
+
+// #[allow(clippy::non_send_fields_in_send_ty)] // I know what I’m doing
+unsafe impl<M, T, O> Send for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore + Sync,
+ O: BitOrder,
+{
+}
+
+unsafe impl<M, T, O> Sync for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore + Sync,
+ O: BitOrder,
+{
+}
+
+// This cannot be implemented until `Drop` is specialized to only
+// `<Mut, T, O>`.
+// impl<T, O> Copy for BitRef<'_, Const, T, O>
+// where O: BitOrder, T: BitStore {}
+
+impl<M, T, O> Deref for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ type Target = bool;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.data
+ }
+}
+
+impl<T, O> DerefMut for BitRef<'_, Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.data
+ }
+}
+
+impl<M, T, O> Drop for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn drop(&mut self) {
+ // `Drop` cannot specialize on type parameters, but only mutable
+ // proxies can commit to memory.
+ if M::CONTAINS_MUTABILITY {
+ unsafe {
+ self.bitptr.to_mut().write(self.data);
+ }
+ }
+ }
+}
+
+impl<M, T, O> Not for BitRef<'_, M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ type Output = bool;
+
+ #[inline]
+ fn not(self) -> Self::Output {
+ !self.data
+ }
+}
diff --git a/src/ptr/range.rs b/src/ptr/range.rs
new file mode 100644
index 0000000..2dc3f8c
--- /dev/null
+++ b/src/ptr/range.rs
@@ -0,0 +1,403 @@
+#![doc = include_str!("../../doc/ptr/range.md")]
+
+use core::{
+ fmt::{
+ self,
+ Debug,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ iter::FusedIterator,
+ ops::{
+ Bound,
+ Range,
+ RangeBounds,
+ },
+};
+
+use wyz::comu::{
+ Const,
+ Mutability,
+};
+
+use super::{
+ BitPtr,
+ BitSpan,
+};
+use crate::{
+ devel as dvl,
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ store::BitStore,
+};
+
+#[repr(C)]
+#[doc = include_str!("../../doc/ptr/BitPtrRange.md")]
+pub struct BitPtrRange<M = Const, T = usize, O = Lsb0>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The lower, inclusive, bound of the range. The bit to which this points
+ /// is considered live.
+ pub start: BitPtr<M, T, O>,
+ /// The higher, exclusive, bound of the range. The bit to which this points
+ /// is considered dead, and the pointer may be one bit beyond the bounds of
+ /// an allocation region.
+ ///
+ /// Rust and LLVM both define the address of `base + (len * width)` as being
+ /// within the provenance of `base`, even though that address may itself be
+ /// the base address of another region in a different provenance. Since
+ /// bit-pointers are always composed of an ordinary memory address and a
+ /// bit-counter, the ending bit-pointer is therefore always valid.
+ pub end: BitPtr<M, T, O>,
+}
+
+impl<M, T, O> BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The canonical empty range. All ranges with zero length (equal `.start`
+ /// and `.end`) are equally empty.
+ pub const EMPTY: Self = Self {
+ start: BitPtr::DANGLING,
+ end: BitPtr::DANGLING,
+ };
+
+ /// Explicitly converts a `Range<BitPtr>` into a `BitPtrRange`.
+ #[inline]
+ pub fn from_range(Range { start, end }: Range<BitPtr<M, T, O>>) -> Self {
+ Self { start, end }
+ }
+
+ /// Explicitly converts a `BitPtrRange` into a `Range<BitPtr>`.
+ #[inline]
+ pub fn into_range(self) -> Range<BitPtr<M, T, O>> {
+ let Self { start, end } = self;
+ start .. end
+ }
+
+ /// Tests if the range is empty (the distance between bit-pointers is `0`).
+ ///
+ /// ## Original
+ ///
+ /// [`Range::is_empty`](core::ops::Range::is_empty)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::ptr::BitPtrRange;
+ ///
+ /// let data = 0u8;
+ /// let bp = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// let mut range = BitPtrRange::from_range(bp .. bp.wrapping_add(1));
+ ///
+ /// assert!(!range.is_empty());
+ /// assert_ne!(range.start, range.end);
+ ///
+ /// range.next();
+ ///
+ /// assert!(range.is_empty());
+ /// assert_eq!(range.start, range.end);
+ /// ```
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.start == self.end
+ }
+
+ /// Tests if a given bit-pointer is contained within the range.
+ ///
+ /// Bit-pointer ordering is defined when the types have the same exact
+ /// `BitOrder` type parameter and the same `BitStore::Mem` associated type
+ /// (but are free to differ in alias condition!). Inclusion in a range
+ /// occurs when the bit-pointer is not strictly less than the range start,
+ /// and is strictly less than the range end.
+ ///
+ /// ## Original
+ ///
+ /// [`Range::contains`](core::ops::Range::contains)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::ptr::BitPtrRange;
+ /// use core::cell::Cell;
+ ///
+ /// let data = 0u16;
+ /// let bp = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ ///
+ /// let mut range = BitPtrRange::from_range(bp .. bp.wrapping_add(16));
+ /// range.nth(2);
+ /// range.nth_back(2);
+ ///
+ /// assert!(bp < range.start);
+ /// assert!(!range.contains(&bp));
+ ///
+ /// let mid = bp.wrapping_add(8);
+ ///
+ /// let same_mem = mid.cast::<Cell<u16>>();
+ /// assert!(range.contains(&mid));
+ /// ```
+ ///
+ /// Casting to a different `BitStore` type whose `Mem` parameter differs
+ /// from the range always results in a `false` response, even if the pointer
+ /// being tested is numerically within the range.
+ #[inline]
+ pub fn contains<M2, T2>(&self, pointer: &BitPtr<M2, T2, O>) -> bool
+ where
+ M2: Mutability,
+ T2: BitStore,
+ {
+ dvl::match_store::<T::Mem, T2::Mem>()
+ && self.start <= *pointer
+ && *pointer < self.end
+ }
+
+ /// Converts the range into a span descriptor over all live bits.
+ ///
+ /// The produced bit-span does *not* include the bit addressed by `.end`.
+ ///
+ /// ## Safety
+ ///
+ /// The `.start` and `.end` bit-pointers must both be derived from the same
+ /// provenance region. `BitSpan` draws its provenance from the `.start`
+ /// element pointer, and incorrectly extending it beyond the source
+ /// provenance is undefined behavior.
+ pub(crate) unsafe fn into_bitspan(self) -> BitSpan<M, T, O> {
+ self.start.span_unchecked(self.len())
+ }
+
+ /// Snapshots `.start`, then increments it.
+ ///
+ /// This method is only safe to call when the range is non-empty.
+ #[inline]
+ fn take_front(&mut self) -> BitPtr<M, T, O> {
+ let start = self.start;
+ self.start = start.wrapping_add(1);
+ start
+ }
+
+ /// Decrements `.end`, then returns it.
+ ///
+ /// The bit-pointer returned by this method is always to an alive bit.
+ ///
+ /// This method is only safe to call when the range is non-empty.
+ #[inline]
+ fn take_back(&mut self) -> BitPtr<M, T, O> {
+ let prev = self.end.wrapping_sub(1);
+ self.end = prev;
+ prev
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Clone for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self { ..*self }
+ }
+}
+
+impl<M, T, O> Eq for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+impl<M1, M2, O, T1, T2> PartialEq<BitPtrRange<M2, T2, O>>
+ for BitPtrRange<M1, T1, O>
+where
+ M1: Mutability,
+ M2: Mutability,
+ O: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn eq(&self, other: &BitPtrRange<M2, T2, O>) -> bool {
+ // Pointers over different element types are never equal
+ dvl::match_store::<T1::Mem, T2::Mem>()
+ && self.start == other.start
+ && self.end == other.end
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Default for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::EMPTY
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> From<Range<BitPtr<M, T, O>>> for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(range: Range<BitPtr<M, T, O>>) -> Self {
+ Self::from_range(range)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> From<BitPtrRange<M, T, O>> for Range<BitPtr<M, T, O>>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(range: BitPtrRange<M, T, O>) -> Self {
+ range.into_range()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Debug for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ let Range { start, end } = self.clone().into_range();
+ Debug::fmt(&start, fmt)?;
+ write!(fmt, "{0}..{0}", if fmt.alternate() { " " } else { "" })?;
+ Debug::fmt(&end, fmt)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Hash for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, state: &mut H)
+ where H: Hasher {
+ self.start.hash(state);
+ self.end.hash(state);
+ }
+}
+
+impl<M, T, O> Iterator for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ type Item = BitPtr<M, T, O>;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if Self::is_empty(&*self) {
+ return None;
+ }
+ Some(self.take_front())
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ if n >= self.len() {
+ self.start = self.end;
+ return None;
+ }
+ self.start = unsafe { self.start.add(n) };
+ Some(self.take_front())
+ }
+}
+
+impl<M, T, O> DoubleEndedIterator for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if Self::is_empty(&*self) {
+ return None;
+ }
+ Some(self.take_back())
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ if n >= self.len() {
+ self.end = self.start;
+ return None;
+ }
+ let out = unsafe { self.end.sub(n.wrapping_add(1)) };
+ self.end = out;
+ Some(out)
+ }
+}
+
+impl<M, T, O> ExactSizeIterator for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ (unsafe { self.end.offset_from(self.start) }) as usize
+ }
+}
+
+impl<M, T, O> FusedIterator for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> RangeBounds<BitPtr<M, T, O>> for BitPtrRange<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn start_bound(&self) -> Bound<&BitPtr<M, T, O>> {
+ Bound::Included(&self.start)
+ }
+
+ #[inline]
+ fn end_bound(&self) -> Bound<&BitPtr<M, T, O>> {
+ Bound::Excluded(&self.end)
+ }
+}
diff --git a/src/ptr/single.rs b/src/ptr/single.rs
new file mode 100644
index 0000000..f896c9e
--- /dev/null
+++ b/src/ptr/single.rs
@@ -0,0 +1,1446 @@
+#![doc = include_str!("../../doc/ptr/single.md")]
+
+use core::{
+ any,
+ cmp,
+ convert::TryFrom,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ Pointer,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ marker::PhantomData,
+ ptr,
+};
+
+use tap::{
+ Pipe,
+ TryConv,
+};
+use wyz::{
+ comu::{
+ Address,
+ Const,
+ Frozen,
+ Mut,
+ Mutability,
+ NullPtrError,
+ },
+ fmt::FmtForward,
+};
+
+use super::{
+ check_alignment,
+ AddressExt,
+ BitPtrRange,
+ BitRef,
+ BitSpan,
+ BitSpanError,
+ MisalignError,
+};
+use crate::{
+ access::BitAccess,
+ devel as dvl,
+ index::BitIdx,
+ mem,
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ store::BitStore,
+};
+
+#[repr(C, packed)]
+#[doc = include_str!("../../doc/ptr/BitPtr.md")]
+pub struct BitPtr<M = Const, T = usize, O = Lsb0>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Memory addresses must be well-aligned and non-null.
+ ///
+ /// This is not actually a requirement of `BitPtr`, but it is a requirement
+ /// of `BitSpan`, and it is extended across the entire crate for
+ /// consistency.
+ ptr: Address<M, T>,
+ /// The index of the referent bit within `*ptr`.
+ bit: BitIdx<T::Mem>,
+ /// The ordering used to translate `bit` into an actual position in `*ptr`.
+ _or: PhantomData<O>,
+}
+
+impl<M, T, O> BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The canonical dangling pointer. This selects the starting bit of the
+ /// canonical dangling pointer for `T`.
+ pub const DANGLING: Self = Self {
+ ptr: Address::DANGLING,
+ bit: BitIdx::MIN,
+ _or: PhantomData,
+ };
+
+ /// Loads the address field, sidestepping any alignment problems.
+ ///
+ /// This is the only safe way to access `(&self).ptr`. Do not perform field
+ /// access on `.ptr` through a reference except through this method.
+ #[inline]
+ fn get_addr(&self) -> Address<M, T> {
+ unsafe { ptr::addr_of!(self.ptr).read_unaligned() }
+ }
+
+ /// Tries to construct a `BitPtr` from a memory location and a bit index.
+ ///
+ /// ## Parameters
+ ///
+ /// - `ptr`: The address of a memory element. `Address` wraps raw pointers
+ /// or references, and enforces that they are not null. `BitPtr`
+ /// additionally requires that the address be well-aligned to its type;
+ /// misaligned addresses cause this to return an error.
+ /// - `bit`: The index of the selected bit within `*ptr`.
+ ///
+ /// ## Returns
+ ///
+ /// This returns an error if `ptr` is not aligned to `T`; otherwise, it
+ /// returns a new bit-pointer structure to the given element and bit.
+ ///
+ /// You should typically prefer to use constructors that take directly from
+ /// a memory reference or pointer, such as the `TryFrom<*T>`
+ /// implementations, the `From<&/mut T>` implementations, or the
+ /// [`::from_ref()`], [`::from_mut()`], [`::from_slice()`], or
+ /// [`::from_slice_mut()`] functions.
+ ///
+ /// [`::from_mut()`]: Self::from_mut
+ /// [`::from_ref()`]: Self::from_ref
+ /// [`::from_slice()`]: Self::from_slice
+ /// [`::from_slice_mut()`]: Self::from_slice_mut
+ #[inline]
+ pub fn new(
+ ptr: Address<M, T>,
+ bit: BitIdx<T::Mem>,
+ ) -> Result<Self, MisalignError<T>> {
+ Ok(Self {
+ ptr: check_alignment(ptr)?,
+ bit,
+ ..Self::DANGLING
+ })
+ }
+
+ /// Constructs a `BitPtr` from an address and head index, without checking
+ /// the address for validity.
+ ///
+ /// ## Parameters
+ ///
+ /// - `addr`: The memory address to use in the bit-pointer. See the Safety
+ /// section.
+ /// - `head`: The index of the bit in `*addr` that this bit-pointer selects.
+ ///
+ /// ## Returns
+ ///
+ /// A new bit-pointer composed of the parameters. No validity checking is
+ /// performed.
+ ///
+ /// ## Safety
+ ///
+ /// The `Address` type imposes a non-null requirement. `BitPtr` additionally
+ /// requires that `addr` is well-aligned for `T`, and presumes that the
+ /// caller has ensured this with [`bv_ptr::check_alignment`][0]. If this is
+ /// not the case, then the program is incorrect, and subsequent behavior is
+ /// not specified.
+ ///
+ /// [0]: crate::ptr::check_alignment
+ #[inline]
+ pub unsafe fn new_unchecked(
+ ptr: Address<M, T>,
+ bit: BitIdx<T::Mem>,
+ ) -> Self {
+ if cfg!(debug_assertions) {
+ Self::new(ptr, bit).unwrap()
+ }
+ else {
+ Self {
+ ptr,
+ bit,
+ ..Self::DANGLING
+ }
+ }
+ }
+
+ /// Gets the address of the base storage element.
+ #[inline]
+ pub fn address(self) -> Address<M, T> {
+ self.get_addr()
+ }
+
+ /// Gets the `BitIdx` that selects the bit within the memory element.
+ #[inline]
+ pub fn bit(self) -> BitIdx<T::Mem> {
+ self.bit
+ }
+
+ /// Decomposes a bit-pointer into its element address and bit index.
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`
+ ///
+ /// ## Returns
+ ///
+ /// - `.0`: The memory address in which the referent bit is located.
+ /// - `.1`: The index of the referent bit in `*.0` according to the `O` type
+ /// parameter.
+ #[inline]
+ pub fn raw_parts(self) -> (Address<M, T>, BitIdx<T::Mem>) {
+ (self.address(), self.bit())
+ }
+
+ /// Converts a bit-pointer into a span descriptor by attaching a length
+ /// counter (in bits).
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`: The base address of the produced span.
+ /// - `bits`: The length, in bits, of the span.
+ ///
+ /// ## Returns
+ ///
+ /// A span descriptor beginning at `self` and ending (exclusive) at `self +
+ /// bits`. This fails if it is unable to encode the requested span into a
+ /// descriptor.
+ pub(crate) fn span(
+ self,
+ bits: usize,
+ ) -> Result<BitSpan<M, T, O>, BitSpanError<T>> {
+ BitSpan::new(self.ptr, self.bit, bits)
+ }
+
+ /// Converts a bit-pointer into a span descriptor, without performing
+ /// encoding validity checks.
+ ///
+ /// ## Parameters
+ ///
+ /// - `self`: The base address of the produced span.
+ /// - `bits`: The length, in bits, of the span.
+ ///
+ /// ## Returns
+ ///
+ /// An encoded span descriptor of `self` and `bits`. Note that no validity
+ /// checks are performed!
+ ///
+ /// ## Safety
+ ///
+ /// The caller must ensure that the rules of `BitSpan::new` are not
+ /// violated. Typically this method should only be used on parameters that
+ /// have already passed through `BitSpan::new` and are known to be good.
+ pub(crate) unsafe fn span_unchecked(self, bits: usize) -> BitSpan<M, T, O> {
+ BitSpan::new_unchecked(self.get_addr(), self.bit, bits)
+ }
+
+ /// Produces a bit-pointer range beginning at `self` (inclusive) and ending
+ /// at `self + count` (exclusive).
+ ///
+ /// ## Safety
+ ///
+ /// `self + count` must be within the same provenance region as `self`. The
+ /// first bit past the end of an allocation is included in provenance
+ /// regions, though it is not dereferenceable and will not be dereferenced.
+ ///
+ /// It is unsound to *even construct* a pointer that departs the provenance
+ /// region, even if that pointer is never dereferenced!
+ pub(crate) unsafe fn range(self, count: usize) -> BitPtrRange<M, T, O> {
+ (self .. self.add(count)).into()
+ }
+
+ /// Removes write permissions from a bit-pointer.
+ #[inline]
+ pub fn to_const(self) -> BitPtr<Const, T, O> {
+ let Self {
+ ptr: addr,
+ bit: head,
+ ..
+ } = self;
+ BitPtr {
+ ptr: addr.immut(),
+ bit: head,
+ ..BitPtr::DANGLING
+ }
+ }
+
+ /// Adds write permissions to a bit-pointer.
+ ///
+ /// ## Safety
+ ///
+ /// This pointer must have been derived from a `*mut` pointer.
+ #[inline]
+ pub unsafe fn to_mut(self) -> BitPtr<Mut, T, O> {
+ let Self {
+ ptr: addr,
+ bit: head,
+ ..
+ } = self;
+ BitPtr {
+ ptr: addr.assert_mut(),
+ bit: head,
+ ..BitPtr::DANGLING
+ }
+ }
+
+ /// Freezes a bit-pointer, forbidding direct mutation.
+ ///
+ /// This is used as a necessary prerequisite to all mutation of memory.
+ /// `BitPtr` uses an implementation scoped to `Frozen<_>` to perform
+ /// alias-aware writes; see below.
+ pub(crate) fn freeze(self) -> BitPtr<Frozen<M>, T, O> {
+ let Self {
+ ptr: addr,
+ bit: head,
+ ..
+ } = self;
+ BitPtr {
+ ptr: addr.freeze(),
+ bit: head,
+ ..BitPtr::DANGLING
+ }
+ }
+}
+
+impl<T, O> BitPtr<Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Constructs a `BitPtr` to the zeroth bit in a single element.
+ #[inline]
+ pub fn from_ref(elem: &T) -> Self {
+ unsafe { Self::new_unchecked(elem.into(), BitIdx::MIN) }
+ }
+
+ /// Constructs a `BitPtr` to the zeroth bit in the zeroth element of a
+ /// slice.
+ ///
+ /// This method is distinct from `Self::from_ref(&slice[0])`, because it
+ /// ensures that the returned bit-pointer has provenance over the entire
+ /// slice. Indexing within a slice narrows the provenance range, and makes
+ /// departure from the subslice, *even within the original slice*, illegal.
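+ ///
+ /// ## Examples
+ ///
+ /// An illustrative sketch: a pointer produced this way may step across
+ /// element boundaries anywhere inside the slice.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = [0u8, !0u8];
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_slice(&data[..]);
+ /// // Bit 8 of the region is bit 0 of `data[1]`, which is set.
+ /// assert!(unsafe { ptr.add(8).read() });
+ /// ```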
+ #[inline]
+ pub fn from_slice(slice: &[T]) -> Self {
+ unsafe {
+ Self::new_unchecked(slice.as_ptr().into_address(), BitIdx::MIN)
+ }
+ }
+
+ /// Gets a raw pointer to the memory element containing the selected bit.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn pointer(&self) -> *const T {
+ self.get_addr().to_const()
+ }
+}
+
+impl<T, O> BitPtr<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Constructs a mutable `BitPtr` to the zeroth bit in a single element.
+ #[inline]
+ pub fn from_mut(elem: &mut T) -> Self {
+ unsafe { Self::new_unchecked(elem.into(), BitIdx::MIN) }
+ }
+
+ /// Constructs a `BitPtr` to the zeroth bit in the zeroth element of a
+ /// mutable slice.
+ ///
+ /// This method is distinct from `Self::from_mut(&mut slice[0])`, because it
+ /// ensures that the returned bit-pointer has provenance over the entire
+ /// slice. Indexing within a slice narrows the provenance range, and makes
+ /// departure from the subslice, *even within the original slice*, illegal.
+ #[inline]
+ pub fn from_mut_slice(slice: &mut [T]) -> Self {
+ unsafe {
+ Self::new_unchecked(slice.as_mut_ptr().into_address(), BitIdx::MIN)
+ }
+ }
+
+ /// Constructs a mutable `BitPtr` to the zeroth bit in the zeroth element of
+ /// a slice.
+ ///
+ /// This method is distinct from `Self::from_mut(&mut slice[0])`, because it
+ /// ensures that the returned bit-pointer has provenance over the entire
+ /// slice. Indexing within a slice narrows the provenance range, and makes
+ /// departure from the subslice, *even within the original slice*, illegal.
+ #[inline]
+ pub fn from_slice_mut(slice: &mut [T]) -> Self {
+ unsafe {
+ Self::new_unchecked(slice.as_mut_ptr().into_address(), BitIdx::MIN)
+ }
+ }
+
+ /// Gets a raw pointer to the memory location containing the selected bit.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn pointer(&self) -> *mut T {
+ self.get_addr().to_mut()
+ }
+}
+
+/// Port of the `*bool` inherent API.
+impl<M, T, O> BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Tests if a bit-pointer is the null value.
+ ///
+ /// This is always false, as a `BitPtr` is a `NonNull` internally. Use
+ /// `Option<BitPtr>` to express the potential for a null pointer.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::is_null`](https://doc.rust-lang.org/std/primitive.pointer.html#method.is_null)
+ #[inline]
+ #[deprecated = "`BitPtr` is never null"]
+ pub fn is_null(self) -> bool {
+ false
+ }
+
+ /// Casts to a `BitPtr` with a different storage parameter.
+ ///
+ /// This is not free! In order to maintain value integrity, it encodes its
+ /// value into a `BitSpan` descriptor, casts that, and then decodes the
+ /// result into a `BitPtr` of the target type. If `T` and `U` have different
+ /// `::Mem` associated types, then this may change the selected bit in
+ /// memory. This is an unavoidable cost of the addressing and encoding
+ /// schemes.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::cast`](https://doc.rust-lang.org/std/primitive.pointer.html#method.cast)
+ #[inline]
+ pub fn cast<U>(self) -> BitPtr<M, U, O>
+ where U: BitStore {
+ let (addr, head, _) =
+ unsafe { self.span_unchecked(1) }.cast::<U>().raw_parts();
+ unsafe { BitPtr::new_unchecked(addr, head) }
+ }
+
+ /// Decomposes a bit-pointer into its address and head-index components.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::to_raw_parts`](https://doc.rust-lang.org/std/primitive.pointer.html#method.to_raw_parts)
+ ///
+ /// ## API Differences
+ ///
+ /// The original method is unstable as of 1.54.0; however, because `BitPtr`
+ /// already has a similar API, the name is optimistically stabilized here.
+ /// Prefer [`.raw_parts()`] until the original inherent stabilizes.
+ ///
+ /// [`.raw_parts()`]: Self::raw_parts
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn to_raw_parts(self) -> (Address<M, T>, BitIdx<T::Mem>) {
+ self.raw_parts()
+ }
+
+ /// Produces a proxy reference to the referent bit.
+ ///
+ /// Because `BitPtr` guarantees that it is non-null and well-aligned, this
+ /// never returns `None`. However, this is still unsafe to call on any
+ /// bit-pointers created from conjured values rather than known references.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::as_ref`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref)
+ ///
+ /// ## API Differences
+ ///
+ /// This produces a proxy type rather than a true reference. The proxy
+ /// implements `Deref<Target = bool>`, and can be converted to `&bool` with
+ /// a reborrow `&*`.
+ ///
+ /// ## Safety
+ ///
+ /// Since `BitPtr` does not permit null or misaligned pointers, this method
+ /// will always dereference the pointer in order to create the proxy. As
+ /// such, you must ensure the following conditions are met:
+ ///
+ /// - the pointer must be dereferenceable as defined in the standard library
+ /// documentation
+ /// - the pointer must point to an initialized instance of `T`
+ /// - you must ensure that no other pointer will race to modify the referent
+ /// location while this call is reading from memory to produce the proxy
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 1u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// let val = unsafe { ptr.as_ref() }.unwrap();
+ /// assert!(*val);
+ /// ```
+ #[inline]
+ pub unsafe fn as_ref<'a>(self) -> Option<BitRef<'a, Const, T, O>> {
+ Some(BitRef::from_bitptr(self.to_const()))
+ }
+
+ /// Creates a new bit-pointer at a specified offset from the original.
+ ///
+ /// `count` is in units of bits.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset)
+ ///
+ /// ## Safety
+ ///
+ /// `BitPtr` is implemented with Rust raw pointers internally, and is
+ /// subject to all of Rust’s rules about provenance and permission tracking.
+ /// You must abide by the safety rules established in the original method,
+ /// to which this internally delegates.
+ ///
+ /// Additionally, `bitvec` imposes its own rules: while Rust cannot observe
+ /// provenance beyond an element or byte level, `bitvec` demands that
+ /// `&mut BitSlice` have exclusive view over all bits it observes. You must
+ /// not produce a bit-pointer that departs a `BitSlice` region and intrudes
+ /// on any `&mut BitSlice`’s handle, and you must not produce a
+ /// write-capable bit-pointer that intrudes on a `&BitSlice` handle that
+ /// expects its contents to be immutable.
+ ///
+ /// Note that it is illegal to *construct* a bit-pointer that invalidates
+ /// any of these rules. If you wish to defer safety-checking to the point of
+ /// dereferencing, and allow the temporary construction *but not*
+ /// *dereference* of illegal `BitPtr`s, use [`.wrapping_offset()`] instead.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 5u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// unsafe {
+ /// assert!(ptr.read());
+ /// assert!(!ptr.offset(1).read());
+ /// assert!(ptr.offset(2).read());
+ /// }
+ /// ```
+ ///
+ /// [`.wrapping_offset()`]: Self::wrapping_offset
+ #[inline]
+ #[must_use = "returns a new bit-pointer rather than modifying its argument"]
+ pub unsafe fn offset(self, count: isize) -> Self {
+ let (elts, head) = self.bit.offset(count);
+ Self::new_unchecked(self.ptr.offset(elts), head)
+ }
+
+ /// Creates a new bit-pointer at a specified offset from the original.
+ ///
+ /// `count` is in units of bits.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::wrapping_offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_offset)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` makes it explicitly illegal to wrap a pointer around the high
+ /// end of the address space, because it is incapable of representing a null
+ /// pointer.
+ ///
+ /// However, `<*T>::wrapping_offset` has additional properties as a result
+ /// of its tolerance for wrapping the address space: it tolerates departing
+ /// a provenance region, and is not unsafe to use to *create* a bit-pointer
+ /// that is outside the bounds of its original provenance.
+ ///
+ /// ## Safety
+ ///
+ /// This function is safe to use because the bit-pointers it creates defer
+ /// their provenance checks until the point of dereference. As such, you
+ /// can safely use this to perform arbitrary pointer arithmetic that Rust
+ /// considers illegal in ordinary arithmetic, as long as you do not
+ /// dereference the bit-pointer until it has been brought in bounds of the
+ /// originating provenance region.
+ ///
+ /// This means that, to the Rust rule engine,
+ /// `let z = x.wrapping_add(y as usize).wrapping_sub(x as usize);` is not
+ /// equivalent to `y`, but `z` is safe to construct, and
+ /// `z.wrapping_add(x as usize).wrapping_sub(y as usize)` produces a
+ /// bit-pointer that *is* equivalent to `x`.
+ ///
+ /// See the documentation of the original method for more details about
+ /// provenance regions, and the distinctions that the optimizer makes about
+ /// them.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 0u32;
+ /// let mut ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// let end = ptr.wrapping_offset(32);
+ /// while ptr < end {
+ /// # #[cfg(feature = "std")] {
+ /// println!("{}", unsafe { ptr.read() });
+ /// # }
+ /// ptr = ptr.wrapping_offset(3);
+ /// }
+ /// ```
+ #[inline]
+ #[must_use = "returns a new bit-pointer rather than modifying its argument"]
+ pub fn wrapping_offset(self, count: isize) -> Self {
+ let (elts, head) = self.bit.offset(count);
+ unsafe { Self::new_unchecked(self.ptr.wrapping_offset(elts), head) }
+ }
+
+ /// Calculates the distance (in bits) between two bit-pointers.
+ ///
+ /// This method is the inverse of [`.offset()`].
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::offset_from`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset_from)
+ ///
+ /// ## API Differences
+ ///
+ /// The base pointer may have a different `BitStore` type parameter, as long
+ /// as the two bit-pointers share an underlying memory type. This is
+ /// necessary in order to accommodate aliasing markers introduced between
+ /// when an origin pointer was taken and when `self` is compared against it.
+ ///
+ /// ## Safety
+ ///
+ /// Both `self` and `origin` **must** be drawn from the same provenance
+ /// region. This means that they must be created from the same Rust
+ /// allocation, whether with `let` or the allocator API, and must be in the
+ /// (inclusive) range `base ..= base + len`. The first bit past the end of
+ /// a region can be addressed, just not dereferenced.
+ ///
+ /// See the original `<*T>::offset_from` for more details on region safety.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 0u32;
+ /// let base = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// let low = unsafe { base.add(10) };
+ /// let high = unsafe { low.add(15) };
+ /// unsafe {
+ /// assert_eq!(high.offset_from(low), 15);
+ /// assert_eq!(low.offset_from(high), -15);
+ /// assert_eq!(low.offset(15), high);
+ /// assert_eq!(high.offset(-15), low);
+ /// }
+ /// ```
+ ///
+ /// While it is safe to *construct* bit-pointers that depart a provenance
+ /// region, it remains illegal to *dereference* those pointers, or to measure
+ /// the distance between pointers drawn from different provenance regions!
+ ///
+ /// This usage is incorrect, and a program that contains it is not
+ /// well-formed.
+ ///
+ /// ```rust,no_run
+ /// use bitvec::prelude::*;
+ ///
+ /// let a = 0u8;
+ /// let b = !0u8;
+ ///
+ /// let a_ptr = BitPtr::<_, _, Lsb0>::from_ref(&a);
+ /// let b_ptr = BitPtr::<_, _, Lsb0>::from_ref(&b);
+ /// let diff = (b_ptr.pointer() as isize)
+ /// .wrapping_sub(a_ptr.pointer() as isize)
+ /// // Remember: raw pointers are byte-stepped,
+ /// // but bit-pointers are bit-stepped.
+ /// .wrapping_mul(8);
+ /// // This pointer to `b` has `a`’s provenance:
+ /// let b_ptr_2 = a_ptr.wrapping_offset(diff);
+ ///
+ /// // They are *arithmetically* equal:
+ /// assert_eq!(b_ptr, b_ptr_2);
+ /// // But it is still undefined behavior to cross provenances!
+ /// assert_eq!(0, unsafe { b_ptr_2.offset_from(b_ptr) });
+ /// ```
+ ///
+ /// [`.offset()`]: Self::offset
+ #[inline]
+ pub unsafe fn offset_from<U>(self, origin: BitPtr<M, U, O>) -> isize
+ where U: BitStore<Mem = T::Mem> {
+ self.get_addr()
+ .cast::<T::Mem>()
+ .offset_from(origin.get_addr().cast::<T::Mem>())
+ .wrapping_mul(mem::bits_of::<T::Mem>() as isize)
+ .wrapping_add(self.bit.into_inner() as isize)
+ .wrapping_sub(origin.bit.into_inner() as isize)
+ }
+
+ /// Adjusts a bit-pointer upwards in memory. This is equivalent to
+ /// `.offset(count as isize)`.
+ ///
+ /// `count` is in units of bits.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.add)
+ ///
+ /// ## Safety
+ ///
+ /// See [`.offset()`](Self::offset).
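+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch of stepping a bit-pointer upwards:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 4u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// assert!(unsafe { ptr.add(2).read() });
+ /// ```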
+ #[inline]
+ #[must_use = "returns a new bit-pointer rather than modifying its argument"]
+ pub unsafe fn add(self, count: usize) -> Self {
+ self.offset(count as isize)
+ }
+
+ /// Adjusts a bit-pointer downwards in memory. This is equivalent to
+ /// `.offset((count as isize).wrapping_neg())`.
+ ///
+ /// `count` is in units of bits.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::sub`](https://doc.rust-lang.org/std/primitive.pointer.html#method.sub)
+ ///
+ /// ## Safety
+ ///
+ /// See [`.offset()`](Self::offset).
+ #[inline]
+ #[must_use = "returns a new bit-pointer rather than modifying its argument"]
+ pub unsafe fn sub(self, count: usize) -> Self {
+ self.offset((count as isize).wrapping_neg())
+ }
+
+ /// Adjusts a bit-pointer upwards in memory, using wrapping semantics. This
+ /// is equivalent to `.wrapping_offset(count as isize)`.
+ ///
+ /// `count` is in units of bits.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::wrapping_add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add)
+ ///
+ /// ## Safety
+ ///
+ /// See [`.wrapping_offset()`](Self::wrapping_offset).
+ #[inline]
+ #[must_use = "returns a new bit-pointer rather than modifying its argument"]
+ pub fn wrapping_add(self, count: usize) -> Self {
+ self.wrapping_offset(count as isize)
+ }
+
+ /// Adjusts a bit-pointer downwards in memory, using wrapping semantics.
+ /// This is equivalent to
+ /// `.wrapping_offset((count as isize).wrapping_neg())`.
+ ///
+ /// `count` is in units of bits.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::wrapping_add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add)
+ ///
+ /// ## Safety
+ ///
+ /// See [`.wrapping_offset()`](Self::wrapping_offset).
+ #[inline]
+ #[must_use = "returns a new bit-pointer rather than modifying its argument"]
+ pub fn wrapping_sub(self, count: usize) -> Self {
+ self.wrapping_offset((count as isize).wrapping_neg())
+ }
+
+ /// Reads the bit from `*self`.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::read`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::read`](crate::ptr::read).
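+ ///
+ /// ## Examples
+ ///
+ /// A short sketch of reading individual bits through a pointer:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 2u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
+ /// unsafe {
+ ///   assert!(!ptr.read());
+ ///   assert!(ptr.add(1).read());
+ /// }
+ /// ```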
+ #[inline]
+ pub unsafe fn read(self) -> bool {
+ (*self.ptr.to_const()).load_value().get_bit::<O>(self.bit)
+ }
+
+ /// Reads the bit from `*self` using a volatile load.
+ ///
+ /// Prefer using a crate such as [`voladdress`][0] to manage volatile I/O
+ /// and use `bitvec` only on the local objects it provides. Individual I/O
+ /// operations for individual bits are likely not the behavior you want.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::read_volatile`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::read_volatile`](crate::ptr::read_volatile).
+ ///
+ /// [0]: https://docs.rs/voladdress/latest/voladdress
+ #[inline]
+ pub unsafe fn read_volatile(self) -> bool {
+ self.ptr.to_const().read_volatile().get_bit::<O>(self.bit)
+ }
+
+ /// Reads the bit from `*self` using an unaligned memory access.
+ ///
+ /// `BitPtr` forbids unaligned addresses. If you have such an address, you
+ /// must perform your memory accesses on the raw element, and only use
+ /// `bitvec` on a well-aligned stack temporary. This method should never be
+ /// necessary.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::read_unaligned`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::read_unaligned`](crate::ptr::read_unaligned)
+ #[inline]
+ #[deprecated = "`BitPtr` does not have unaligned addresses"]
+ pub unsafe fn read_unaligned(self) -> bool {
+ self.ptr.to_const().read_unaligned().get_bit::<O>(self.bit)
+ }
+
+ /// Copies `count` bits from `self` to `dest`. The source and destination
+ /// may overlap.
+ ///
+ /// Note that overlap is only defined when `O` and `O2` are the same type. If
+ /// they differ, then `bitvec` does not define overlap, and assumes that the
+ /// two regions are wholly disjoint in memory.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::copy_to`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::copy`](crate::ptr::copy).
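+ ///
+ /// ## Examples
+ ///
+ /// A small sketch of copying between regions with different orderings (the
+ /// regions here do not overlap):
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let src = 0b1111_0000u8;
+ /// let mut dst = 0u8;
+ /// let from = BitPtr::<_, _, Msb0>::from_ref(&src);
+ /// let to = BitPtr::<_, _, Lsb0>::from_mut(&mut dst);
+ /// unsafe { from.copy_to(to, 4); }
+ /// assert_eq!(dst, 0b1111);
+ /// ```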
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub unsafe fn copy_to<T2, O2>(self, dest: BitPtr<Mut, T2, O2>, count: usize)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ super::copy(self.to_const(), dest, count);
+ }
+
+ /// Copies `count` bits from `self` to `dest`. The source and destination
+ /// may *not* overlap.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::copy_to_nonoverlapping`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to_nonoverlapping)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::copy_nonoverlapping`](crate::ptr::copy_nonoverlapping).
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub unsafe fn copy_to_nonoverlapping<T2, O2>(
+ self,
+ dest: BitPtr<Mut, T2, O2>,
+ count: usize,
+ ) where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ super::copy_nonoverlapping(self.to_const(), dest, count);
+ }
+
+ /// Computes the offset (in bits) that needs to be applied to the
+ /// bit-pointer in order to make it aligned to the given *byte* alignment.
+ ///
+ /// “Alignment” here means that the bit-pointer selects the starting bit of
+ /// a memory location whose address satisfies the requested alignment.
+ ///
+ /// `align` is measured in **bytes**. If you wish to align your bit-pointer
+ /// to a specific fraction (½, ¼, or ⅛ of one byte), please file an issue
+ /// and I will work on adding this functionality.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::align_offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.align_offset)
+ ///
+ /// ## Notes
+ ///
+ /// If the base-element address of the bit-pointer is already aligned to
+ /// `align`, then this will return the bit-offset required to select the
+ /// first bit of the successor element.
+ ///
+ /// If it is not possible to align the bit-pointer, then the implementation
+ /// returns `usize::MAX`.
+ ///
+ /// The return value is measured in bits, not `T` elements or bytes. The
+ /// only thing you can do with it is pass it into [`.add()`] or
+ /// [`.wrapping_add()`].
+ ///
+ /// Note from the standard library: It is permissible for the implementation
+ /// to *always* return `usize::MAX`. Only your algorithm’s performance can
+ /// depend on getting a usable offset here; it must be correct independently
+ /// of this function providing a useful value.
+ ///
+ /// ## Safety
+ ///
+ /// There are no guarantees whatsoëver that offsetting the bit-pointer will
+ /// not overflow or go beyond the allocation that the bit-pointer selects.
+ /// It is up to the caller to ensure that the returned offset is correct in
+ /// all terms other than alignment.
+ ///
+ /// ## Panics
+ ///
+ /// This method panics if `align` is not a power of two.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = [0u8; 3];
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_slice(&data);
+ /// let ptr = unsafe { ptr.add(2) };
+ /// let count = ptr.align_offset(2);
+ /// assert!(count >= 6);
+ /// ```
+ ///
+ /// [`.add()`]: Self::add
+ /// [`.wrapping_add()`]: Self::wrapping_add
+ #[inline]
+ pub fn align_offset(self, align: usize) -> usize {
+ let width = mem::bits_of::<T::Mem>();
+ match (
+ self.ptr.to_const().align_offset(align),
+ self.bit.into_inner() as usize,
+ ) {
+ (0, 0) => 0,
+ (0, head) => align * mem::bits_of::<u8>() - head,
+ (usize::MAX, _) => usize::MAX,
+ (elts, head) => elts.wrapping_mul(width).wrapping_sub(head),
+ }
+ }
+}
+
+/// Port of the `*mut bool` inherent API.
+impl<T, O> BitPtr<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Produces a proxy reference to the referent bit.
+ ///
+ /// Because `BitPtr` guarantees that it is non-null and well-aligned, this
+ /// never returns `None`. However, this is still unsafe to call on any
+ /// bit-pointers created from conjured values rather than known references.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::as_mut`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// This produces a proxy type rather than a true reference. The proxy
+ /// implements `DerefMut<Target = bool>`, and can be converted to
+ /// `&mut bool` with a reborrow `&mut *`.
+ ///
+ /// Writes to the proxy are not reflected in the proxied location until the
+ /// proxy is destroyed, either through `Drop` or its [`.commit()`] method.
+ ///
+ /// ## Safety
+ ///
+ /// Since `BitPtr` does not permit null or misaligned pointers, this method
+ /// will always dereference the pointer in order to create the proxy. As
+ /// such, you must ensure the following conditions are met:
+ ///
+ /// - the pointer must be dereferenceable as defined in the standard library
+ /// documentation
+ /// - the pointer must point to an initialized instance of `T`
+ /// - you must ensure that no other pointer will race to modify the referent
+ /// location while this call is reading from memory to produce the proxy
+ /// - you must ensure that no other `bitvec` handle targets the referent bit
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 0u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+ /// let mut val = unsafe { ptr.as_mut() }.unwrap();
+ /// assert!(!*val);
+ /// *val = true;
+ /// assert!(*val);
+ /// ```
+ ///
+ /// [`.commit()`]: crate::ptr::BitRef::commit
+ #[inline]
+ pub unsafe fn as_mut<'a>(self) -> Option<BitRef<'a, Mut, T, O>> {
+ Some(BitRef::from_bitptr(self))
+ }
+
+ /// Copies `count` bits from the region starting at `src` to the region
+ /// starting at `self`.
+ ///
+ /// The regions are free to overlap; the implementation detects overlap and
+ /// handles it correctly.
+ ///
+ /// Note: this has the *opposite* argument order from [`ptr::copy`]: `self`
+ /// is the destination, not the source.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::copy_from`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_from)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::copy`].
+ ///
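+ /// ## Examples
+ ///
+ /// A brief sketch of the destination-first argument order:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let src = 0b1010u8;
+ /// let mut dst = 0u8;
+ /// let from = BitPtr::<_, _, Lsb0>::from_ref(&src);
+ /// let to = BitPtr::<_, _, Lsb0>::from_mut(&mut dst);
+ /// unsafe { to.copy_from(from, 4); }
+ /// assert_eq!(dst, 0b1010);
+ /// ```
+ ///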
+ /// [`ptr::copy`]: crate::ptr::copy
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub unsafe fn copy_from<T2, O2>(
+ self,
+ src: BitPtr<Const, T2, O2>,
+ count: usize,
+ ) where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ src.copy_to(self, count);
+ }
+
+ /// Copies `count` bits from the region starting at `src` to the region
+ /// starting at `self`.
+ ///
+ /// Unlike [`.copy_from()`], the two regions may *not* overlap; this method
+ /// does not attempt to detect overlap and thus may have a slight
+ /// performance boost over the overlap-handling `.copy_from()`.
+ ///
+ /// Note: this has the *opposite* argument order from
+ /// [`ptr::copy_nonoverlapping`]: `self` is the destination, not the source.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::copy_from_nonoverlapping`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_from_nonoverlapping)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::copy_nonoverlapping`].
+ ///
+ /// [`.copy_from()`]: Self::copy_from
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub unsafe fn copy_from_nonoverlapping<T2, O2>(
+ self,
+ src: BitPtr<Const, T2, O2>,
+ count: usize,
+ ) where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ src.copy_to_nonoverlapping(self, count);
+ }
+
+ /// Runs the destructor of the referent value.
+ ///
+ /// `bool` has no destructor; this function does nothing.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::drop_in_place`](https://doc.rust-lang.org/std/primitive.pointer.html#method.drop_in_place)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::drop_in_place`].
+ ///
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place
+ #[inline]
+ #[deprecated = "this has no effect, and should not be called"]
+ pub fn drop_in_place(self) {}
+
+ /// Writes a new bit into the given location.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::write`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::write`].
+ ///
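+ /// ## Examples
+ ///
+ /// A minimal sketch of setting a single bit:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 0u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+ /// unsafe { ptr.write(true); }
+ /// assert_eq!(data, 1);
+ /// ```
+ ///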
+ /// [`ptr::write`]: crate::ptr::write
+ #[inline]
+ pub unsafe fn write(self, value: bool) {
+ self.replace(value);
+ }
+
+ /// Writes a new bit using volatile I/O operations.
+ ///
+ /// Because processors do not generally have single-bit read or write
+ /// instructions, this must perform a volatile read of the entire memory
+ /// location, perform the write locally, then perform another volatile write
+ /// to the entire location. These three steps are guaranteed to be
+ /// sequential with respect to each other, but are not guaranteed to be
+ /// atomic.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are *only*
+ /// guaranteed not to be elided or reördered by the compiler across other
+ /// I/O operations.
+ ///
+ /// You should not use `bitvec` to act on volatile memory. You should use a
+ /// crate specialized for volatile I/O work, such as [`voladdress`], and use
+ /// it to explicitly manage the I/O and ask it to perform `bitvec` work only
+ /// on the local snapshot of a volatile location.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::write_volatile`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_volatile)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::write_volatile`].
+ ///
+ /// [`ptr::write_volatile`]: crate::ptr::write_volatile
+ /// [`voladdress`]: https://docs.rs/voladdress/latest/voladdress
+ #[inline]
+ #[allow(clippy::needless_borrow)] // Clippy is wrong.
+ pub unsafe fn write_volatile(self, value: bool) {
+ let ptr = self.ptr.to_mut();
+ let mut tmp = ptr.read_volatile();
+ Self::new_unchecked((&mut tmp).into(), self.bit).write(value);
+ ptr.write_volatile(tmp);
+ }
+
+ /// Writes a bit into memory, tolerating unaligned addresses.
+ ///
+ /// `BitPtr` does not have unaligned addresses. `BitPtr` itself is capable
+ /// of operating on misaligned addresses, but elects to disallow use of them
+ /// in keeping with the rest of `bitvec`’s requirements.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::write_unaligned`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_unaligned)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::write_unaligned`].
+ ///
+ /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned
+ #[inline]
+ #[allow(clippy::needless_borrow)] // Clippy is wrong.
+ #[deprecated = "`BitPtr` does not have unaligned addresses"]
+ pub unsafe fn write_unaligned(self, value: bool) {
+ let ptr = self.ptr.to_mut();
+ let mut tmp = ptr.read_unaligned();
+ Self::new_unchecked((&mut tmp).into(), self.bit).write(value);
+ ptr.write_unaligned(tmp);
+ }
+
+ /// Replaces the bit at `*self` with a new value, returning the previous
+ /// value.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::replace`](https://doc.rust-lang.org/std/primitive.pointer.html#method.replace)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::replace`].
+ ///
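+ /// ## Examples
+ ///
+ /// A short sketch showing that the previous bit value is returned:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 1u8;
+ /// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
+ /// assert!(unsafe { ptr.replace(false) });
+ /// assert_eq!(data, 0);
+ /// ```
+ ///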
+ /// [`ptr::replace`]: crate::ptr::replace
+ #[inline]
+ pub unsafe fn replace(self, value: bool) -> bool {
+ self.freeze().frozen_write_bit(value)
+ }
+
+ /// Swaps the bits at two mutable locations.
+ ///
+ /// ## Original
+ ///
+ /// [`pointer::swap`](https://doc.rust-lang.org/std/primitive.pointer.html#method.swap)
+ ///
+ /// ## Safety
+ ///
+ /// See [`ptr::swap`].
+ ///
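+ /// ## Examples
+ ///
+ /// A small sketch of exchanging bits between two locations:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut a = 0u8;
+ /// let mut b = 1u8;
+ /// let pa = BitPtr::<_, _, Lsb0>::from_mut(&mut a);
+ /// let pb = BitPtr::<_, _, Lsb0>::from_mut(&mut b);
+ /// unsafe { pa.swap(pb); }
+ /// assert_eq!(a, 1);
+ /// assert_eq!(b, 0);
+ /// ```
+ ///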
+ /// [`ptr::swap`]: crate::ptr::swap
+ #[inline]
+ pub unsafe fn swap<T2, O2>(self, with: BitPtr<Mut, T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.write(with.replace(self.read()));
+ }
+}
+
+impl<M, T, O> BitPtr<Frozen<M>, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Writes through a bit-pointer that has had its mutability permission
+ /// removed.
+ ///
+ /// This is used to allow `BitPtr<Const, _, AliasSafe<T>>` pointers, which
+ /// are not `Mut` but may still modify memory, to do so.
+ pub(crate) unsafe fn frozen_write_bit(self, value: bool) -> bool {
+ (*self.ptr.cast::<T::Access>().to_const())
+ .write_bit::<O>(self.bit, value)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Clone for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self {
+ ptr: self.get_addr(),
+ ..*self
+ }
+ }
+}
+
+impl<M, T, O> Eq for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+impl<M, T, O> Ord for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.partial_cmp(other).expect(
+ "BitPtr has a total ordering when type parameters are identical",
+ )
+ }
+}
+
+impl<M1, M2, T1, T2, O> PartialEq<BitPtr<M2, T2, O>> for BitPtr<M1, T1, O>
+where
+ M1: Mutability,
+ M2: Mutability,
+ T1: BitStore,
+ T2: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn eq(&self, other: &BitPtr<M2, T2, O>) -> bool {
+ if !dvl::match_store::<T1::Mem, T2::Mem>() {
+ return false;
+ }
+ self.get_addr().to_const() as usize
+ == other.get_addr().to_const() as usize
+ && self.bit.into_inner() == other.bit.into_inner()
+ }
+}
+
+impl<M1, M2, T1, T2, O> PartialOrd<BitPtr<M2, T2, O>> for BitPtr<M1, T1, O>
+where
+ M1: Mutability,
+ M2: Mutability,
+ T1: BitStore,
+ T2: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitPtr<M2, T2, O>) -> Option<cmp::Ordering> {
+ if !dvl::match_store::<T1::Mem, T2::Mem>() {
+ return None;
+ }
+ match (self.get_addr().to_const() as usize)
+ .cmp(&(other.get_addr().to_const() as usize))
+ {
+ cmp::Ordering::Equal => {
+ self.bit.into_inner().partial_cmp(&other.bit.into_inner())
+ },
+ ord => Some(ord),
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> From<&T> for BitPtr<Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(elem: &T) -> Self {
+ Self::from_ref(elem)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> From<&mut T> for BitPtr<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(elem: &mut T) -> Self {
+ Self::from_mut(elem)
+ }
+}
+
+impl<T, O> TryFrom<*const T> for BitPtr<Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Error = BitPtrError<T>;
+
+ #[inline]
+ fn try_from(elem: *const T) -> Result<Self, Self::Error> {
+ elem.try_conv::<Address<Const, T>>()?
+ .pipe(|ptr| Self::new(ptr, BitIdx::MIN))?
+ .pipe(Ok)
+ }
+}
+
+impl<T, O> TryFrom<*mut T> for BitPtr<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Error = BitPtrError<T>;
+
+ #[inline]
+ fn try_from(elem: *mut T) -> Result<Self, Self::Error> {
+ elem.try_conv::<Address<Mut, T>>()?
+ .pipe(|ptr| Self::new(ptr, BitIdx::MIN))?
+ .pipe(Ok)
+ }
+}
+
+impl<M, T, O> Debug for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "{} Bit<{}, {}>",
+ M::RENDER,
+ any::type_name::<T>(),
+ any::type_name::<O>(),
+ )?;
+ Pointer::fmt(self, fmt)
+ }
+}
+
+impl<M, T, O> Pointer for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("")
+ .field(&self.get_addr().fmt_pointer())
+ .field(&self.bit.fmt_binary())
+ .finish()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Hash for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, state: &mut H)
+ where H: Hasher {
+ self.get_addr().hash(state);
+ self.bit.hash(state);
+ }
+}
+
+impl<M, T, O> Copy for BitPtr<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// Errors produced by invalid bit-pointer components.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub enum BitPtrError<T>
+where T: BitStore
+{
+ /// Attempted to construct a bit-pointer with the null element address.
+ Null(NullPtrError),
+ /// Attempted to construct a bit-pointer with an address not aligned for the
+ /// element type.
+ Misaligned(MisalignError<T>),
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> From<MisalignError<T>> for BitPtrError<T>
+where T: BitStore
+{
+ #[inline]
+ fn from(err: MisalignError<T>) -> Self {
+ Self::Misaligned(err)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> From<NullPtrError> for BitPtrError<T>
+where T: BitStore
+{
+ #[inline]
+ fn from(err: NullPtrError) -> Self {
+ Self::Null(err)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> Display for BitPtrError<T>
+where T: BitStore
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ match self {
+ Self::Null(err) => Display::fmt(err, fmt),
+ Self::Misaligned(err) => Display::fmt(err, fmt),
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T> std::error::Error for BitPtrError<T> where T: BitStore {}
diff --git a/src/ptr/span.rs b/src/ptr/span.rs
new file mode 100644
index 0000000..261d54e
--- /dev/null
+++ b/src/ptr/span.rs
@@ -0,0 +1,874 @@
+#![doc = include_str!("../../doc/ptr/span.md")]
+
+use core::{
+ any,
+ fmt::{
+ self,
+ Binary,
+ Debug,
+ Display,
+ Formatter,
+ Pointer,
+ },
+ marker::PhantomData,
+ mem,
+ ptr::{
+ self,
+ NonNull,
+ },
+};
+
+use tap::Pipe;
+use wyz::{
+ comu::{
+ Address,
+ Const,
+ Mut,
+ Mutability,
+ NullPtrError,
+ Reference,
+ Referential,
+ },
+ fmt::FmtForward,
+};
+
+use super::{
+ BitPtr,
+ BitPtrError,
+ BitPtrRange,
+ MisalignError,
+};
+use crate::{
+ index::{
+ BitEnd,
+ BitIdx,
+ },
+ mem::{
+ bits_of,
+ BitRegister,
+ },
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ slice::BitSlice,
+ store::BitStore,
+};
+
+#[doc = include_str!("../../doc/ptr/BitSpan.md")]
+pub(crate) struct BitSpan<M = Const, T = usize, O = Lsb0>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The element address in which the base bit lives.
+ ///
+ /// This must be typed as `()` because it cannot be directly dereferenced,
+ /// and will not have valid values for `NonNull<T>`.
+ ptr: NonNull<()>,
+ /// The length of the span, in bits.
+ len: usize,
+ /// The bit-ordering within elements used to translate indices to real bits.
+ _or: PhantomData<O>,
+ /// This is functionally an element-slice pointer.
+ _ty: PhantomData<Address<M, [T]>>,
+}
+
+impl<M, T, O> BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The canonical empty span. This always uses the dangling address for `T`.
+ pub(crate) const EMPTY: Self = Self {
+ ptr: NonNull::<T>::dangling().cast::<()>(),
+ len: 0,
+ _or: PhantomData,
+ _ty: PhantomData,
+ };
+ /// The number of least-significant bits in `.len` needed to hold the low
+ /// bits of the head `BitIdx` cursor.
+ ///
+ /// This is always 3 until Rust adds a target architecture whose bytes are
+ /// not 8 bits.
+ pub(crate) const LEN_HEAD_BITS: usize = 3;
+ /// Marks the bits of `.len` that store some of the `.head()` logical field.
+ pub(crate) const LEN_HEAD_MASK: usize = 0b111;
+ /// Marks the bits of `.ptr` that store the `.addr()` logical field.
+ pub(crate) const PTR_ADDR_MASK: usize = !0 << Self::PTR_HEAD_BITS;
+ /// The number of least-significant bits in `.ptr` needed to hold the high
+ /// bits of the head `BitIdx` cursor.
+ pub(crate) const PTR_HEAD_BITS: usize =
+ <T::Mem as BitRegister>::INDX as usize - Self::LEN_HEAD_BITS;
+ /// Marks the bits of `.ptr` that store some of the `.head()` logical field.
+ pub(crate) const PTR_HEAD_MASK: usize = !Self::PTR_ADDR_MASK;
+ /// The inclusive-maximum number of bits that a `BitSpan` can cover. This
+ /// value is therefore one higher than the maximum *index* that can be used
+ /// to select a bit within a span.
+ pub(crate) const REGION_MAX_BITS: usize = !0 >> Self::LEN_HEAD_BITS;
+ /// The inclusive-maximum number of memory elements that a bit-span can
+ /// cover.
+ ///
+ /// This is the number of elements required to store `REGION_MAX_BITS` bits,
+ /// plus one because a region could begin away from the zeroth bit and thus
+ /// continue into the next element at the end.
+ ///
+ /// Since the region is ⅛th the domain of a `usize` counter already, this
+ /// number is guaranteed to be well below the limits of both arithmetic and
+ /// Rust’s own ceiling constraints on memory region descriptors.
+ pub(crate) const REGION_MAX_ELTS: usize =
+ crate::mem::elts::<T::Mem>(Self::REGION_MAX_BITS) + 1;
+}
+
+/// Constructors.
+impl<M, T, O> BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Constructs an empty `BitSpan` at an allocated address.
+ ///
+ /// This is used when the region has no contents, but the pointer
+ /// information must be retained and cannot be canonicalized.
+ ///
+ /// ## Parameters
+ ///
+ /// - `addr`: Some address of a `T` allocation. It must be valid in the
+ /// caller’s memory regime.
+ ///
+ /// ## Returns
+ ///
+ /// A zero-length `BitSpan` based at `addr`.
+ #[cfg(feature = "alloc")]
+ pub(crate) fn uninhabited(addr: Address<M, T>) -> Self {
+ Self {
+ ptr: addr.into_inner().cast::<()>(),
+ ..Self::EMPTY
+ }
+ }
+
+ /// Creates a new bit-span from its logical components.
+ ///
+ /// ## Parameters
+ ///
+ /// - `addr`: The base address of the memory region in which the bit-span
+ /// resides.
+ /// - `head`: The index of the initial bit within `*addr`.
+ /// - `bits`: The number of bits contained in the bit-span.
+ ///
+ /// ## Returns
+ ///
+ /// This fails in the following conditions:
+ ///
+ /// - `bits` is greater than `REGION_MAX_BITS`
+ /// - `addr` is not aligned to `T`.
+ /// - `addr + elts(bits)` wraps around the address space
+ ///
+ /// The `Address` type already enforces the non-null requirement.
+ pub(crate) fn new(
+ addr: Address<M, T>,
+ head: BitIdx<T::Mem>,
+ bits: usize,
+ ) -> Result<Self, BitSpanError<T>> {
+ if bits > Self::REGION_MAX_BITS {
+ return Err(BitSpanError::TooLong(bits));
+ }
+ let base = BitPtr::<M, T, O>::new(addr, head)?;
+ let last = base.wrapping_add(bits);
+ if last < base {
+ return Err(BitSpanError::TooHigh(addr.to_const()));
+ }
+
+ Ok(unsafe { Self::new_unchecked(addr, head, bits) })
+ }
+
+ /// Creates a new bit-span from its components, without any validity checks.
+ ///
+ /// ## Safety
+ ///
+ /// The caller must ensure that the arguments satisfy all the requirements
+ /// outlined in [`::new()`]. The easiest way to ensure this is to only use
+ /// this function to construct bit-spans from values extracted from
+ /// bit-spans previously constructed through `::new()`.
+ ///
+ /// This function **only** performs the value encoding. Invalid lengths will
+ /// truncate, and invalid addresses may cause memory unsafety.
+ ///
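+ /// As a sketch of the encoding (assuming `T = u32`, so `INDX` is 5 and
+ /// `PTR_HEAD_BITS` is 2): a span of 20 bits starting at bit 13 of the `u32`
+ /// at address `0x1000` is packed as `ptr = 0x1000 | (13 >> 3) = 0x1001` and
+ /// `len = (20 << 3) | (13 & 0b111) = 0b1010_0101`.
+ ///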
+ /// [`::new()`]: Self::new
+ pub(crate) unsafe fn new_unchecked(
+ addr: Address<M, T>,
+ head: BitIdx<T::Mem>,
+ bits: usize,
+ ) -> Self {
+ let addr = addr.to_const().cast::<u8>();
+
+ let head = head.into_inner() as usize;
+ let ptr_data = addr as usize & Self::PTR_ADDR_MASK;
+ let ptr_head = head >> Self::LEN_HEAD_BITS;
+
+ let len_head = head & Self::LEN_HEAD_MASK;
+ let len_bits = bits << Self::LEN_HEAD_BITS;
+
+ /* See <https://github.com/bitvecto-rs/bitvec/issues/135#issuecomment-986357842>.
+ * This attempts to retain inbound provenance information and may help
+ * Miri better understand pointer operations this module performs.
+ *
+ * This performs `a + (p - a)` in `addr`’s provenance zone, which is
+ * numerically equivalent to `p` but does not require conjuring a new,
+ * uninformed, pointer value.
+ */
+ let ptr_raw = ptr_data | ptr_head;
+ let ptr = addr.wrapping_add(ptr_raw.wrapping_sub(addr as usize));
+
+ Self {
+ ptr: NonNull::new_unchecked(ptr.cast::<()>() as *mut ()),
+ len: len_bits | len_head,
+ ..Self::EMPTY
+ }
+ }
+}
+
+/// Encoded fields.
+impl<M, T, O> BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Gets the base element address of the referent region.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The address of the starting element of the memory region. This address
+ /// is weakly typed so that it can be cast by call sites to the most useful
+ /// access type.
+ pub(crate) fn address(&self) -> Address<M, T> {
+ Address::new(unsafe {
+ NonNull::new_unchecked(
+ (self.ptr.as_ptr() as usize & Self::PTR_ADDR_MASK) as *mut T,
+ )
+ })
+ }
+
+ /// Overwrites the data pointer with a new address. This method does not
+ /// perform safety checks on the new pointer.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `ptr`: The new address of the `BitSpan`’s domain.
+ ///
+ /// # Safety
+ ///
+ /// This function performs no checks; the invariants of [`::new`] must be
+ /// upheld by the caller.
+ ///
+ /// [`::new`]: Self::new
+ #[cfg(feature = "alloc")]
+ pub(crate) unsafe fn set_address(&mut self, addr: Address<M, T>) {
+ let mut addr_value = addr.to_const() as usize;
+ addr_value &= Self::PTR_ADDR_MASK;
+ addr_value |= self.ptr.as_ptr() as usize & Self::PTR_HEAD_MASK;
+ self.ptr = NonNull::new_unchecked(addr_value as *mut ())
+ }
+
+ /// Gets the starting bit index of the referent region.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A [`BitIdx`] of the first live bit in the element at the
+ /// [`self.address()`] address.
+ ///
+ /// [`BitIdx`]: crate::index::BitIdx
+ /// [`self.address()`]: Self::address
+ pub(crate) fn head(&self) -> BitIdx<T::Mem> {
+ let ptr = self.ptr.as_ptr() as usize;
+ let ptr_head = (ptr & Self::PTR_HEAD_MASK) << Self::LEN_HEAD_BITS;
+ let len_head = self.len & Self::LEN_HEAD_MASK;
+ unsafe { BitIdx::new_unchecked((ptr_head | len_head) as u8) }
+ }
+
+ /// Writes a new `head` value into the pointer, with no other effects.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `head`: A new starting index.
+ ///
+ /// # Effects
+ ///
+ /// `head` is written into the `.head` logical field, without affecting
+ /// `.addr` or `.bits`.
+ #[cfg(feature = "alloc")]
+ pub(crate) unsafe fn set_head(&mut self, head: BitIdx<T::Mem>) {
+ let head = head.into_inner() as usize;
+ let mut ptr = self.ptr.as_ptr() as usize;
+
+ ptr &= Self::PTR_ADDR_MASK;
+ ptr |= head >> Self::LEN_HEAD_BITS;
+ self.ptr = NonNull::new_unchecked(ptr as *mut ());
+
+ self.len &= !Self::LEN_HEAD_MASK;
+ self.len |= head & Self::LEN_HEAD_MASK;
+ }
+
+ /// Gets the number of live bits in the described region.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A count of how many live bits the region pointer describes.
+ pub(crate) fn len(&self) -> usize {
+ self.len >> Self::LEN_HEAD_BITS
+ }
+
+ /// Sets the `.bits` logical member to a new value.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `new_len`: A new bit length. This must not be greater than
+ /// [`REGION_MAX_BITS`].
+ ///
+ /// # Effects
+ ///
+ /// The `new_len` value is written directly into the `.bits` logical field.
+ ///
+ /// [`REGION_MAX_BITS`]: Self::REGION_MAX_BITS
+ pub(crate) unsafe fn set_len(&mut self, new_len: usize) {
+ if cfg!(debug_assertions) {
+ *self = Self::new(self.address(), self.head(), new_len).unwrap();
+ }
+ else {
+ self.len &= Self::LEN_HEAD_MASK;
+ self.len |= new_len << Self::LEN_HEAD_BITS;
+ }
+ }
+
+ /// Gets the three logical components of the pointer.
+ ///
+ /// The encoding is not public API, and direct field access is never
+ /// supported.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `.0`: The base address of the referent memory region.
+ /// - `.1`: The index of the first live bit in the first element of the
+ /// region.
+ /// - `.2`: The number of live bits in the region.
+ pub(crate) fn raw_parts(&self) -> (Address<M, T>, BitIdx<T::Mem>, usize) {
+ (self.address(), self.head(), self.len())
+ }
+}
+
+/// Virtual fields.
+impl<M, T, O> BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Computes the number of elements, starting at [`self.address()`], that
+ /// the region touches.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The count of all elements, starting at [`self.address()`], that contain
+ /// live bits included in the referent region.
+ ///
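+ /// For example (a sketch): a span of 20 bits that begins at bit 13 of a
+ /// `u32` element spills into a second element, so it touches
+ /// `elts::<u32>(13 + 20) = 2` elements.
+ ///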
+ /// [`self.address()`]: Self::address
+ pub(crate) fn elements(&self) -> usize {
+ crate::mem::elts::<T>(self.len() + self.head().into_inner() as usize)
+ }
+
+ /// Computes the tail index for the first dead bit after the live bits.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A `BitEnd` that is the index of the first dead bit after the last live
+ /// bit in the last element. This will almost always be in the range `1 ..=
+ /// T::Mem::BITS`.
+ ///
+ /// It will be zero only when `self` is empty.
+ pub(crate) fn tail(&self) -> BitEnd<T::Mem> {
+ let (head, len) = (self.head(), self.len());
+ let (_, tail) = head.span(len);
+ tail
+ }
+}
+
+/// Conversions.
+impl<M, T, O> BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Casts the span to another element type.
+ ///
+ /// This does not alter the encoded value of the pointer! It only
+ /// reinterprets the element type, and the encoded value may shift
+ /// significantly in the result type. Use with caution.
+ pub(crate) fn cast<U>(self) -> BitSpan<M, U, O>
+ where U: BitStore {
+ let Self { ptr, len, .. } = self;
+ BitSpan {
+ ptr,
+ len,
+ ..BitSpan::EMPTY
+ }
+ }
+
+ /// Reäligns a bit-span to a different base memory type.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::align_to`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to)
+ ///
+ /// ## Safety
+ ///
+ /// `U` must have the same type family as `T`. It is illegal to use this
+ /// method to cast away alias safeties such as an atomic or `Cell` wrapper.
+ pub(crate) unsafe fn align_to<U>(self) -> (Self, BitSpan<M, U, O>, Self)
+ where U: BitStore {
+ /* This function body implements the algorithm locally, rather than
+ * delegating to the standard library’s `<[T]>::align_to::<U>`
+ * function, because that requires use of memory references, and
+ * `BitSpan` does not require that its values be valid for
+ * dereference.
+ */
+ let this = self.to_bitptr();
+ // Counter for how many bits remain in the span.
+ let mut rem = self.len();
+ // The *byte* alignment of `U`.
+ let align = mem::align_of::<U>();
+ // 1. Get the number of bits between `self.head()` and the start of a
+ // `[U]` region.
+ let step = this.align_offset(align);
+ // If this count is more than the available bits, quit.
+ if step > rem {
+ return (self, BitSpan::EMPTY, Self::EMPTY);
+ }
+ let left = this.span_unchecked(step);
+ rem -= step;
+
+ let mid_base =
+ this.add(step).address().cast::<U>().pipe(|addr| {
+ BitPtr::<M, U, O>::new_unchecked(addr, BitIdx::MIN)
+ });
+ let mid_elts = rem >> <U::Mem as BitRegister>::INDX;
+ let excess = rem & <U::Mem as BitRegister>::MASK as usize;
+ let step = rem - excess;
+ let mid = mid_base.span_unchecked(step);
+
+ let right_base =
+ mid_base.address().add(mid_elts).cast::<T>().pipe(|addr| {
+ BitPtr::<M, T, O>::new_unchecked(addr, BitIdx::MIN)
+ });
+ let right = right_base.span_unchecked(excess);
+
+ (left, mid, right)
+ }
+
+ /// Casts a mutable bit-slice pointer into its structural representation.
+ pub(crate) fn from_bitslice_ptr_mut(raw: *mut BitSlice<T, O>) -> Self {
+ let BitSpan { ptr, len, .. } =
+ BitSpan::from_bitslice_ptr(raw as *const BitSlice<T, O>);
+ Self {
+ ptr,
+ len,
+ ..Self::EMPTY
+ }
+ }
+
+ /// Converts the span descriptor into a raw `BitSlice` pointer.
+ ///
+ /// This is a noöp.
+ pub(crate) fn into_bitslice_ptr(self) -> *const BitSlice<T, O> {
+ let Self { ptr, len, .. } = self;
+ ptr::slice_from_raw_parts(ptr.as_ptr(), len) as *const BitSlice<T, O>
+ }
+
+ /// Converts the span descriptor into a shared `BitSlice` reference.
+ ///
+ /// This is a noöp.
+ ///
+ /// ## Safety
+ ///
+ /// The span must describe memory that is safe to dereference, and to which
+ /// no `&mut BitSlice` references exist.
+ pub(crate) unsafe fn into_bitslice_ref<'a>(self) -> &'a BitSlice<T, O> {
+ &*self.into_bitslice_ptr()
+ }
+
+ /// Produces a bit-pointer to the start of the span.
+ ///
+ /// This is **not** a noöp: the base address and starting bit index are
+ /// decoded into the bit-pointer structure.
+ pub(crate) fn to_bitptr(self) -> BitPtr<M, T, O> {
+ unsafe { BitPtr::new_unchecked(self.address(), self.head()) }
+ }
+
+ /// Produces a bit-pointer range to either end of the span.
+ ///
+ /// This is **not** a noöp: all three logical fields are decoded in order to
+ /// construct the range.
+ pub(crate) fn to_bitptr_range(self) -> BitPtrRange<M, T, O> {
+ let start = self.to_bitptr();
+ let end = unsafe { start.add(self.len()) };
+ BitPtrRange { start, end }
+ }
+
+ /// Converts the span descriptor into an `Address<>` generic pointer.
+ ///
+ /// This is a noöp.
+ pub(crate) fn to_bitslice_addr(self) -> Address<M, BitSlice<T, O>> {
+ (self.into_bitslice_ptr() as *mut BitSlice<T, O>)
+ .pipe(|ptr| unsafe { NonNull::new_unchecked(ptr) })
+ .pipe(Address::new)
+ }
+
+ /// Converts the span descriptor into a `Reference<>` generic handle.
+ ///
+ /// This is a noöp.
+ pub(crate) fn to_bitslice<'a>(self) -> Reference<'a, M, BitSlice<T, O>>
+ where Address<M, BitSlice<T, O>>: Referential<'a> {
+ unsafe { self.to_bitslice_addr().to_ref() }
+ }
+}
+
+/// Conversions.
+impl<T, O> BitSpan<Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Creates a `Const` span descriptor from a `const` bit-slice pointer.
+ pub(crate) fn from_bitslice_ptr(raw: *const BitSlice<T, O>) -> Self {
+ let slice_nn = match NonNull::new(raw as *const [()] as *mut [()]) {
+ Some(nn) => nn,
+ None => return Self::EMPTY,
+ };
+ let ptr = slice_nn.cast::<()>();
+ let len = unsafe { slice_nn.as_ref() }.len();
+ Self {
+ ptr,
+ len,
+ ..Self::EMPTY
+ }
+ }
+}
+
+/// Conversions.
+impl<T, O> BitSpan<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Converts the span descriptor into a raw mutable `BitSlice` pointer.
+ ///
+ /// This is a noöp.
+ pub(crate) fn into_bitslice_ptr_mut(self) -> *mut BitSlice<T, O> {
+ self.into_bitslice_ptr() as *mut BitSlice<T, O>
+ }
+
+ /// Converts the span descriptor into an exclusive `BitSlice` reference.
+ ///
+ /// This is a noöp.
+ ///
+ /// ## Safety
+ ///
+ /// The span must describe memory that is safe to dereference. In addition,
+ /// no other `BitSlice` reference of any kind (`&` or `&mut`) may exist.
+ pub(crate) unsafe fn into_bitslice_mut<'a>(self) -> &'a mut BitSlice<T, O> {
+ &mut *self.into_bitslice_ptr_mut()
+ }
+}
+
+/// Utilities.
+impl<M, T, O> BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Checks if a requested length can be encoded into the `BitSpan`.
+ ///
+ /// This is `len <= Self::REGION_MAX_BITS`.
+ #[cfg(feature = "alloc")]
+ pub(crate) fn len_encodable(len: usize) -> bool {
+ len <= Self::REGION_MAX_BITS
+ }
+
+ /// Renders the pointer structure into a formatter for use during
+ /// higher-level type [`Debug`] implementations.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `fmt`: The formatter into which the pointer is rendered.
+ /// - `name`: The suffix of the structure rendering its pointer. The `Bit`
+ /// prefix is applied to the object type name in this format.
+ /// - `fields`: Any additional fields in the object’s debug info to be
+ /// rendered.
+ ///
+ /// # Returns
+ ///
+ /// The result of formatting the pointer into the receiver.
+ ///
+ /// # Behavior
+ ///
+ /// This function writes `Bit{name}<{type}, {ord}> {{ {fields} }}` into the
+ /// `fmt` formatter, where `{fields}` includes the address, head index, and
+ /// bit length of the pointer, as well as any additional fields provided by
+ /// the caller.
+ ///
+ /// Higher types in the crate should use this function to drive their
+ /// [`Debug`] implementations, and then use [`BitSlice`]’s list formatters
+ /// to display their buffer contents.
+ ///
+ /// [`BitSlice`]: crate::slice::BitSlice
+ /// [`Debug`]: core::fmt::Debug
+ pub(crate) fn render<'a>(
+ &'a self,
+ fmt: &'a mut Formatter,
+ name: &'a str,
+ fields: impl IntoIterator<Item = &'a (&'a str, &'a dyn Debug)>,
+ ) -> fmt::Result {
+ write!(
+ fmt,
+ "Bit{}<{}, {}>",
+ name,
+ any::type_name::<T::Mem>(),
+ any::type_name::<O>(),
+ )?;
+ let mut builder = fmt.debug_struct("");
+ builder
+ .field("addr", &self.address().fmt_pointer())
+ .field("head", &self.head().fmt_binary())
+ .field("bits", &self.len());
+ for (name, value) in fields {
+ builder.field(name, value);
+ }
+ builder.finish()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Clone for BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<M1, M2, O, T1, T2> PartialEq<BitSpan<M2, T2, O>> for BitSpan<M1, T1, O>
+where
+ M1: Mutability,
+ M2: Mutability,
+ O: BitOrder,
+ T1: BitStore,
+ T2: BitStore,
+{
+ #[inline]
+ fn eq(&self, other: &BitSpan<M2, T2, O>) -> bool {
+ let (addr_a, head_a, bits_a) = self.raw_parts();
+ let (addr_b, head_b, bits_b) = other.raw_parts();
+ bits_of::<T1::Mem>() == bits_of::<T2::Mem>()
+ && addr_a.to_const() as usize == addr_b.to_const() as usize
+ && head_a.into_inner() == head_b.into_inner()
+ && bits_a == bits_b
+ }
+}
+
+impl<T, O> From<&BitSlice<T, O>> for BitSpan<Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(bits: &BitSlice<T, O>) -> Self {
+ Self::from_bitslice_ptr(bits)
+ }
+}
+
+impl<T, O> From<&mut BitSlice<T, O>> for BitSpan<Mut, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(bits: &mut BitSlice<T, O>) -> Self {
+ Self::from_bitslice_ptr_mut(bits)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<M, T, O> Default for BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::EMPTY
+ }
+}
+
+impl<M, T, O> Debug for BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ self.render(fmt, "Span", None)
+ }
+}
+
+impl<M, T, O> Pointer for BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Pointer::fmt(&self.address(), fmt)?;
+ fmt.write_str("(")?;
+ Binary::fmt(&self.head(), fmt)?;
+ fmt.write_str(")[")?;
+ Display::fmt(&self.len(), fmt)?;
+ fmt.write_str("]")
+ }
+}
+
+impl<M, T, O> Copy for BitSpan<M, T, O>
+where
+ M: Mutability,
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// An error produced when creating `BitSpan` encoded references.
+#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub enum BitSpanError<T>
+where T: BitStore
+{
+ /// A null pointer was provided.
+ Null(NullPtrError),
+ /// The base element pointer is not aligned.
+ Misaligned(MisalignError<T>),
+ /// The requested length exceeds the `BitSpan` length ceiling.
+ TooLong(usize),
+ /// The requested address is too high, and wraps to zero.
+ TooHigh(*const T),
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> From<BitPtrError<T>> for BitSpanError<T>
+where T: BitStore
+{
+ #[inline]
+ fn from(err: BitPtrError<T>) -> Self {
+ match err {
+ BitPtrError::Null(err) => Self::Null(err),
+ BitPtrError::Misaligned(err) => Self::Misaligned(err),
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> From<MisalignError<T>> for BitSpanError<T>
+where T: BitStore
+{
+ #[inline]
+ fn from(err: MisalignError<T>) -> Self {
+ Self::Misaligned(err)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> Debug for BitSpanError<T>
+where T: BitStore
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "BitSpanError<{}>::", any::type_name::<T::Mem>())?;
+ match self {
+ Self::Null(err) => fmt.debug_tuple("Null").field(&err).finish(),
+ Self::Misaligned(err) => {
+ fmt.debug_tuple("Misaligned").field(&err).finish()
+ },
+ Self::TooLong(len) => fmt.debug_tuple("TooLong").field(len).finish(),
+ Self::TooHigh(addr) => {
+ fmt.debug_tuple("TooHigh").field(addr).finish()
+ },
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T> Display for BitSpanError<T>
+where T: BitStore
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ match self {
+ Self::Null(err) => Display::fmt(err, fmt),
+ Self::Misaligned(err) => Display::fmt(err, fmt),
+ Self::TooLong(len) => write!(
+ fmt,
+ "Length {} is too long to encode in a bit-slice, which can \
+ only accept {} bits",
+ len,
+ BitSpan::<Const, T, Lsb0>::REGION_MAX_BITS,
+ ),
+ Self::TooHigh(addr) => write!(
+ fmt,
+ "Address {:p} is too high, and produces a span that wraps \
+ around to the zero address.",
+ addr,
+ ),
+ }
+ }
+}
+
+unsafe impl<T> Send for BitSpanError<T> where T: BitStore {}
+
+unsafe impl<T> Sync for BitSpanError<T> where T: BitStore {}
+
+#[cfg(feature = "std")]
+impl<T> std::error::Error for BitSpanError<T> where T: BitStore {}
diff --git a/src/ptr/tests.rs b/src/ptr/tests.rs
new file mode 100644
index 0000000..b6fb79c
--- /dev/null
+++ b/src/ptr/tests.rs
@@ -0,0 +1,172 @@
+//! Unit tests for bit-pointers.
+
+#![cfg(test)]
+
+use core::cmp;
+
+use crate::{
+ index::BitIdx,
+ prelude::*,
+ ptr::{
+ self as bv_ptr,
+ AddressExt,
+ BitSpan,
+ BitSpanError,
+ Mut,
+ },
+};
+
+#[test]
+fn free_functions() {
+ let mut a = [0u8, !0];
+ let mut b = 255u16;
+
+ let one = BitPtr::<Mut, u8, Lsb0>::from_slice_mut(&mut a[..]);
+ let two = one.wrapping_add(8);
+ let three = BitPtr::<Mut, u16, Msb0>::from_mut(&mut b);
+ let four = three.wrapping_add(8);
+
+ unsafe {
+ bv_ptr::copy(two.to_const(), one, 8);
+ }
+ assert_eq!(a[0], !0);
+ unsafe {
+ bv_ptr::copy(three.to_const(), one, 8);
+ }
+ assert_eq!(a[0], 0);
+
+ assert!(!bv_ptr::eq(two.to_const(), one.to_const()));
+
+ unsafe {
+ bv_ptr::swap_nonoverlapping(two, three, 8);
+ }
+ assert_eq!(a[1], 0);
+ assert_eq!(b, !0);
+
+ unsafe {
+ bv_ptr::write_bits(four, false, 8);
+ }
+ assert_eq!(b, 0xFF00);
+}
+
+#[test]
+fn alignment() {
+ let data = 0u16;
+ let a = unsafe { (&data).into_address() };
+ let b = a.cast::<u8>().wrapping_add(1).cast::<u16>();
+
+ assert!(bv_ptr::check_alignment(a).is_ok());
+ assert!(bv_ptr::check_alignment(b).is_err());
+}
+
+#[test]
+fn proxy() {
+ let mut data = 0u8;
+ {
+ let bits = data.view_bits_mut::<Lsb0>();
+ let (mut a, rest) = bits.split_first_mut().unwrap();
+ let (mut b, _) = rest.split_first_mut().unwrap();
+ assert!(!a.replace(true));
+ a.swap(&mut b);
+ assert!(*b);
+ a.set(true);
+ }
+
+ assert_eq!(data, 3);
+}
+
+#[test]
+fn range() {
+ let data = 0u8;
+ let mut bpr = data.view_bits::<Lsb0>().as_bitptr_range();
+
+ let range = bpr.clone().into_range();
+ let bpr2 = range.into();
+ assert_eq!(bpr, bpr2);
+
+ assert!(bpr.nth_back(9).is_none());
+}
+
+#[test]
+#[allow(deprecated)]
+fn single() {
+ let mut data = 1u16;
+ let bp = data.view_bits_mut::<Lsb0>().as_mut_bitptr();
+
+ assert!(!bp.is_null());
+ let bp2 = bp.wrapping_add(9);
+ assert_ne!(bp2.pointer().cast::<u8>(), bp2.cast::<u8>().pointer());
+
+ assert!(unsafe { bp.read_volatile() });
+ assert!(unsafe { bp.read_unaligned() });
+
+ assert_eq!(bp.align_offset(2), 0);
+ assert_eq!(bp2.align_offset(2), 7);
+
+ unsafe {
+ bp.write_volatile(false);
+ bp.swap(bp2);
+ bp2.write_unaligned(true);
+ }
+
+ assert_eq!(bp.cmp(&bp2), cmp::Ordering::Less);
+ assert_ne!(bp, bp.cast::<u8>());
+ assert!(bp.partial_cmp(&bp.cast::<u8>()).is_none());
+}
+
+#[test]
+fn span() {
+ let mut data = [0u32; 2];
+ let addr = unsafe { data.as_mut_ptr().into_address() };
+
+ let too_long = BitSpan::<Mut, u32, Lsb0>::REGION_MAX_BITS + 1;
+ assert!(matches!(
+ BitSpan::<_, _, Lsb0>::new(addr, BitIdx::MIN, too_long),
+ Err(BitSpanError::TooLong(ct)) if ct == too_long));
+
+ let bp = data.view_bits_mut::<Lsb0>().as_mut_bitptr();
+ let bs = bp.cast::<u8>().wrapping_add(8).span(32).unwrap();
+ let (l, c, r) = unsafe { bs.align_to::<u16>() };
+ assert_eq!(l.len(), 8);
+ assert_eq!(c.len(), 16);
+ assert_eq!(r.len(), 8);
+
+ let bs2 = bp.cast::<u8>().wrapping_add(3).span(3).unwrap();
+ assert_eq!(
+ unsafe { bs2.align_to::<u16>() },
+ (bs2, BitSpan::EMPTY, BitSpan::EMPTY)
+ );
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn format() {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+ use core::any;
+
+ let data = 1u8;
+ let bits = data.view_bits::<Lsb0>();
+
+ let bit = bits.first().unwrap();
+ let render = format!("{:?}", bit);
+ assert!(render.starts_with("BitRef<u8,"));
+ assert!(render.ends_with("bits: 1, bit: true }"));
+
+ let bitptr = bits.as_bitptr();
+ let render = format!("{:?}", bitptr);
+ assert!(render.starts_with("*const Bit<u8,"));
+ assert!(render.ends_with(", 000)"), "{}", render);
+
+ let bitspan = bitptr.wrapping_add(2).span(3).unwrap();
+ let render = format!("{:?}", bitspan);
+ let expected = format!(
+ "BitSpan<u8, {}> {{ addr: {:p}, head: 010, bits: 3 }}",
+ any::type_name::<Lsb0>(),
+ bitspan.address(),
+ );
+ assert_eq!(render, expected);
+ let render = format!("{:p}", bitspan);
+ let expected = format!("{:p}(010)[3]", bitspan.address());
+ assert_eq!(render, expected);
+}
diff --git a/src/serdes.rs b/src/serdes.rs
new file mode 100644
index 0000000..4251cc0
--- /dev/null
+++ b/src/serdes.rs
@@ -0,0 +1,160 @@
+#![cfg(feature = "serde")]
+#![doc = include_str!("../doc/serdes.md")]
+
+mod array;
+mod slice;
+mod utils;
+
+use core::fmt::{
+ self,
+ Formatter,
+};
+
+use serde::de::{
+ Deserialize,
+ Deserializer,
+ Visitor,
+};
+
+/// A result of serialization.
+type Result<S> = core::result::Result<
+ <S as serde::Serializer>::Ok,
+ <S as serde::Serializer>::Error,
+>;
+
+/// A list of fields in the `BitSeq` and `BitArr` transport format.
+static FIELDS: &[&str] = &["order", "head", "bits", "data"];
+
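+// A round-trip sketch of this transport format, assuming the `serde_json`
+// crate is available (it is not used by this module itself):
+//
+//     use bitvec::prelude::*;
+//
+//     let arr = bitarr![u8, Lsb0; 1, 0, 1];
+//     let json = serde_json::to_string(&arr).unwrap();
+//     let back: BitArr!(for 3, in u8, Lsb0) =
+//         serde_json::from_str(&json).unwrap();
+//     assert_eq!(arr, back);
+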
+/// The components of a bit-slice in wire format.
+enum Field {
+ /// Denotes the `<O: BitOrder>` type parameter.
+ Order,
+ /// Denotes the head-bit index in the first `Data` element.
+ Head,
+ /// Denotes the count of all live bits in the `Data` sequence.
+ Bits,
+ /// Denotes the raw storage sequence.
+ Data,
+}
+
+/// Visits field tokens without attempting to deserialize into real data.
+struct FieldVisitor;
+
+impl<'de> Deserialize<'de> for Field {
+ fn deserialize<D>(deserializer: D) -> core::result::Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_identifier(FieldVisitor)
+ }
+}
+
+impl<'de> Visitor<'de> for FieldVisitor {
+ type Value = Field;
+
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.write_str("field_identifier")
+ }
+
+ fn visit_str<E>(self, value: &str) -> core::result::Result<Self::Value, E>
+ where E: serde::de::Error {
+ match value {
+ "order" => Ok(Field::Order),
+ "head" => Ok(Field::Head),
+ "bits" => Ok(Field::Bits),
+ "data" => Ok(Field::Data),
+ _ => Err(serde::de::Error::unknown_field(value, FIELDS)),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use serde::{
+ Deserialize,
+ Serialize,
+ };
+ use static_assertions::*;
+
+ use crate::prelude::*;
+
+ #[test]
+ fn trait_impls() {
+ use core::{
+ cell::Cell,
+ sync::atomic::*,
+ };
+
+ use radium::types::*;
+ macro_rules! check_impl {
+ ($($ord:ident @ $($sto:ty),+);+ $(;)?) => {{ $( $(
+ assert_impl_all!(BitSlice<$sto, $ord>: Serialize);
+ assert_impl_all!(BitArray<$sto, $ord>: Serialize, Deserialize<'static>);
+ assert_impl_all!(BitArray<[$sto; 32], $ord>: Serialize, Deserialize<'static>);
+
+ #[cfg(feature = "alloc")] {
+ assert_impl_all!(BitBox<$sto, $ord>: Serialize, Deserialize<'static>);
+ assert_impl_all!(BitVec<$sto, $ord>: Serialize, Deserialize<'static>);
+ }
+ )+ )+ }};
+ }
+
+ assert_impl_all!(&BitSlice<u8, Lsb0>: Deserialize<'static>);
+ assert_impl_all!(&BitSlice<u8, Msb0>: Deserialize<'static>);
+ assert_impl_all!(&BitSlice<u8, LocalBits>: Deserialize<'static>);
+
+ check_impl! {
+ Lsb0 @ u8, u16, u32, usize;
+ Msb0 @ u8, u16, u32, usize;
+ LocalBits @ u8, u16, u32, usize;
+ Lsb0 @ Cell<u8>, Cell<u16>, Cell<u32>, Cell<usize>;
+ Msb0 @ Cell<u8>, Cell<u16>, Cell<u32>, Cell<usize>;
+ LocalBits @ Cell<u8>, Cell<u16>, Cell<u32>, Cell<usize>;
+ Lsb0 @ RadiumU8, RadiumU16, RadiumU32, RadiumUsize;
+ Msb0 @ RadiumU8, RadiumU16, RadiumU32, RadiumUsize;
+ LocalBits @ RadiumU8, RadiumU16, RadiumU32, RadiumUsize;
+ }
+ radium::if_atomic! {
+ if atomic(8) {
+ check_impl! {
+ Lsb0 @ AtomicU8;
+ Msb0 @ AtomicU8;
+ LocalBits @ AtomicU8;
+ }
+ }
+ if atomic(16) {
+ check_impl! {
+ Lsb0 @ AtomicU16;
+ Msb0 @ AtomicU16;
+ LocalBits @ AtomicU16;
+ }
+ }
+ if atomic(32) {
+ check_impl! {
+ Lsb0 @ AtomicU32;
+ Msb0 @ AtomicU32;
+ LocalBits @ AtomicU32;
+ }
+ }
+ if atomic(ptr) {
+ check_impl! {
+ Lsb0 @ AtomicUsize;
+ Msb0 @ AtomicUsize;
+ LocalBits @ AtomicUsize;
+ }
+ }
+ }
+ #[cfg(target_pointer_width = "64")]
+ check_impl! {
+ Lsb0 @ u64, RadiumU64;
+ Msb0 @ u64, RadiumU64;
+ LocalBits @ u64, RadiumU64;
+ }
+ #[cfg(target_pointer_width = "64")]
+ radium::if_atomic!(if atomic(64) {
+ check_impl! {
+ Lsb0 @ AtomicU64;
+ Msb0 @ AtomicU64;
+ LocalBits @ AtomicU64;
+ }
+ });
+ }
+}
diff --git a/src/serdes/array.rs b/src/serdes/array.rs
new file mode 100644
index 0000000..6c95206
--- /dev/null
+++ b/src/serdes/array.rs
@@ -0,0 +1,467 @@
+#![doc=include_str!("../../doc/serdes/array.md")]
+
+use core::{
+ any,
+ fmt::{
+ self,
+ Formatter,
+ },
+};
+
+use serde::{
+ de::{
+ Deserialize,
+ Deserializer,
+ Error,
+ MapAccess,
+ SeqAccess,
+ Unexpected,
+ Visitor,
+ },
+ ser::{
+ Serialize,
+ SerializeStruct,
+ Serializer,
+ },
+};
+
+use super::{
+ utils::{
+ Array,
+ TypeName,
+ },
+ Field,
+ FIELDS,
+};
+use crate::{
+ array::BitArray,
+ index::BitIdx,
+ mem::bits_of,
+ order::BitOrder,
+ store::BitStore,
+};
+
+impl<T, O> Serialize for BitArray<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ T::Mem: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ let mut state = serializer.serialize_struct("BitArr", FIELDS.len())?;
+
+ state.serialize_field("order", &any::type_name::<O>())?;
+ state.serialize_field("head", &BitIdx::<T::Mem>::MIN)?;
+ state.serialize_field("bits", &(self.len() as u64))?;
+ state.serialize_field(
+ "data",
+ Array::from_ref(core::array::from_ref(&self.data)),
+ )?;
+
+ state.end()
+ }
+}
+
+impl<T, O, const N: usize> Serialize for BitArray<[T; N], O>
+where
+ T: BitStore,
+ O: BitOrder,
+ T::Mem: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ let mut state = serializer.serialize_struct("BitArr", FIELDS.len())?;
+
+ state.serialize_field("order", &any::type_name::<O>())?;
+ state.serialize_field("head", &BitIdx::<T::Mem>::MIN)?;
+ state.serialize_field("bits", &(self.len() as u64))?;
+ state.serialize_field("data", Array::from_ref(&self.data))?;
+
+ state.end()
+ }
+}
+
+impl<'de, T, O> Deserialize<'de> for BitArray<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ T::Mem: Deserialize<'de>,
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer
+ .deserialize_struct("BitArr", FIELDS, BitArrVisitor::<T, O, 1>::THIS)
+ .map(|BitArray { data: [elem], .. }| BitArray::new(elem))
+ }
+}
+
+impl<'de, T, O, const N: usize> Deserialize<'de> for BitArray<[T; N], O>
+where
+ T: BitStore,
+ O: BitOrder,
+ T::Mem: Deserialize<'de>,
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_struct(
+ "BitArr",
+ FIELDS,
+ BitArrVisitor::<T, O, N>::THIS,
+ )
+ }
+}
+
+/// Assists in deserialization of a static `BitArr`.
+struct BitArrVisitor<T, O, const N: usize>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The deserialized bit-ordering string.
+ order: Option<TypeName<O>>,
+ /// The deserialized head-bit index. This must be zero; it is used for
+ /// consistency with `BitSeq` and to carry `T::Mem` information.
+ head: Option<BitIdx<T::Mem>>,
+ /// The deserialized bit-count. It must be `bits_of::<[T::Mem; N]>()`.
+ bits: Option<u64>,
+ /// The deserialized data buffer.
+ data: Option<Array<T, N>>,
+}
+
+impl<'de, T, O, const N: usize> BitArrVisitor<T, O, N>
+where
+ T: BitStore,
+ O: BitOrder,
+ Array<T, N>: Deserialize<'de>,
+{
+ /// A new visitor in its ready condition.
+ const THIS: Self = Self {
+ order: None,
+ head: None,
+ bits: None,
+ data: None,
+ };
+
+ /// Attempts to assemble deserialized components into an output value.
+ #[inline]
+ fn assemble<E>(mut self) -> Result<BitArray<[T; N], O>, E>
+ where E: Error {
+ self.order.take().ok_or_else(|| E::missing_field("order"))?;
+ let head = self.head.take().ok_or_else(|| E::missing_field("head"))?;
+ let bits = self.bits.take().ok_or_else(|| E::missing_field("bits"))?;
+ let data = self.data.take().ok_or_else(|| E::missing_field("data"))?;
+
+ if head != BitIdx::MIN {
+ return Err(E::invalid_value(
+ Unexpected::Unsigned(head.into_inner() as u64),
+ &"`BitArray` must have a head-bit of `0`",
+ ));
+ }
+ let bits = bits as usize;
+ if bits != bits_of::<[T; N]>() {
+ return Err(E::invalid_length(bits, &self));
+ }
+
+ Ok(BitArray::new(data.inner))
+ }
+}
+
+impl<'de, T, O, const N: usize> Visitor<'de> for BitArrVisitor<T, O, N>
+where
+ T: BitStore,
+ O: BitOrder,
+ Array<T, N>: Deserialize<'de>,
+{
+ type Value = BitArray<[T; N], O>;
+
+ #[inline]
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "a `BitArray<[u{}; {}], {}>`",
+ bits_of::<T::Mem>(),
+ N,
+ any::type_name::<O>(),
+ )
+ }
+
+ #[inline]
+ fn visit_seq<V>(mut self, mut seq: V) -> Result<Self::Value, V::Error>
+ where V: SeqAccess<'de> {
+ self.order = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(0, &self))?,
+ );
+ self.head = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(1, &self))?,
+ );
+ self.bits = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(2, &self))?,
+ );
+ self.data = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(3, &self))?,
+ );
+
+ self.assemble()
+ }
+
+ #[inline]
+ fn visit_map<V>(mut self, mut map: V) -> Result<Self::Value, V::Error>
+ where V: MapAccess<'de> {
+ while let Some(key) = map.next_key()? {
+ match key {
+ Field::Order => {
+ if self.order.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("order"));
+ }
+ },
+ Field::Head => {
+ if self.head.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("head"));
+ }
+ },
+ Field::Bits => {
+ if self.bits.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("bits"));
+ }
+ },
+ Field::Data => {
+ if self.data.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("data"));
+ }
+ },
+ }
+ }
+
+ self.assemble()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(all(feature = "alloc", not(feature = "std")))]
+ use alloc::format;
+ use core::any;
+
+ use serde_test::{
+ assert_de_tokens,
+ assert_de_tokens_error,
+ assert_ser_tokens,
+ Token,
+ };
+
+ use crate::prelude::*;
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn roundtrip() -> Result<(), Box<dyn std::error::Error>> {
+ type BA = BitArr!(for 16, in u8, Msb0);
+ let array = [0x3Cu8, 0xA5].into_bitarray::<Msb0>();
+
+ let bytes = bincode::serialize(&array)?;
+ let array2 = bincode::deserialize::<BA>(&bytes)?;
+ assert_eq!(array, array2);
+
+ let json = serde_json::to_string(&array)?;
+ let array3 = serde_json::from_str::<BA>(&json)?;
+ assert_eq!(array, array3);
+
+ let json_value = serde_json::to_value(&array)?;
+ let array4 = serde_json::from_value::<BA>(json_value)?;
+ assert_eq!(array, array4);
+
+ type BA2 = BitArray<u16, Msb0>;
+ let array = BA2::new(44203);
+
+ let bytes = bincode::serialize(&array)?;
+ let array2 = bincode::deserialize::<BA2>(&bytes)?;
+ assert_eq!(array, array2);
+
+ let json = serde_json::to_string(&array)?;
+ let array3 = serde_json::from_str::<BA2>(&json)?;
+ assert_eq!(array, array3);
+
+ let json_value = serde_json::to_value(&array)?;
+ let array4 = serde_json::from_value::<BA2>(json_value)?;
+ assert_eq!(array, array4);
+
+ Ok(())
+ }
+
+ #[test]
+ fn tokens() {
+ let array = [0x3Cu8, 0xA5].into_bitarray::<Msb0>();
+ let tokens = &mut [
+ Token::Struct {
+ name: "BitArr",
+ len: 4,
+ },
+ Token::Str("order"),
+ Token::Str(any::type_name::<Msb0>()),
+ Token::Str("head"),
+ Token::Struct {
+ name: "BitIdx",
+ len: 2,
+ },
+ Token::Str("width"),
+ Token::U8(8),
+ Token::Str("index"),
+ Token::U8(0),
+ Token::StructEnd,
+ Token::Str("bits"),
+ Token::U64(16),
+ Token::Str("data"),
+ Token::Tuple { len: 2 },
+ Token::U8(0x3C),
+ Token::U8(0xA5),
+ Token::TupleEnd,
+ Token::StructEnd,
+ ];
+
+ assert_ser_tokens(&array, tokens);
+
+ tokens[1 .. 4].copy_from_slice(&[
+ Token::BorrowedStr("order"),
+ Token::BorrowedStr(any::type_name::<Msb0>()),
+ Token::BorrowedStr("head"),
+ ]);
+ tokens[5] = Token::BorrowedStr("width");
+ tokens[7] = Token::BorrowedStr("index");
+ tokens[10] = Token::BorrowedStr("bits");
+ tokens[12] = Token::BorrowedStr("data");
+ assert_de_tokens(&array, tokens);
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn errors() {
+ type BA = BitArr!(for 8, in u8, Msb0);
+ let mut tokens = vec![
+ Token::Seq { len: Some(4) },
+ Token::BorrowedStr(any::type_name::<Msb0>()),
+ ];
+
+ assert_de_tokens_error::<BitArr!(for 8, in u8, Lsb0)>(
+ &tokens,
+ &format!(
+ "invalid value: string \"{}\", expected the string \"{}\"",
+ any::type_name::<Msb0>(),
+ any::type_name::<Lsb0>(),
+ ),
+ );
+
+ tokens.extend([
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ Token::U64(8),
+ Token::Tuple { len: 1 },
+ Token::U8(0),
+ Token::TupleEnd,
+ Token::SeqEnd,
+ ]);
+
+ tokens[6] = Token::U64(7);
+ assert_de_tokens_error::<BA>(
+ &tokens,
+ "invalid length 7, expected a `BitArray<[u8; 1], \
+ bitvec::order::Msb0>`",
+ );
+
+ tokens[4] = Token::U8(1);
+ assert_de_tokens_error::<BA>(
+ &tokens,
+ "invalid value: integer `1`, expected `BitArray` must have a \
+ head-bit of `0`",
+ );
+
+ assert_de_tokens_error::<BA>(
+ &[
+ Token::Struct {
+ name: "BitArr",
+ len: 2,
+ },
+ Token::BorrowedStr("placeholder"),
+ ],
+ &format!(
+ "unknown field `placeholder`, expected one of `{}`",
+ super::FIELDS.join("`, `"),
+ ),
+ );
+ assert_de_tokens_error::<BA>(
+ &[
+ Token::Struct {
+ name: "BitArr",
+ len: 2,
+ },
+ Token::BorrowedStr("order"),
+ Token::BorrowedStr(any::type_name::<Msb0>()),
+ Token::BorrowedStr("order"),
+ Token::BorrowedStr(any::type_name::<Msb0>()),
+ Token::StructEnd,
+ ],
+ "duplicate field `order`",
+ );
+ assert_de_tokens_error::<BA>(
+ &[
+ Token::Struct {
+ name: "BitArr",
+ len: 2,
+ },
+ Token::BorrowedStr("head"),
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ Token::BorrowedStr("head"),
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ Token::StructEnd,
+ ],
+ "duplicate field `head`",
+ );
+ assert_de_tokens_error::<BA>(
+ &[
+ Token::Struct {
+ name: "BitArr",
+ len: 2,
+ },
+ Token::BorrowedStr("bits"),
+ Token::U64(8),
+ Token::BorrowedStr("bits"),
+ Token::U64(8),
+ Token::StructEnd,
+ ],
+ "duplicate field `bits`",
+ );
+ assert_de_tokens_error::<BA>(
+ &[
+ Token::Struct {
+ name: "BitArr",
+ len: 2,
+ },
+ Token::BorrowedStr("data"),
+ Token::Tuple { len: 1 },
+ Token::U8(0),
+ Token::TupleEnd,
+ Token::BorrowedStr("data"),
+ Token::Tuple { len: 1 },
+ Token::U8(1),
+ Token::TupleEnd,
+ Token::StructEnd,
+ ],
+ "duplicate field `data`",
+ );
+ }
+}
diff --git a/src/serdes/slice.rs b/src/serdes/slice.rs
new file mode 100644
index 0000000..1c495ef
--- /dev/null
+++ b/src/serdes/slice.rs
@@ -0,0 +1,460 @@
+#![doc=include_str!("../../doc/serdes/slice.md")]
+
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+use core::{
+ any,
+ fmt::{
+ self,
+ Formatter,
+ },
+ marker::PhantomData,
+};
+
+use serde::{
+ de::{
+ Deserialize,
+ Deserializer,
+ Error,
+ MapAccess,
+ SeqAccess,
+ Visitor,
+ },
+ ser::{
+ Serialize,
+ SerializeStruct,
+ Serializer,
+ },
+};
+use wyz::comu::Const;
+
+use super::{
+ utils::TypeName,
+ Field,
+ FIELDS,
+};
+#[cfg(feature = "alloc")]
+use crate::{
+ boxed::BitBox,
+ vec::BitVec,
+};
+use crate::{
+ index::BitIdx,
+ mem::bits_of,
+ order::BitOrder,
+ ptr::{
+ AddressExt,
+ BitSpan,
+ BitSpanError,
+ },
+ slice::BitSlice,
+ store::BitStore,
+};
+
+impl<T, O> Serialize for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ T::Mem: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ let head = self.as_bitspan().head();
+ let mut state = serializer.serialize_struct("BitSeq", FIELDS.len())?;
+
+ state.serialize_field("order", &any::type_name::<O>())?;
+ state.serialize_field("head", &head)?;
+ state.serialize_field("bits", &(self.len() as u64))?;
+ state.serialize_field("data", &self.domain())?;
+
+ state.end()
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<T, O> Serialize for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ self.as_bitslice().serialize(serializer)
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<T, O> Serialize for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ self.as_bitslice().serialize(serializer)
+ }
+}
+
+impl<'de, O> Deserialize<'de> for &'de BitSlice<u8, O>
+where O: BitOrder
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_struct(
+ "BitSeq",
+ FIELDS,
+ BitSeqVisitor::<u8, O, &'de [u8], Self, _>::new(
+ |data, head, bits| unsafe {
+ BitSpan::new(data.as_ptr().into_address(), head, bits)
+ .map(|span| BitSpan::into_bitslice_ref(span))
+ },
+ ),
+ )
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<'de, T, O> Deserialize<'de> for BitBox<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Vec<T>: Deserialize<'de>,
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ <BitVec<T, O> as Deserialize<'de>>::deserialize(deserializer)
+ .map(BitVec::into_boxed_bitslice)
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<'de, T, O> Deserialize<'de> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Vec<T>: Deserialize<'de>,
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_struct(
+ "BitSeq",
+ FIELDS,
+ BitSeqVisitor::<T, O, Vec<T>, Self, _>::new(
+ |vec, head, bits| unsafe {
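+ // Reclaim the deserialized vector as bit-vector storage, then restore
+ // the head-bit index and bit-length recorded in the wire format. A
+ // vector too long to claim is reported via the matching span error.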
+ let addr = vec.as_ptr().into_address();
+ let mut bv = BitVec::try_from_vec(vec).map_err(|_| {
+ BitSpan::<Const, T, O>::new(addr, head, bits)
+ .unwrap_err()
+ })?;
+ bv.set_head(head);
+ bv.set_len(bits);
+ Ok(bv)
+ },
+ ),
+ )
+ }
+}
+
+/// Assists in deserialization of a dynamic `BitSeq`.
+struct BitSeqVisitor<T, O, In, Out, Func>
+where
+ T: BitStore,
+ O: BitOrder,
+ Func: FnOnce(In, BitIdx<T::Mem>, usize) -> Result<Out, BitSpanError<T>>,
+{
+ /// Marks the output type that the visitor ultimately produces.
+ out: PhantomData<Result<Out, BitSpanError<T>>>,
+ /// The deserialized bit-ordering string.
+ order: Option<TypeName<O>>,
+ /// The deserialized head-bit index.
+ head: Option<BitIdx<T::Mem>>,
+ /// The deserialized bit-count.
+ bits: Option<u64>,
+ /// The deserialized data buffer.
+ data: Option<In>,
+ /// A functor responsible for final transformation of the deserialized
+ /// components into the output value.
+ func: Func,
+}
+
+impl<'de, T, O, In, Out, Func> BitSeqVisitor<T, O, In, Out, Func>
+where
+ T: 'de + BitStore,
+ O: BitOrder,
+ In: Deserialize<'de>,
+ Func: FnOnce(In, BitIdx<T::Mem>, usize) -> Result<Out, BitSpanError<T>>,
+{
+ /// Creates a new visitor with a given transform functor.
+ #[inline]
+ fn new(func: Func) -> Self {
+ Self {
+ out: PhantomData,
+ order: None,
+ head: None,
+ bits: None,
+ data: None,
+ func,
+ }
+ }
+
+ /// Attempts to assemble deserialized components into an output value.
+ #[inline]
+ fn assemble<E>(mut self) -> Result<Out, E>
+ where E: Error {
+ self.order.take().ok_or_else(|| E::missing_field("order"))?;
+ let head = self.head.take().ok_or_else(|| E::missing_field("head"))?;
+ let bits = self.bits.take().ok_or_else(|| E::missing_field("bits"))?;
+ let data = self.data.take().ok_or_else(|| E::missing_field("data"))?;
+
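+ // NOTE: a failed span construction currently panics through `todo!()`
+ // rather than being surfaced as a deserializer error.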
+ (self.func)(data, head, bits as usize).map_err(|_| todo!())
+ }
+}
+
+impl<'de, T, O, In, Out, Func> Visitor<'de>
+ for BitSeqVisitor<T, O, In, Out, Func>
+where
+ T: 'de + BitStore,
+ O: BitOrder,
+ In: Deserialize<'de>,
+ Func: FnOnce(In, BitIdx<T::Mem>, usize) -> Result<Out, BitSpanError<T>>,
+{
+ type Value = Out;
+
+ #[inline]
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(
+ fmt,
+ "a `BitSlice<u{}, {}>`",
+ bits_of::<T::Mem>(),
+ any::type_name::<O>(),
+ )
+ }
+
+ #[inline]
+ fn visit_seq<V>(mut self, mut seq: V) -> Result<Self::Value, V::Error>
+ where V: SeqAccess<'de> {
+ self.order = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(0, &self))?,
+ );
+ self.head = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(1, &self))?,
+ );
+ self.bits = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(2, &self))?,
+ );
+ self.data = Some(
+ seq.next_element()?
+ .ok_or_else(|| <V::Error>::invalid_length(3, &self))?,
+ );
+
+ self.assemble()
+ }
+
+ #[inline]
+ fn visit_map<V>(mut self, mut map: V) -> Result<Self::Value, V::Error>
+ where V: MapAccess<'de> {
+ while let Some(key) = map.next_key()? {
+ match key {
+ Field::Order => {
+ if self.order.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("order"));
+ }
+ },
+ Field::Head => {
+ if self.head.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("head"));
+ }
+ },
+ Field::Bits => {
+ if self.bits.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("bits"));
+ }
+ },
+ Field::Data => {
+ if self.data.replace(map.next_value()?).is_some() {
+ return Err(<V::Error>::duplicate_field("data"));
+ }
+ },
+ }
+ }
+
+ self.assemble()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(all(feature = "alloc", not(feature = "std")))]
+ use alloc::format;
+ use core::any;
+
+ use serde_test::{
+ assert_de_tokens,
+ assert_de_tokens_error,
+ assert_ser_tokens,
+ Token,
+ };
+
+ use crate::prelude::*;
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn roundtrip() -> Result<(), alloc::boxed::Box<bincode::ErrorKind>> {
+ let bits = bits![u8, Msb0; 1, 0, 1, 1, 0];
+ let encoded = bincode::serialize(&bits)?;
+ let bits2 = bincode::deserialize::<&BitSlice<u8, Msb0>>(&encoded)?;
+ assert_eq!(bits, bits2);
+ Ok(())
+ }
+
+ #[test]
+ fn tokens() {
+ let slice = bits![u8, Lsb0; 0, 1, 0, 0, 1];
+ let tokens = &mut [
+ Token::Struct {
+ name: "BitSeq",
+ len: 4,
+ },
+ Token::Str("order"),
+ Token::Str(any::type_name::<Lsb0>()),
+ Token::Str("head"),
+ Token::Struct {
+ name: "BitIdx",
+ len: 2,
+ },
+ Token::Str("width"),
+ Token::U8(8),
+ Token::Str("index"),
+ Token::U8(0),
+ Token::StructEnd,
+ Token::Str("bits"),
+ Token::U64(5),
+ Token::Str("data"),
+ Token::Seq { len: Some(1) },
+ Token::U8(18),
+ Token::SeqEnd,
+ Token::StructEnd,
+ ];
+ assert_ser_tokens(&slice, tokens);
+ tokens[8] = Token::U8(1);
+ tokens[11] = Token::U64(4);
+ assert_ser_tokens(&&slice[1 ..], tokens);
+
+ let tokens = &[
+ Token::Seq { len: Some(4) },
+ Token::BorrowedStr(any::type_name::<Lsb0>()),
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ Token::U64(5),
+ Token::BorrowedBytes(&[18]),
+ Token::SeqEnd,
+ ];
+ assert_de_tokens(&slice, tokens);
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn errors() {
+ assert_de_tokens_error::<&BitSlice<u8, Msb0>>(
+ &[
+ Token::Seq { len: Some(4) },
+ Token::BorrowedStr(any::type_name::<Lsb0>()),
+ ],
+ &format!(
+ "invalid value: string \"{}\", expected the string \"{}\"",
+ any::type_name::<Lsb0>(),
+ any::type_name::<Msb0>(),
+ ),
+ );
+
+ assert_de_tokens_error::<&BitSlice<u8, Msb0>>(
+ &[
+ Token::Struct {
+ name: "BitSeq",
+ len: 1,
+ },
+ Token::BorrowedStr("unknown"),
+ ],
+ &format!(
+ "unknown field `unknown`, expected one of `{}`",
+ super::FIELDS.join("`, `"),
+ ),
+ );
+
+ assert_de_tokens_error::<&BitSlice<u8, Msb0>>(
+ &[
+ Token::Struct {
+ name: "BitSeq",
+ len: 2,
+ },
+ Token::BorrowedStr("order"),
+ Token::BorrowedStr(any::type_name::<Msb0>()),
+ Token::BorrowedStr("order"),
+ Token::BorrowedStr(any::type_name::<Msb0>()),
+ Token::StructEnd,
+ ],
+ "duplicate field `order`",
+ );
+ assert_de_tokens_error::<&BitSlice<u8, Msb0>>(
+ &[
+ Token::Struct {
+ name: "BitSeq",
+ len: 2,
+ },
+ Token::BorrowedStr("head"),
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ Token::BorrowedStr("head"),
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ Token::StructEnd,
+ ],
+ "duplicate field `head`",
+ );
+ assert_de_tokens_error::<&BitSlice<u8, Msb0>>(
+ &[
+ Token::Struct {
+ name: "BitSeq",
+ len: 2,
+ },
+ Token::BorrowedStr("bits"),
+ Token::U64(10),
+ Token::BorrowedStr("bits"),
+ Token::U64(10),
+ Token::StructEnd,
+ ],
+ "duplicate field `bits`",
+ );
+ assert_de_tokens_error::<&BitSlice<u8, Msb0>>(
+ &[
+ Token::Struct {
+ name: "BitSeq",
+ len: 2,
+ },
+ Token::BorrowedStr("data"),
+ Token::BorrowedBytes(&[0x3C, 0xA5]),
+ Token::BorrowedStr("data"),
+ Token::BorrowedBytes(&[0x3C, 0xA5]),
+ Token::StructEnd,
+ ],
+ "duplicate field `data`",
+ );
+ }
+}
diff --git a/src/serdes/utils.rs b/src/serdes/utils.rs
new file mode 100644
index 0000000..f6fd2e4
--- /dev/null
+++ b/src/serdes/utils.rs
@@ -0,0 +1,478 @@
+#![doc=include_str!("../../doc/serdes/utils.md")]
+
+use core::{
+ any,
+ fmt::{
+ self,
+ Formatter,
+ },
+ marker::PhantomData,
+ mem::MaybeUninit,
+};
+
+use serde::{
+ de::{
+ Deserialize,
+ Deserializer,
+ Error,
+ MapAccess,
+ SeqAccess,
+ Unexpected,
+ Visitor,
+ },
+ ser::{
+ Serialize,
+ SerializeSeq,
+ SerializeStruct,
+ SerializeTuple,
+ Serializer,
+ },
+};
+use wyz::comu::Const;
+
+use crate::{
+ domain::Domain,
+ index::BitIdx,
+ mem::{
+ bits_of,
+ BitRegister,
+ },
+ order::BitOrder,
+ store::BitStore,
+ view::BitViewSized,
+};
+
+/// A zero-sized type that deserializes successfully only from the exact string
+/// produced by `any::type_name::<T>()`.
+pub(super) struct TypeName<T>(PhantomData<T>);
+
+impl<T> TypeName<T> {
+ /// Creates a type-name ghost for any type.
+ fn new() -> Self {
+ TypeName(PhantomData)
+ }
+}
+
+impl<'de, T> Deserialize<'de> for TypeName<T> {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_str(Self::new())
+ }
+}
+
+impl<'de, T> Visitor<'de> for TypeName<T> {
+ type Value = Self;
+
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "the string {:?}", any::type_name::<T>())
+ }
+
+ fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+ where E: serde::de::Error {
+ if value == any::type_name::<T>() {
+ Ok(self)
+ }
+ else {
+ Err(serde::de::Error::invalid_value(
+ Unexpected::Str(value),
+ &self,
+ ))
+ }
+ }
+}
+
+/// Fields used in the `BitIdx` transport format.
+static FIELDS: &[&str] = &["width", "index"];
+
+/// The components of a bit-idx in wire format.
+enum Field {
+ /// Denotes the maximum allowable value of the bit-idx.
+ Width,
+ /// Denotes the value of the bit-idx.
+ Index,
+}
+
+/// Visits field tokens of a bit-idx wire format.
+struct FieldVisitor;
+
+impl<'de> Deserialize<'de> for Field {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_identifier(FieldVisitor)
+ }
+}
+
+impl<'de> Visitor<'de> for FieldVisitor {
+ type Value = Field;
+
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.write_str("field identifier")
+ }
+
+ fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+ where E: serde::de::Error {
+ match value {
+ "width" => Ok(Field::Width),
+ "index" => Ok(Field::Index),
+ _ => Err(serde::de::Error::unknown_field(value, FIELDS)),
+ }
+ }
+}
+
+impl<R> Serialize for BitIdx<R>
+where R: BitRegister
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ let mut state = serializer.serialize_struct("BitIdx", FIELDS.len())?;
+
+ // Emit the bit-width of the `R` type.
+ state.serialize_field(FIELDS[0], &(bits_of::<R>() as u8))?;
+ // Emit the actual head-bit index.
+ state.serialize_field(FIELDS[1], &self.into_inner())?;
+
+ state.end()
+ }
+}
+
+impl<'de, R> Deserialize<'de> for BitIdx<R>
+where R: BitRegister
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_struct(
+ "BitIdx",
+ FIELDS,
+ BitIdxVisitor::<R>::THIS,
+ )
+ }
+}
+
+impl<T, O> Serialize for Domain<'_, Const, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ T::Mem: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ // Domain<T> is functionally equivalent to `[T::Mem]`.
+ let mut state = serializer.serialize_seq(Some(self.len()))?;
+ for elem in *self {
+ state.serialize_element(&elem)?;
+ }
+ state.end()
+ }
+}
+
+/** `serde` only provides implementations for `[T; 0 ..= 32]`. This wrapper
+provides the same de/ser logic, but allows it to be used on arrays of any size.
+**/
+#[repr(transparent)]
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub(super) struct Array<T, const N: usize>
+where T: BitStore
+{
+ /// The data buffer being transported.
+ pub(super) inner: [T; N],
+}
+
+impl<T, const N: usize> Array<T, N>
+where T: BitStore
+{
+ /// Constructs a `&Array` reference from an `&[T; N]` reference.
+ ///
+ /// ## Safety
+ ///
+ /// `Array` is `#[repr(transparent)]`, so this address transformation is
+ /// always sound.
+ pub(super) fn from_ref(arr: &[T; N]) -> &Self {
+ unsafe { &*(arr as *const [T; N] as *const Self) }
+ }
+}
+
+impl<T, const N: usize> Serialize for Array<T, N>
+where
+ T: BitStore,
+ T::Mem: Serialize,
+{
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> super::Result<S>
+ where S: Serializer {
+ // `serde` serializes arrays as a tuple, so that transport formats can
+ // safely choose to keep or discard the length counter.
+ let mut state = serializer.serialize_tuple(N)?;
+ for elem in self.inner.as_raw_slice().iter().map(BitStore::load_value) {
+ state.serialize_element(&elem)?
+ }
+ state.end()
+ }
+}
+
+impl<'de, T, const N: usize> Deserialize<'de> for Array<T, N>
+where
+ T: BitStore,
+ T::Mem: Deserialize<'de>,
+{
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where D: Deserializer<'de> {
+ deserializer.deserialize_tuple(N, ArrayVisitor::<T, N>::THIS)
+ }
+}
+
+/// Assists in deserialization of a static `[T; N]` for any `N`.
+struct ArrayVisitor<T, const N: usize>
+where T: BitStore
+{
+ /// This produces an array during its work.
+ inner: PhantomData<[T; N]>,
+}
+
+impl<T, const N: usize> ArrayVisitor<T, N>
+where T: BitStore
+{
+ /// A blank visitor in its ready state.
+ const THIS: Self = Self { inner: PhantomData };
+}
+
+impl<'de, T, const N: usize> Visitor<'de> for ArrayVisitor<T, N>
+where
+ T: BitStore,
+ T::Mem: Deserialize<'de>,
+{
+ type Value = Array<T, N>;
+
+ #[inline]
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "a [{}; {}]", any::type_name::<T>(), N)
+ }
+
+ #[inline]
+ fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
+ where V: SeqAccess<'de> {
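+ // Deserialize each element into an uninitialized slot, and only assume
+ // the whole array is initialized after every slot has been written.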
+ let mut uninit = [MaybeUninit::<T::Mem>::uninit(); N];
+ for (idx, slot) in uninit.iter_mut().enumerate() {
+ slot.write(
+ seq.next_element::<T::Mem>()?
+ .ok_or_else(|| <V::Error>::invalid_length(idx, &self))?,
+ );
+ }
+ Ok(Array {
+ inner: uninit
+ .map(|elem| unsafe { MaybeUninit::assume_init(elem) })
+ .map(BitStore::new),
+ })
+ }
+}
+
+/// Assists in deserialization of a `BitIdx` value.
+struct BitIdxVisitor<R>
+where R: BitRegister
+{
+ /// This requires carrying the register type information.
+ inner: PhantomData<R>,
+}
+
+impl<R> BitIdxVisitor<R>
+where R: BitRegister
+{
+ /// A blank visitor in its ready state.
+ const THIS: Self = Self { inner: PhantomData };
+
+ /// Attempts to assemble deserialized components into an output value.
+ #[inline]
+ fn assemble<E>(self, width: u8, index: u8) -> Result<BitIdx<R>, E>
+ where E: Error {
+ // Fail if the transported type width does not match the destination.
+ if width != bits_of::<R>() as u8 {
+ return Err(E::invalid_type(
+ Unexpected::Unsigned(width as u64),
+ &self,
+ ));
+ }
+
+ // Capture an invalid index value and route it to the error handler.
+ BitIdx::<R>::new(index).map_err(|_| {
+ E::invalid_value(Unexpected::Unsigned(index as u64), &self)
+ })
+ }
+}
+
+impl<'de, R> Visitor<'de> for BitIdxVisitor<R>
+where R: BitRegister
+{
+ type Value = BitIdx<R>;
+
+ #[inline]
+ fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "a valid `BitIdx<u{}>`", bits_of::<R>())
+ }
+
+ #[inline]
+ fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
+ where V: SeqAccess<'de> {
+ let width = seq
+ .next_element::<u8>()?
+ .ok_or_else(|| <V::Error>::invalid_length(0, &self))?;
+ let index = seq
+ .next_element::<u8>()?
+ .ok_or_else(|| <V::Error>::invalid_length(1, &self))?;
+
+ self.assemble(width, index)
+ }
+
+ #[inline]
+ fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
+ where V: MapAccess<'de> {
+ let mut width = None;
+ let mut index = None;
+
+ while let Some(key) = map.next_key()? {
+ match key {
+ Field::Width => {
+ if width.replace(map.next_value::<u8>()?).is_some() {
+ return Err(<V::Error>::duplicate_field("width"));
+ }
+ },
+ Field::Index => {
+ if index.replace(map.next_value::<u8>()?).is_some() {
+ return Err(<V::Error>::duplicate_field("index"));
+ }
+ },
+ }
+ }
+
+ let width = width.ok_or_else(|| <V::Error>::missing_field("width"))?;
+ let index = index.ok_or_else(|| <V::Error>::missing_field("index"))?;
+
+ self.assemble(width, index)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use serde_test::{
+ assert_de_tokens,
+ assert_de_tokens_error,
+ assert_ser_tokens,
+ Token,
+ };
+
+ use super::*;
+
+ #[test]
+ fn array_wrapper() {
+ let array = Array { inner: [0u8; 40] };
+ #[rustfmt::skip]
+ let tokens = &[
+ Token::Tuple { len: 40 },
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0),
+ Token::TupleEnd,
+ ];
+ assert_ser_tokens(&array, tokens);
+ assert_de_tokens(&array, tokens);
+
+ let tokens = &[Token::Tuple { len: 1 }, Token::U32(0), Token::TupleEnd];
+ assert_de_tokens_error::<Array<u32, 2>>(
+ tokens,
+ "invalid length 1, expected a [u32; 2]",
+ );
+ }
+
+ #[test]
+ fn bit_idx() {
+ let idx = BitIdx::<u32>::new(20).unwrap();
+ let tokens = &mut [
+ Token::Struct {
+ name: "BitIdx",
+ len: 2,
+ },
+ Token::Str("width"),
+ Token::U8(32),
+ Token::Str("index"),
+ Token::U8(20),
+ Token::StructEnd,
+ ];
+ assert_ser_tokens(&idx, tokens);
+ tokens[1] = Token::BorrowedStr("width");
+ tokens[3] = Token::BorrowedStr("index");
+ assert_de_tokens(&idx, tokens);
+
+ let idx = BitIdx::<u16>::new(10).unwrap();
+ let tokens = &[
+ Token::Seq { len: Some(2) },
+ Token::U8(16),
+ Token::U8(10),
+ Token::SeqEnd,
+ ];
+ assert_de_tokens(&idx, tokens);
+
+ assert_de_tokens_error::<BitIdx<u16>>(
+ &[
+ Token::Seq { len: Some(2) },
+ Token::U8(8),
+ Token::U8(0),
+ Token::SeqEnd,
+ ],
+ "invalid type: integer `8`, expected a valid `BitIdx<u16>`",
+ );
+ assert_de_tokens_error::<BitIdx<u16>>(
+ &[
+ Token::Seq { len: Some(2) },
+ Token::U8(16),
+ Token::U8(16),
+ Token::SeqEnd,
+ ],
+ "invalid value: integer `16`, expected a valid `BitIdx<u16>`",
+ );
+ assert_de_tokens_error::<BitIdx<u8>>(
+ &[
+ Token::Struct {
+ name: "BitIdx",
+ len: 1,
+ },
+ Token::BorrowedStr("unknown"),
+ ],
+ "unknown field `unknown`, expected `width` or `index`",
+ );
+ assert_de_tokens_error::<BitIdx<u8>>(
+ &[
+ Token::Struct {
+ name: "BitIdx",
+ len: 2,
+ },
+ Token::BorrowedStr("width"),
+ Token::U8(8),
+ Token::BorrowedStr("width"),
+ Token::U8(8),
+ Token::StructEnd,
+ ],
+ "duplicate field `width`",
+ );
+ assert_de_tokens_error::<BitIdx<u8>>(
+ &[
+ Token::Struct {
+ name: "BitIdx",
+ len: 2,
+ },
+ Token::BorrowedStr("index"),
+ Token::U8(7),
+ Token::BorrowedStr("index"),
+ Token::U8(7),
+ Token::StructEnd,
+ ],
+ "duplicate field `index`",
+ );
+ }
+}
diff --git a/src/slice.rs b/src/slice.rs
new file mode 100644
index 0000000..48d8924
--- /dev/null
+++ b/src/slice.rs
@@ -0,0 +1,1819 @@
+#![doc = include_str!("../doc/slice.md")]
+
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+use core::{
+ marker::PhantomData,
+ ops::RangeBounds,
+};
+
+use funty::Integral;
+use tap::Pipe;
+#[cfg(feature = "alloc")]
+use tap::Tap;
+use wyz::{
+ bidi::BidiIterator,
+ comu::{
+ Const,
+ Mut,
+ },
+ range::RangeExt,
+};
+
+#[cfg(feature = "alloc")]
+use crate::vec::BitVec;
+use crate::{
+ domain::{
+ BitDomain,
+ Domain,
+ },
+ mem,
+ order::{
+ BitOrder,
+ Lsb0,
+ Msb0,
+ },
+ ptr::{
+ self as bv_ptr,
+ BitPtr,
+ BitPtrRange,
+ BitSpan,
+ BitSpanError,
+ },
+ store::BitStore,
+};
+
+mod api;
+mod iter;
+mod ops;
+mod specialization;
+mod tests;
+mod traits;
+
+pub use self::{
+ api::*,
+ iter::*,
+};
+
+#[repr(transparent)]
+#[doc = include_str!("../doc/slice/BitSlice.md")]
+pub struct BitSlice<T = usize, O = Lsb0>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The ordering of bits within a `T` register.
+ _ord: PhantomData<O>,
+ /// The register type used for storage.
+ _typ: PhantomData<[T]>,
+ /// Indicate that this is a newtype wrapper over a wholly-untyped slice.
+ ///
+ /// This is necessary in order for the Rust compiler to remove restrictions
+ /// on the possible values of reference handles to this type. Any other
+ /// slice type here (such as `[u8]` or `[T]`) would require that `&/mut
+ /// BitSlice` handles have values that correctly describe the region, and
+ /// the encoding *does not* do this. As such, reference handles to
+ /// `BitSlice` must not be even implicitly dereferenceäble to real memory,
+ /// and the slice must be a ZST.
+ ///
+ /// References to a ZST have no restrictions about what the values can be,
+ /// as they are never able to dereference real memory and thus both
+ /// addresses and lengths are meaningless to the memory inspector.
+ ///
+ /// See `ptr::span` for more information on the encoding scheme used in
+ /// references to `BitSlice`.
+ _mem: [()],
+}
+
+/// Constructors.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Produces an empty bit-slice with an arbitrary lifetime.
+ ///
+ /// ## Original
+ ///
+ /// This is equivalent to the `&[]` literal.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(BitSlice::<u16, LocalBits>::empty().is_empty());
+ /// assert_eq!(bits![], BitSlice::<u8, Msb0>::empty());
+ /// ```
+ #[inline]
+ pub fn empty<'a>() -> &'a Self {
+ unsafe { BitSpan::<Const, T, O>::EMPTY.into_bitslice_ref() }
+ }
+
+ /// Produces an empty bit-slice with an arbitrary lifetime.
+ ///
+ /// ## Original
+ ///
+ /// This is equivalent to the `&mut []` literal.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(BitSlice::<u16, LocalBits>::empty_mut().is_empty());
+ /// assert_eq!(bits![mut], BitSlice::<u8, Msb0>::empty_mut());
+ /// ```
+ #[inline]
+ pub fn empty_mut<'a>() -> &'a mut Self {
+ unsafe { BitSpan::<Mut, T, O>::EMPTY.into_bitslice_mut() }
+ }
+
+ /// Constructs a shared `&BitSlice` reference over a shared element.
+ ///
+ /// The [`BitView`] trait, implemented on all [`BitStore`] implementors,
+ /// provides a [`.view_bits::<O>()`] method which delegates to this function
+ /// and may be more convenient for you to write.
+ ///
+ /// ## Parameters
+ ///
+ /// - `elem`: A shared reference to a memory element.
+ ///
+ /// ## Returns
+ ///
+ /// A shared `&BitSlice` over `elem`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let elem = 0u8;
+ /// let bits = BitSlice::<_, Lsb0>::from_element(&elem);
+ /// assert_eq!(bits.len(), 8);
+ ///
+ /// let bits = elem.view_bits::<Lsb0>();
+ /// ```
+ ///
+ /// [`BitStore`]: crate::store::BitStore
+ /// [`BitView`]: crate::view::BitView
+ /// [`.view_bits::<O>()`]: crate::view::BitView::view_bits
+ #[inline]
+ pub fn from_element(elem: &T) -> &Self {
+ unsafe {
+ BitPtr::from_ref(elem)
+ .span_unchecked(mem::bits_of::<T::Mem>())
+ .into_bitslice_ref()
+ }
+ }
+
+ /// Constructs an exclusive `&mut BitSlice` reference over an element.
+ ///
+ /// The [`BitView`] trait, implemented on all [`BitStore`] implementors,
+ /// provides a [`.view_bits_mut::<O>()`] method which delegates to this
+ /// function and may be more convenient for you to write.
+ ///
+ /// ## Parameters
+ ///
+ /// - `elem`: An exclusive reference to a memory element.
+ ///
+ /// ## Returns
+ ///
+ /// An exclusive `&mut BitSlice` over `elem`.
+ ///
+ /// Note that the original `elem` reference will be inaccessible for the
+ /// duration of the returned bit-slice handle’s lifetime.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut elem = 0u8;
+ /// let bits = BitSlice::<_, Lsb0>::from_element_mut(&mut elem);
+ /// bits.set(1, true);
+ /// assert!(bits[1]);
+ /// assert_eq!(elem, 2);
+ ///
+ /// let bits = elem.view_bits_mut::<Lsb0>();
+ /// ```
+ ///
+ /// [`BitStore`]: crate::store::BitStore
+ /// [`BitView`]: crate::view::BitView
+ /// [`.view_bits_mut::<O>()`]: crate::view::BitView::view_bits_mut
+ #[inline]
+ pub fn from_element_mut(elem: &mut T) -> &mut Self {
+ unsafe {
+ BitPtr::from_mut(elem)
+ .span_unchecked(mem::bits_of::<T::Mem>())
+ .into_bitslice_mut()
+ }
+ }
+
+ /// Constructs a shared `&BitSlice` reference over a slice of elements.
+ ///
+ /// The [`BitView`] trait, implemented on all `[T]` slices, provides a
+ /// [`.view_bits::<O>()`] method which delegates to this function and may be
+ /// more convenient for you to write.
+ ///
+ /// ## Parameters
+ ///
+ /// - `slice`: A shared reference to a slice of memory elements.
+ ///
+ /// ## Returns
+ ///
+ /// A shared `BitSlice` reference over all of `slice`.
+ ///
+ /// ## Panics
+ ///
+ /// This will panic if `slice` is too long to encode as a bit-slice view.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = [0u16, 1];
+ /// let bits = BitSlice::<_, Lsb0>::from_slice(&data);
+ /// assert!(bits[16]);
+ ///
+ /// let bits = data.view_bits::<Lsb0>();
+ /// ```
+ ///
+ /// [`BitView`]: crate::view::BitView
+ /// [`.view_bits::<O>()`]: crate::view::BitView::view_bits
+ #[inline]
+ pub fn from_slice(slice: &[T]) -> &Self {
+ Self::try_from_slice(slice).unwrap()
+ }
+
+ /// Attempts to construct a shared `&BitSlice` reference over a slice of
+ /// elements.
+ ///
+ /// The [`BitView`] trait, implemented on all `[T]` slices, provides a
+ /// [`.try_view_bits::<O>()`] method which delegates to this function and
+ /// may be more convenient for you to write.
+ ///
+ ///
+ /// This is *very hard*, if not impossible, to cause to fail: in practice,
+ /// Rust programs on 64-bit architectures cannot create slices long enough
+ /// to exceed the bit-slice encoding limit.
+ ///
+ /// ## Parameters
+ ///
+ /// - `slice`: A shared reference to a slice of memory elements.
+ ///
+ /// ## Returns
+ ///
+ /// A shared `&BitSlice` over `slice`. If `slice` is longer than can be
+ /// encoded into a `&BitSlice` (see [`MAX_ELTS`]), this will fail and return
+ /// the original `slice` as an error.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = [0u8, 1];
+ /// let bits = BitSlice::<_, Msb0>::try_from_slice(&data).unwrap();
+ /// assert!(bits[15]);
+ ///
+ /// let bits = data.try_view_bits::<Msb0>().unwrap();
+ /// ```
+ ///
+ /// [`BitView`]: crate::view::BitView
+ /// [`MAX_ELTS`]: Self::MAX_ELTS
+ /// [`.try_view_bits::<O>()`]: crate::view::BitView::try_view_bits
+ #[inline]
+ pub fn try_from_slice(slice: &[T]) -> Result<&Self, BitSpanError<T>> {
+ let elts = slice.len();
+ if elts >= Self::MAX_ELTS {
+ elts.saturating_mul(mem::bits_of::<T::Mem>())
+ .pipe(BitSpanError::TooLong)
+ .pipe(Err)
+ }
+ else {
+ Ok(unsafe { Self::from_slice_unchecked(slice) })
+ }
+ }
+
+ /// Constructs an exclusive `&mut BitSlice` reference over a slice of
+ /// elements.
+ ///
+ /// The [`BitView`] trait, implemented on all `[T]` slices, provides a
+ /// [`.view_bits_mut::<O>()`] method which delegates to this function and
+ /// may be more convenient for you to write.
+ ///
+ /// ## Parameters
+ ///
+ /// - `slice`: An exclusive reference to a slice of memory elements.
+ ///
+ /// ## Returns
+ ///
+ /// An exclusive `&mut BitSlice` over all of `slice`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `slice` is too long to encode as a bit-slice view.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = [0u16; 2];
+ /// let bits = BitSlice::<_, Lsb0>::from_slice_mut(&mut data);
+ /// bits.set(0, true);
+ /// bits.set(17, true);
+ /// assert_eq!(data, [1, 2]);
+ ///
+ /// let bits = data.view_bits_mut::<Lsb0>();
+ /// ```
+ ///
+ /// [`BitView`]: crate::view::BitView
+ /// [`.view_bits_mut::<O>()`]: crate::view::BitView::view_bits_mut
+ #[inline]
+ pub fn from_slice_mut(slice: &mut [T]) -> &mut Self {
+ Self::try_from_slice_mut(slice).unwrap()
+ }
+
+ /// Attempts to construct an exclusive `&mut BitSlice` reference over a
+ /// slice of elements.
+ ///
+ /// The [`BitView`] trait, implemented on all `[T]` slices, provides a
+ /// [`.try_view_bits_mut::<O>()`] method which delegates to this function
+ /// and may be more convenient for you to write.
+ ///
+ /// ## Parameters
+ ///
+ /// - `slice`: An exclusive reference to a slice of memory elements.
+ ///
+ /// ## Returns
+ ///
+ /// An exclusive `&mut BitSlice` over `slice`. If `slice` is longer than can
+ /// be encoded into a `&mut BitSlice` (see [`MAX_ELTS`]), this will fail and
+ /// return the original `slice` as an error.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = [0u8; 2];
+ /// let bits = BitSlice::<_, Msb0>::try_from_slice_mut(&mut data).unwrap();
+ /// bits.set(7, true);
+ /// bits.set(15, true);
+ /// assert_eq!(data, [1; 2]);
+ ///
+ /// let bits = data.try_view_bits_mut::<Msb0>().unwrap();
+ /// ```
+ ///
+ /// [`BitView`]: crate::view::BitView
+ /// [`MAX_ELTS`]: Self::MAX_ELTS
+ /// [`.try_view_bits_mut::<O>()`]: crate::view::BitView::try_view_bits_mut
+ #[inline]
+ pub fn try_from_slice_mut(
+ slice: &mut [T],
+ ) -> Result<&mut Self, BitSpanError<T>> {
+ let elts = slice.len();
+ if elts >= Self::MAX_ELTS {
+ elts.saturating_mul(mem::bits_of::<T::Mem>())
+ .pipe(BitSpanError::TooLong)
+ .pipe(Err)
+ }
+ else {
+ Ok(unsafe { Self::from_slice_unchecked_mut(slice) })
+ }
+ }
+
+ /// Constructs a shared `&BitSlice` over an element slice, without checking
+ /// its length.
+ ///
+ /// If `slice` is too long to encode into a `&BitSlice`, then the produced
+ /// bit-slice’s length is unspecified.
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `slice.len() < BitSlice::MAX_ELTS`.
+ ///
+ /// Calling this function with an over-long slice is **library-level**
+ /// undefined behavior. You may not assume anything about its implementation
+ /// or behavior, and must conservatively assume that over-long slices cause
+ /// compiler UB.
+ #[inline]
+ pub unsafe fn from_slice_unchecked(slice: &[T]) -> &Self {
+ let bits = slice.len().wrapping_mul(mem::bits_of::<T::Mem>());
+ BitPtr::from_slice(slice)
+ .span_unchecked(bits)
+ .into_bitslice_ref()
+ }
+
+ /// Constructs an exclusive `&mut BitSlice` over an element slice, without
+ /// checking its length.
+ ///
+ /// If `slice` is too long to encode into a `&mut BitSlice`, then the
+ /// produced bit-slice’s length is unspecified.
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `slice.len() < BitSlice::MAX_ELTS`.
+ ///
+ /// Calling this function with an over-long slice is **library-level**
+ /// undefined behavior. You may not assume anything about its implementation
+ /// or behavior, and must conservatively assume that over-long slices cause
+ /// compiler UB.
+ #[inline]
+ pub unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self {
+ let bits = slice.len().wrapping_mul(mem::bits_of::<T::Mem>());
+ BitPtr::from_slice_mut(slice)
+ .span_unchecked(bits)
+ .into_bitslice_mut()
+ }
+}
+
+/// Alternates of standard APIs.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Gets a raw pointer to the zeroth bit of the bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::as_ptr`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr)
+ ///
+ /// ## API Differences
+ ///
+ /// This is renamed in order to indicate that it is returning a `bitvec`
+ /// structure, not a raw pointer.
+ #[inline]
+ pub fn as_bitptr(&self) -> BitPtr<Const, T, O> {
+ self.as_bitspan().to_bitptr()
+ }
+
+ /// Gets a raw, write-capable pointer to the zeroth bit of the bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::as_mut_ptr`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr)
+ ///
+ /// ## API Differences
+ ///
+ /// This is renamed in order to indicate that it is returning a `bitvec`
+ /// structure, not a raw pointer.
+ #[inline]
+ pub fn as_mut_bitptr(&mut self) -> BitPtr<Mut, T, O> {
+ self.as_mut_bitspan().to_bitptr()
+ }
+
+ /// Views the bit-slice as a half-open range of bit-pointers, to its first
+ /// bit *in* the bit-slice and the first bit *beyond* it.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::as_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr_range)
+ ///
+ /// ## API Differences
+ ///
+ /// This is renamed to indicate that it returns a `bitvec` structure, rather
+ /// than an ordinary `Range`.
+ ///
+ /// ## Notes
+ ///
+ /// `BitSlice` does define a [`.as_ptr_range()`], which returns a
+ /// `Range<BitPtr>`. `BitPtrRange` has additional capabilities that
+ /// `Range<*const T>` and `Range<BitPtr>` do not.
+ ///
+ /// [`.as_ptr_range()`]: Self::as_ptr_range
+ #[inline]
+ pub fn as_bitptr_range(&self) -> BitPtrRange<Const, T, O> {
+ self.as_bitspan().to_bitptr_range()
+ }
+
+ /// Views the bit-slice as a half-open range of write-capable bit-pointers,
+ /// to its first bit *in* the bit-slice and the first bit *beyond* it.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::as_mut_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr_range)
+ ///
+ /// ## API Differences
+ ///
+ /// This is renamed to indicate that it returns a `bitvec` structure, rather
+ /// than an ordinary `Range`.
+ ///
+ /// ## Notes
+ ///
+ /// `BitSlice` does define a [`.as_mut_ptr_range()`], which returns a
+ /// `Range<BitPtr>`. `BitPtrRange` has additional capabilities that
+ /// `Range<*mut T>` and `Range<BitPtr>` do not.
+ #[inline]
+ pub fn as_mut_bitptr_range(&mut self) -> BitPtrRange<Mut, T, O> {
+ self.as_mut_bitspan().to_bitptr_range()
+ }
+
+ /// Copies the bits from `src` into `self`.
+ ///
+ /// `self` and `src` must have the same length.
+ ///
+ /// ## Performance
+ ///
+ /// If `src` has the same type arguments as `self`, it will use the same
+ /// implementation as [`.copy_from_bitslice()`]; if you know that this will
+ /// always be the case, you should prefer to use that method directly.
+ ///
+ /// Only `.copy_from_bitslice()` is *able* to perform acceleration; when the
+ /// type parameters differ, this method must fall back to a bit-by-bit crawl
+ /// over both bit-slices.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::clone_from_slice`](https://doc.rust-lang.org/std/primitive.slice.html#method.clone_from_slice)
+ ///
+ /// ## API Differences
+ ///
+ /// This is renamed to reflect that it copies from another bit-slice, not
+ /// from an element slice.
+ ///
+ /// In order to support general usage, it allows `src` to have different
+ /// type parameters than `self`, at the cost of performance optimizations.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the two bit-slices have different lengths.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
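+ ///
+ /// let dst = bits![mut u8, Msb0; 0; 4];
+ /// let src = bits![u16, Lsb0; 1, 0, 1, 1];
+ /// dst.clone_from_bitslice(src);
+ /// assert_eq!(dst, src);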
+ /// ```
+ ///
+ /// [`.copy_from_bitslice()`]: Self::copy_from_bitslice
+ #[inline]
+ pub fn clone_from_bitslice<T2, O2>(&mut self, src: &BitSlice<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ assert_eq!(
+ self.len(),
+ src.len(),
+ "cloning between bit-slices requires equal lengths",
+ );
+
+ if let Some(that) = src.coerce::<T, O>() {
+ self.copy_from_bitslice(that);
+ }
+ // TODO(myrrlyn): Test if `<T::Mem, O>` matches `<T2::Mem, O>` and
+ // specialize cloning.
+ else {
+ for (to, bit) in self.as_mut_bitptr_range().zip(src.iter().by_vals())
+ {
+ unsafe {
+ to.write(bit);
+ }
+ }
+ }
+ }
+
+ /// Copies all bits from `src` into `self`, using batched acceleration when
+ /// possible.
+ ///
+ /// `self` and `src` must have the same length.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::copy_from_slice`](https://doc.rust-lang.org/std/primitive.slice.html#method.copy_from_slice)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the two bit-slices have different lengths.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
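+ ///
+ /// let dst = bits![mut u8, Msb0; 0; 4];
+ /// let src = bits![u8, Msb0; 1, 0, 1, 1];
+ /// dst.copy_from_bitslice(src);
+ /// assert_eq!(dst, src);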
+ /// ```
+ #[inline]
+ pub fn copy_from_bitslice(&mut self, src: &Self) {
+ assert_eq!(
+ self.len(),
+ src.len(),
+ "copying between bit-slices requires equal lengths",
+ );
+
+ let (to_head, from_head) =
+ (self.as_bitspan().head(), src.as_bitspan().head());
+ if to_head == from_head {
+ match (self.domain_mut(), src.domain()) {
+ (Domain::Enclave(mut to), Domain::Enclave(from)) => {
+ to.store_value(from.load_value());
+ },
+ (
+ Domain::Region {
+ head: to_head,
+ body: to_body,
+ tail: to_tail,
+ },
+ Domain::Region {
+ head: from_head,
+ body: from_body,
+ tail: from_tail,
+ },
+ ) => {
+ if let (Some(mut to), Some(from)) = (to_head, from_head) {
+ to.store_value(from.load_value());
+ }
+ for (to, from) in to_body.iter_mut().zip(from_body) {
+ to.store_value(from.load_value());
+ }
+ if let (Some(mut to), Some(from)) = (to_tail, from_tail) {
+ to.store_value(from.load_value());
+ }
+ },
+ _ => unreachable!(
+ "bit-slices with equal type parameters, lengths, and heads \
+ will always have equal domains"
+ ),
+ }
+ }
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T, Lsb0>(), src.coerce::<T, Lsb0>())
+ {
+ return this.sp_copy_from_bitslice(that);
+ }
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T, Msb0>(), src.coerce::<T, Msb0>())
+ {
+ return this.sp_copy_from_bitslice(that);
+ }
+ for (to, bit) in self.as_mut_bitptr_range().zip(src.iter().by_vals()) {
+ unsafe {
+ to.write(bit);
+ }
+ }
+ }
+
+ /// Swaps the contents of two bit-slices.
+ ///
+ /// `self` and `other` must have the same length.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::swap_with_slice`](https://doc.rust-lang.org/std/primitive.slice.html#method.swap_with_slice)
+ ///
+ /// ## API Differences
+ ///
+ /// This method is renamed, as it takes a bit-slice rather than an element
+ /// slice.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the two bit-slices have different lengths.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut one = [0xA5u8, 0x69];
+ /// let mut two = 0x1234u16;
+ /// let one_bits = one.view_bits_mut::<Msb0>();
+ /// let two_bits = two.view_bits_mut::<Lsb0>();
+ ///
+ /// one_bits.swap_with_bitslice(two_bits);
+ ///
+ /// assert_eq!(one, [0x2C, 0x48]);
+ /// # if cfg!(target_endian = "little") {
+ /// assert_eq!(two, 0x96A5);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn swap_with_bitslice<T2, O2>(&mut self, other: &mut BitSlice<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ assert_eq!(
+ self.len(),
+ other.len(),
+ "swapping between bit-slices requires equal lengths",
+ );
+
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T, Lsb0>(), other.coerce_mut::<T, Lsb0>())
+ {
+ return this.sp_swap_with_bitslice(that);
+ }
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T, Msb0>(), other.coerce_mut::<T, Msb0>())
+ {
+ return this.sp_swap_with_bitslice(that);
+ }
+ self.as_mut_bitptr_range()
+ .zip(other.as_mut_bitptr_range())
+ .for_each(|(a, b)| unsafe {
+ bv_ptr::swap(a, b);
+ });
+ }
+}
+
+/// Extensions of standard APIs.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Writes a new value into a single bit.
+ ///
+ /// This is the replacement for `*slice[index] = value;`, as `bitvec` is not
+ /// able to express that under the current `IndexMut` API signature.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `index`: The bit-index to set. It must be in `0 .. self.len()`.
+ /// - `value`: The new bit-value to write into the bit at `index`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 1];
+ /// bits.set(0, true);
+ /// bits.set(1, false);
+ ///
+ /// assert_eq!(bits, bits![1, 0]);
+ /// ```
+ #[inline]
+ pub fn set(&mut self, index: usize, value: bool) {
+ self.replace(index, value);
+ }
+
+ /// Writes a new value into a single bit, without bounds checking.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `index`: The bit-index to set. It must be in `0 .. self.len()`.
+ /// - `value`: The new bit-value to write into the bit at `index`.
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `index` is in the range `0 .. self.len()`.
+ ///
+ /// This performs bit-pointer offset arithmetic without doing any bounds
+ /// checks. If `index` is out of bounds, then this will issue an
+ /// out-of-bounds access and will trigger memory unsafety.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 0u8;
+ /// let bits = &mut data.view_bits_mut::<Lsb0>()[.. 2];
+ /// assert_eq!(bits.len(), 2);
+ /// unsafe {
+ /// bits.set_unchecked(3, true);
+ /// }
+ /// assert_eq!(data, 8);
+ /// ```
+ #[inline]
+ pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
+ self.replace_unchecked(index, value);
+ }
+
+ /// Writes a new value into a bit, and returns its previous value.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is not less than `self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0];
+ /// assert!(!bits.replace(0, true));
+ /// assert!(bits[0]);
+ /// ```
+ #[inline]
+ pub fn replace(&mut self, index: usize, value: bool) -> bool {
+ self.assert_in_bounds(index, 0 .. self.len());
+ unsafe { self.replace_unchecked(index, value) }
+ }
+
+ /// Writes a new value into a bit, returning the previous value, without
+ /// bounds checking.
+ ///
+ /// ## Safety
+ ///
+ /// `index` must be less than `self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0];
+ /// let old = unsafe {
+ /// let a = &mut bits[.. 1];
+ /// a.replace_unchecked(1, true)
+ /// };
+ /// assert!(!old);
+ /// assert!(bits[1]);
+ /// ```
+ #[inline]
+ pub unsafe fn replace_unchecked(
+ &mut self,
+ index: usize,
+ value: bool,
+ ) -> bool {
+ self.as_mut_bitptr().add(index).replace(value)
+ }
+
+ /// Swaps two bits in a bit-slice, without bounds checking.
+ ///
+ /// See [`.swap()`] for documentation.
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `a` and `b` are both in the range `0 ..
+ /// self.len()`.
+ ///
+ /// This method performs bit-pointer offset arithmetic without doing any
+ /// bounds checks. If `a` or `b` are out of bounds, then this will issue an
+ /// out-of-bounds access and will trigger memory unsafety.
+ ///
+ /// [`.swap()`]: Self::swap
+ #[inline]
+ pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
+ let a = self.as_mut_bitptr().add(a);
+ let b = self.as_mut_bitptr().add(b);
+ bv_ptr::swap(a, b);
+ }
+
+ /// Splits a bit-slice at an index, without bounds checking.
+ ///
+ /// See [`.split_at()`] for documentation.
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `mid` is in the range `0 ..= self.len()`.
+ ///
+ /// This method produces new bit-slice references. If `mid` is out of
+ /// bounds, its behavior is **library-level** undefined. You must
+ /// conservatively assume that an out-of-bounds split point produces
+ /// compiler-level UB.
+ ///
+ /// [`.split_at()`]: Self::split_at
+ #[inline]
+ pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&Self, &Self) {
+ let len = self.len();
+ let left = self.as_bitptr();
+ let right = left.add(mid);
+ let left = left.span_unchecked(mid);
+ let right = right.span_unchecked(len - mid);
+ let left = left.into_bitslice_ref();
+ let right = right.into_bitslice_ref();
+ (left, right)
+ }
+
+ /// Splits a mutable bit-slice at an index, without bounds checking.
+ ///
+ /// See [`.split_at_mut()`] for documentation.
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `mid` is in the range `0 ..= self.len()`.
+ ///
+ /// This method produces new bit-slice references. If `mid` is out of
+ /// bounds, its behavior is **library-level** undefined. You must
+ /// conservatively assume that an out-of-bounds split point produces
+ /// compiler-level UB.
+ ///
+ /// [`.split_at_mut()`]: Self::split_at_mut
+ #[inline]
+ pub unsafe fn split_at_unchecked_mut(
+ &mut self,
+ mid: usize,
+ ) -> (&mut BitSlice<T::Alias, O>, &mut BitSlice<T::Alias, O>) {
+ let len = self.len();
+ let left = self.alias_mut().as_mut_bitptr();
+ let right = left.add(mid);
+ (
+ left.span_unchecked(mid).into_bitslice_mut(),
+ right.span_unchecked(len - mid).into_bitslice_mut(),
+ )
+ }
+
+ /// Copies bits from one region of the bit-slice to another region of
+ /// itself, without doing bounds checks.
+ ///
+ /// The regions are allowed to overlap.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `src`: The range within `self` from which to copy.
+ /// - `dest`: The starting index within `self` at which to paste.
+ ///
+ /// ## Effects
+ ///
+ /// `self[src]` is copied to `self[dest .. dest + src.len()]`. Because the
+ /// regions may overlap, the bits of `self[src]` are afterwards left in an
+ /// unspecified, but initialized, state.
+ ///
+ /// ## Safety
+ ///
+ /// Both `src` and the destination range `dest .. dest + src.len()` must lie
+ /// entirely within the bounds of `self`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 0b1011_0000u8;
+ /// let bits = data.view_bits_mut::<Msb0>();
+ ///
+ /// unsafe {
+ /// bits.copy_within_unchecked(.. 4, 2);
+ /// }
+ /// assert_eq!(data, 0b1010_1100);
+ /// ```
+ #[inline]
+ pub unsafe fn copy_within_unchecked<R>(&mut self, src: R, dest: usize)
+ where R: RangeExt<usize> {
+ if let Some(this) = self.coerce_mut::<T, Lsb0>() {
+ return this.sp_copy_within_unchecked(src, dest);
+ }
+ if let Some(this) = self.coerce_mut::<T, Msb0>() {
+ return this.sp_copy_within_unchecked(src, dest);
+ }
+ let source = src.normalize(0, self.len());
+ let source_len = source.len();
+ let rev = source.contains(&dest);
+ let dest = dest .. dest + source_len;
+ for (from, to) in self
+ .get_unchecked(source)
+ .as_bitptr_range()
+ .zip(self.get_unchecked_mut(dest).as_mut_bitptr_range())
+ .bidi(rev)
+ {
+ to.write(from.read());
+ }
+ }
+
+ #[inline]
+ #[doc(hidden)]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.iter_mut().enumerate()`"]
+ pub fn for_each(&mut self, mut func: impl FnMut(usize, bool) -> bool) {
+ for (idx, ptr) in self.as_mut_bitptr_range().enumerate() {
+ unsafe {
+ ptr.write(func(idx, ptr.read()));
+ }
+ }
+ }
+}
+
+/// Views of underlying memory.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Partitions a bit-slice into maybe-contended and known-uncontended parts.
+ ///
+ /// The documentation of `BitDomain` goes into this in more detail. In
+ /// short, this produces a `&BitSlice` that is as large as possible without
+ /// requiring alias protection, as well as any bits that were not able to be
+ /// included in the unaliased bit-slice.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn bit_domain(&self) -> BitDomain<Const, T, O> {
+ self.domain().into_bit_domain()
+ }
+
+ /// Partitions a mutable bit-slice into maybe-contended and
+ /// known-uncontended parts.
+ ///
+ /// The documentation of `BitDomain` goes into this in more detail. In
+ /// short, this produces a `&mut BitSlice` that is as large as possible
+ /// without requiring alias protection, as well as any bits that were not
+ /// able to be included in the unaliased bit-slice.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn bit_domain_mut(&mut self) -> BitDomain<Mut, T, O> {
+ self.domain_mut().into_bit_domain()
+ }
+
+ /// Views the underlying memory of a bit-slice, removing alias protections
+ /// where possible.
+ ///
+ /// The documentation of `Domain` goes into this in more detail. In short,
+ /// this produces a `&[T]` slice with alias protections removed, covering
+ /// all elements that `self` completely fills. Partially-used elements on
+ /// either the front or back edge of the slice are returned separately.
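+ ///
+ /// ## Examples
+ ///
+ /// An illustrative sketch: a bit-slice that only partially covers its
+ /// first element, but completely fills the rest, produces a `Region` with
+ /// a partial head, a body of fully-spanned elements, and no tail.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::domain::Domain;
+ ///
+ /// let data = [0u8, 1, 2, 3];
+ /// let bits = &data.view_bits::<Lsb0>()[4 ..];
+ /// match bits.domain() {
+ /// Domain::Region { head, body, tail } => {
+ /// assert!(head.is_some());
+ /// assert_eq!(body, &[1, 2, 3]);
+ /// assert!(tail.is_none());
+ /// },
+ /// Domain::Enclave(_) => unreachable!("this bit-slice spans four elements"),
+ /// }
+ /// ```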
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn domain(&self) -> Domain<Const, T, O> {
+ Domain::new(self)
+ }
+
+ /// Views the underlying memory of a bit-slice, removing alias protections
+ /// where possible.
+ ///
+ /// The documentation of `Domain` goes into this in more detail. In short,
+ /// this produces a `&mut [T]` slice with alias protections removed,
+ /// covering all elements that `self` completely fills. Partially-used
+ /// elements on the front or back edge of the slice are returned separately.
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn domain_mut(&mut self) -> Domain<Mut, T, O> {
+ Domain::new(self)
+ }
+}
+
+/// Bit-value queries.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Counts the number of bits set to `1` in the bit-slice contents.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 1, 0, 0];
+ /// assert_eq!(bits[.. 2].count_ones(), 2);
+ /// assert_eq!(bits[2 ..].count_ones(), 0);
+ /// assert_eq!(bits![].count_ones(), 0);
+ /// ```
+ #[inline]
+ pub fn count_ones(&self) -> usize {
+ match self.domain() {
+ Domain::Enclave(elem) => elem.load_value().count_ones() as usize,
+ Domain::Region { head, body, tail } => {
+ head.map_or(0, |elem| elem.load_value().count_ones() as usize)
+ + body
+ .iter()
+ .map(BitStore::load_value)
+ .map(|elem| elem.count_ones() as usize)
+ .sum::<usize>() + tail
+ .map_or(0, |elem| elem.load_value().count_ones() as usize)
+ },
+ }
+ }
+
+ /// Counts the number of bits cleared to `0` in the bit-slice contents.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 1, 0, 0];
+ /// assert_eq!(bits[.. 2].count_zeros(), 0);
+ /// assert_eq!(bits[2 ..].count_zeros(), 2);
+ /// assert_eq!(bits![].count_zeros(), 0);
+ /// ```
+ #[inline]
+ pub fn count_zeros(&self) -> usize {
+ match self.domain() {
+ Domain::Enclave(elem) => (elem.load_value()
+ | !elem.mask().into_inner())
+ .count_zeros() as usize,
+ Domain::Region { head, body, tail } => {
+ head.map_or(0, |elem| {
+ (elem.load_value() | !elem.mask().into_inner()).count_zeros()
+ as usize
+ }) + body
+ .iter()
+ .map(BitStore::load_value)
+ .map(|elem| elem.count_zeros() as usize)
+ .sum::<usize>() + tail.map_or(0, |elem| {
+ (elem.load_value() | !elem.mask().into_inner()).count_zeros()
+ as usize
+ })
+ },
+ }
+ }
+
+ /// Enumerates the index of each bit in a bit-slice set to `1`.
+ ///
+ /// This is a shorthand for a `.enumerate().filter_map()` iterator that
+ /// selects the index of each `true` bit; however, its implementation is
+ /// eligible for optimizations that the individual-bit iterator is not.
+ ///
+ /// Specializations for the `Lsb0` and `Msb0` orderings allow processors
+ /// with instructions that seek particular bits within an element to operate
+ /// on whole elements, rather than on each bit individually.
+ ///
+ /// ## Examples
+ ///
+ /// This example uses `.iter_ones()`, a `.filter_map()` that finds the index
+ /// of each set bit, and the known indices, in order to show that they have
+ /// equivalent behavior.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1, 0, 0, 0, 1];
+ ///
+ /// let iter_ones = bits.iter_ones();
+ /// let known_indices = [1, 4, 8].iter().copied();
+ /// let filter = bits.iter()
+ /// .by_vals()
+ /// .enumerate()
+ /// .filter_map(|(idx, bit)| if bit { Some(idx) } else { None });
+ /// let all = iter_ones.zip(known_indices).zip(filter);
+ ///
+ /// for ((iter_one, known), filtered) in all {
+ /// assert_eq!(iter_one, known);
+ /// assert_eq!(known, filtered);
+ /// }
+ /// ```
+ #[inline]
+ pub fn iter_ones(&self) -> IterOnes<T, O> {
+ IterOnes::new(self)
+ }
+
+ /// Enumerates the index of each bit in a bit-slice cleared to `0`.
+ ///
+ /// This is a shorthand for a `.enumerate().filter_map()` iterator that
+ /// selects the index of each `false` bit; however, its implementation is
+ /// eligible for optimizations that the individual-bit iterator is not.
+ ///
+ /// Specializations for the `Lsb0` and `Msb0` orderings allow processors
+ /// with instructions that seek particular bits within an element to operate
+ /// on whole elements, rather than on each bit individually.
+ ///
+ /// ## Examples
+ ///
+ /// This example uses `.iter_zeros()`, a `.filter_map()` that finds the
+ /// index of each cleared bit, and the known indices, in order to show that
+ /// they have equivalent behavior.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 0, 1, 1, 0, 1, 1, 1, 0];
+ ///
+ /// let iter_zeros = bits.iter_zeros();
+ /// let known_indices = [1, 4, 8].iter().copied();
+ /// let filter = bits.iter()
+ /// .by_vals()
+ /// .enumerate()
+ /// .filter_map(|(idx, bit)| if !bit { Some(idx) } else { None });
+ /// let all = iter_zeros.zip(known_indices).zip(filter);
+ ///
+ /// for ((iter_zero, known), filtered) in all {
+ /// assert_eq!(iter_zero, known);
+ /// assert_eq!(known, filtered);
+ /// }
+ /// ```
+ #[inline]
+ pub fn iter_zeros(&self) -> IterZeros<T, O> {
+ IterZeros::new(self)
+ }
+
+ /// Finds the index of the first bit in the bit-slice set to `1`.
+ ///
+ /// Returns `None` if there is no `true` bit in the bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(bits![].first_one().is_none());
+ /// assert!(bits![0].first_one().is_none());
+ /// assert_eq!(bits![0, 1].first_one(), Some(1));
+ /// ```
+ #[inline]
+ pub fn first_one(&self) -> Option<usize> {
+ self.iter_ones().next()
+ }
+
+ /// Finds the index of the first bit in the bit-slice cleared to `0`.
+ ///
+ /// Returns `None` if there is no `false` bit in the bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(bits![].first_zero().is_none());
+ /// assert!(bits![1].first_zero().is_none());
+ /// assert_eq!(bits![1, 0].first_zero(), Some(1));
+ /// ```
+ #[inline]
+ pub fn first_zero(&self) -> Option<usize> {
+ self.iter_zeros().next()
+ }
+
+ /// Finds the index of the last bit in the bit-slice set to `1`.
+ ///
+ /// Returns `None` if there is no `true` bit in the bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(bits![].last_one().is_none());
+ /// assert!(bits![0].last_one().is_none());
+ /// assert_eq!(bits![1, 0].last_one(), Some(0));
+ /// ```
+ #[inline]
+ pub fn last_one(&self) -> Option<usize> {
+ self.iter_ones().next_back()
+ }
+
+ /// Finds the index of the last bit in the bit-slice cleared to `0`.
+ ///
+ /// Returns `None` if there is no `false` bit in the bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(bits![].last_zero().is_none());
+ /// assert!(bits![1].last_zero().is_none());
+ /// assert_eq!(bits![0, 1].last_zero(), Some(0));
+ /// ```
+ #[inline]
+ pub fn last_zero(&self) -> Option<usize> {
+ self.iter_zeros().next_back()
+ }
+
+ /// Counts the number of bits from the start of the bit-slice to the first
+ /// bit set to `0`.
+ ///
+ /// This returns `0` if the bit-slice is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert_eq!(bits![].leading_ones(), 0);
+ /// assert_eq!(bits![0].leading_ones(), 0);
+ /// assert_eq!(bits![1, 0].leading_ones(), 1);
+ /// ```
+ #[inline]
+ pub fn leading_ones(&self) -> usize {
+ self.first_zero().unwrap_or_else(|| self.len())
+ }
+
+ /// Counts the number of bits from the start of the bit-slice to the first
+ /// bit set to `1`.
+ ///
+ /// This returns `0` if the bit-slice is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert_eq!(bits![].leading_zeros(), 0);
+ /// assert_eq!(bits![1].leading_zeros(), 0);
+ /// assert_eq!(bits![0, 1].leading_zeros(), 1);
+ /// ```
+ #[inline]
+ pub fn leading_zeros(&self) -> usize {
+ self.first_one().unwrap_or_else(|| self.len())
+ }
+
+ /// Counts the number of bits from the end of the bit-slice to the last bit
+ /// set to `0`.
+ ///
+ /// This returns `0` if the bit-slice is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert_eq!(bits![].trailing_ones(), 0);
+ /// assert_eq!(bits![0].trailing_ones(), 0);
+ /// assert_eq!(bits![0, 1].trailing_ones(), 1);
+ /// ```
+ #[inline]
+ pub fn trailing_ones(&self) -> usize {
+ let len = self.len();
+ self.last_zero().map(|idx| len - 1 - idx).unwrap_or(len)
+ }
+
+ /// Counts the number of bits from the end of the bit-slice to the last bit
+ /// set to `1`.
+ ///
+ /// This returns `0` if the bit-slice is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert_eq!(bits![].trailing_zeros(), 0);
+ /// assert_eq!(bits![1].trailing_zeros(), 0);
+ /// assert_eq!(bits![1, 0].trailing_zeros(), 1);
+ /// ```
+ #[inline]
+ pub fn trailing_zeros(&self) -> usize {
+ let len = self.len();
+ self.last_one().map(|idx| len - 1 - idx).unwrap_or(len)
+ }
+
+ /// Tests if there is at least one bit set to `1` in the bit-slice.
+ ///
+ /// Returns `false` when `self` is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(!bits![].any());
+ /// assert!(!bits![0].any());
+ /// assert!(bits![0, 1].any());
+ /// ```
+ #[inline]
+ pub fn any(&self) -> bool {
+ self.count_ones() > 0
+ }
+
+ /// Tests if every bit is set to `1` in the bit-slice.
+ ///
+ /// Returns `true` when `self` is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!( bits![].all());
+ /// assert!(!bits![0].all());
+ /// assert!( bits![1].all());
+ /// ```
+ #[inline]
+ pub fn all(&self) -> bool {
+ self.count_zeros() == 0
+ }
+
+ /// Tests if every bit is cleared to `0` in the bit-slice.
+ ///
+ /// Returns `true` when `self` is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!( bits![].not_any());
+ /// assert!(!bits![1].not_any());
+ /// assert!( bits![0].not_any());
+ /// ```
+ #[inline]
+ pub fn not_any(&self) -> bool {
+ self.count_ones() == 0
+ }
+
+ /// Tests if at least one bit is cleared to `0` in the bit-slice.
+ ///
+ /// Returns `false` when `self` is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(!bits![].not_all());
+ /// assert!(!bits![1].not_all());
+ /// assert!( bits![0].not_all());
+ /// ```
+ #[inline]
+ pub fn not_all(&self) -> bool {
+ self.count_zeros() > 0
+ }
+
+ /// Tests if at least one bit is set to `1`, and at least one bit is cleared
+ /// to `0`, in the bit-slice.
+ ///
+ /// Returns `false` when `self` is empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(!bits![].some());
+ /// assert!(!bits![0].some());
+ /// assert!(!bits![1].some());
+ /// assert!( bits![0, 1].some());
+ /// ```
+ #[inline]
+ pub fn some(&self) -> bool {
+ self.any() && self.not_all()
+ }
+}
+
+/// Buffer manipulation.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Shifts the contents of a bit-slice “left” (towards the zero-index),
+ /// clearing the “right” bits to `0`.
+ ///
+ /// This is a strictly-worse analogue to taking `bits = &bits[by ..]`: it
+ /// has to modify the entire memory region that `bits` governs, and destroys
+ /// contained information. Unless the actual memory layout and contents of
+ /// your bit-slice matters to your program, you should *probably* prefer to
+ /// munch your way forward through a bit-slice handle.
+ ///
+ /// Note also that the “left” here is semantic only, and **does not**
+ /// necessarily correspond to a left-shift instruction applied to the
+ /// underlying integer storage.
+ ///
+ /// This has no effect when `by` is `0`. When `by` is `self.len()`, the
+ /// bit-slice is entirely cleared to `0`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `by` is greater than `self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1];
+ /// // these bits are retained ^--------------------------^
+ /// bits.shift_left(2);
+ /// assert_eq!(bits, bits![1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0]);
+ /// // and move here ^--------------------------^
+ ///
+ /// let bits = bits![mut 1; 2];
+ /// bits.shift_left(2);
+ /// assert_eq!(bits, bits![0; 2]);
+ /// ```
+ #[inline]
+ pub fn shift_left(&mut self, by: usize) {
+ if by == 0 {
+ return;
+ }
+
+ let len = self.len();
+ if by == len {
+ return self.fill(false);
+ }
+ assert!(
+ by <= len,
+ "shift must be less than the length of the bit-slice: {} >= {}",
+ by,
+ len,
+ );
+
+ unsafe {
+ self.copy_within_unchecked(by .., 0);
+ self.get_unchecked_mut(len - by ..).fill(false);
+ }
+ }
+
+ /// Shifts the contents of a bit-slice “right” (away from the zero-index),
+ /// clearing the “left” bits to `0`.
+ ///
+ /// This is a strictly-worse analogue to taking `bits = &bits[.. bits.len()
+ /// - by]`: it must modify the entire memory region that `bits` governs, and
+ /// destroys contained information. Unless the actual memory layout and
+ /// contents of your bit-slice matters to your program, you should
+ /// *probably* prefer to munch your way backward through a bit-slice handle.
+ ///
+ /// Note also that the “right” here is semantic only, and **does not**
+ /// necessarily correspond to a right-shift instruction applied to the
+ /// underlying integer storage.
+ ///
+ /// This has no effect when `by` is `0`. When `by` is `self.len()`, the
+ /// bit-slice is entirely cleared to `0`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `by` is greater than `self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1];
+ /// // these bits stay ^--------------------------^
+ /// bits.shift_right(2);
+ /// assert_eq!(bits, bits![0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1]);
+ /// // and move here ^--------------------------^
+ ///
+ /// let bits = bits![mut 1; 2];
+ /// bits.shift_right(2);
+ /// assert_eq!(bits, bits![0; 2]);
+ /// ```
+ #[inline]
+ pub fn shift_right(&mut self, by: usize) {
+ if by == 0 {
+ return;
+ }
+
+ let len = self.len();
+ if by == len {
+ return self.fill(false);
+ }
+ assert!(
+ by <= len,
+ "shift must be less than the length of the bit-slice: {} >= {}",
+ by,
+ len,
+ );
+
+ unsafe {
+ self.copy_within_unchecked(.. len - by, by);
+ self.get_unchecked_mut(.. by).fill(false);
+ }
+ }
+}
+
+/// Crate internals.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Gets the structural form of the encoded reference.
+ pub(crate) fn as_bitspan(&self) -> BitSpan<Const, T, O> {
+ BitSpan::from_bitslice_ptr(self)
+ }
+
+ /// Gets the structural form of the encoded reference.
+ pub(crate) fn as_mut_bitspan(&mut self) -> BitSpan<Mut, T, O> {
+ BitSpan::from_bitslice_ptr_mut(self)
+ }
+
+ /// Asserts that `index` is within the given bounds.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&self`
+ /// - `index`: The bit index to test against the bit-slice.
+ /// - `bounds`: The bounds to check; they cannot exceed `0 ..= self.len()`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is outside `bounds`.
+ pub(crate) fn assert_in_bounds<R>(&self, index: usize, bounds: R)
+ where R: RangeExt<usize> {
+ let bounds = bounds.normalize(0, self.len());
+ assert!(
+ bounds.contains(&index),
+ "index {} out of range: {:?}",
+ index,
+ bounds.end_bound()
+ );
+ }
+
+ /// Marks an exclusive bit-slice as covering an aliased memory region.
+ pub(crate) fn alias_mut(&mut self) -> &mut BitSlice<T::Alias, O> {
+ unsafe { self.as_mut_bitspan().cast::<T::Alias>().into_bitslice_mut() }
+ }
+
+ /// Removes an aliasing marker from an exclusive bit-slice handle.
+ ///
+ /// ## Safety
+ ///
+ /// This may only be used when the bit-slice is either known to be
+ /// unaliased, or this call is combined with an operation that adds an
+ /// aliasing marker and the total number of aliasing markers remains
+ /// unchanged.
+ pub(crate) unsafe fn unalias_mut(
+ this: &mut BitSlice<T::Alias, O>,
+ ) -> &mut Self {
+ this.as_mut_bitspan().cast::<T>().into_bitslice_mut()
+ }
+
+ /// Splits a mutable bit-slice at a midpoint, without either doing bounds
+ /// checks or adding an alias marker to the returned sections.
+ ///
+ /// This method has the same behavior as [`.split_at_unchecked_mut()`],
+ /// except that it does not apply an aliasing marker to the partitioned
+ /// subslices.
+ ///
+ /// ## Safety
+ ///
+ /// See [`.split_at_unchecked_mut()`]. Additionally, this is only safe when
+ /// `T` is alias-safe.
+ ///
+ /// [`.split_at_unchecked_mut()`]: Self::split_at_unchecked_mut
+ pub(crate) unsafe fn split_at_unchecked_mut_noalias(
+ &mut self,
+ mid: usize,
+ ) -> (&mut Self, &mut Self) {
+ // Split the slice at the requested midpoint, adding an alias layer
+ let (head, tail) = self.split_at_unchecked_mut(mid);
+ // Remove the new alias layer.
+ (Self::unalias_mut(head), Self::unalias_mut(tail))
+ }
+}
+
+/// Methods available only when `T` allows shared mutability.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore + radium::Radium,
+ O: BitOrder,
+{
+ /// Writes a new value into a single bit, using alias-safe operations.
+ ///
+ /// This is equivalent to [`.set()`], except that it does not require an
+ /// `&mut` reference, and allows bit-slices with alias-safe storage to share
+ /// write permissions.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&self`: This method only exists on bit-slices with alias-safe
+ /// storage, and so does not require exclusive access.
+ /// - `index`: The bit index to set. It must be in `0 .. self.len()`.
+ /// - `value`: The new bit-value to write into the bit at `index`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use core::cell::Cell;
+ ///
+ /// let bits: &BitSlice<_, _> = bits![Cell<usize>, Lsb0; 0, 1];
+ /// bits.set_aliased(0, true);
+ /// bits.set_aliased(1, false);
+ ///
+ /// assert_eq!(bits, bits![1, 0]);
+ /// ```
+ ///
+ /// [`.set()`]: Self::set
+ #[inline]
+ pub fn set_aliased(&self, index: usize, value: bool) {
+ self.assert_in_bounds(index, 0 .. self.len());
+ unsafe {
+ self.set_aliased_unchecked(index, value);
+ }
+ }
+
+ /// Writes a new value into a single bit, using alias-safe operations and
+ /// without bounds checking.
+ ///
+ /// This is equivalent to [`.set_unchecked()`], except that it does not
+ /// require an `&mut` reference, and allows bit-slices with alias-safe
+ /// storage to share write permissions.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&self`: This method only exists on bit-slices with alias-safe
+ /// storage, and so does not require exclusive access.
+ /// - `index`: The bit index to set. It must be in `0 .. self.len()`.
+ /// - `value`: The new bit-value to write into the bit at `index`.
+ ///
+ /// ## Safety
+ ///
+ /// The caller must ensure that `index` is not out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use core::cell::Cell;
+ ///
+ /// let data = Cell::new(0u8);
+ /// let bits = &data.view_bits::<Lsb0>()[.. 2];
+ /// unsafe {
+ /// bits.set_aliased_unchecked(3, true);
+ /// }
+ /// assert_eq!(data.get(), 8);
+ /// ```
+ ///
+ /// [`.set_unchecked()`]: Self::set_unchecked
+ #[inline]
+ pub unsafe fn set_aliased_unchecked(&self, index: usize, value: bool) {
+ self.as_bitptr().add(index).freeze().frozen_write_bit(value);
+ }
+}
+
+/// Miscellaneous information.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The inclusive maximum length of a `BitSlice<T, _>`.
+ ///
+ /// As `BitSlice` is zero-indexed, the largest possible *index* is one less
+ /// than this value.
+ ///
+ /// | CPU word width | Value                   |
+ /// |---------------:|------------------------:|
+ /// | 32 bits        | `0x1fff_ffff`           |
+ /// | 64 bits        | `0x1fff_ffff_ffff_ffff` |
+ pub const MAX_BITS: usize = BitSpan::<Const, T, O>::REGION_MAX_BITS;
+ /// The inclusive maximum length that a `[T]` slice can be for
+ /// `BitSlice<T, _>` to cover it.
+ ///
+ /// A `BitSlice<_, T>` that begins in the interior of an element and
+ /// contains the maximum number of bits will extend one element past the
+ /// cutoff that would occur if the bit-slice began at the zeroth bit. Such a
+ /// bit-slice is difficult to manually construct, but would not otherwise
+ /// fail.
+ ///
+ /// | Type Bits | Max Elements (32-bit) | Max Elements (64-bit)   |
+ /// |----------:|----------------------:|------------------------:|
+ /// |         8 | `0x0400_0001`         | `0x0400_0000_0000_0001` |
+ /// |        16 | `0x0200_0001`         | `0x0200_0000_0000_0001` |
+ /// |        32 | `0x0100_0001`         | `0x0100_0000_0000_0001` |
+ /// |        64 | `0x0080_0001`         | `0x0080_0000_0000_0001` |
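+ ///
+ /// ## Examples
+ ///
+ /// A quick sketch that checks this table and the one for `MAX_BITS`, using
+ /// `bitvec::mem::elts` to compute how many `T` elements a bit count needs:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::mem;
+ ///
+ /// // One eighth of the address space, regardless of the storage type.
+ /// assert_eq!(BitSlice::<u8, Lsb0>::MAX_BITS, usize::MAX >> 3);
+ /// // A maximal bit-slice that starts mid-element spills into one extra element.
+ /// assert_eq!(
+ /// BitSlice::<u8, Lsb0>::MAX_ELTS,
+ /// mem::elts::<u8>(BitSlice::<u8, Lsb0>::MAX_BITS) + 1,
+ /// );
+ /// ```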
+ pub const MAX_ELTS: usize = BitSpan::<Const, T, O>::REGION_MAX_ELTS;
+}
+
+#[cfg(feature = "alloc")]
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Copies a bit-slice into an owned bit-vector.
+ ///
+ /// Since the new vector is freshly owned, this gets marked as `::Unalias`
+ /// to remove any guards that may have been inserted by the bit-slice’s
+ /// history.
+ ///
+ /// It does *not* strip down to the underlying memory type, so a
+ /// `BitSlice<Cell<_>, _>` will produce a `BitVec<Cell<_>, _>`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::to_vec`](https://doc.rust-lang.org/std/primitive.slice.html#method.to_vec)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 1];
+ /// let bv = bits.to_bitvec();
+ /// assert_eq!(bits, bv);
+ /// ```
+ #[inline]
+ pub fn to_bitvec(&self) -> BitVec<T::Unalias, O> {
+ self.domain()
+ .map(<T::Unalias as BitStore>::new)
+ .collect::<Vec<_>>()
+ .pipe(BitVec::from_vec)
+ .tap_mut(|bv| unsafe {
+ bv.set_head(self.as_bitspan().head());
+ bv.set_len(self.len());
+ })
+ }
+}
+
+#[inline]
+#[doc = include_str!("../doc/slice/from_raw_parts_unchecked.md")]
+pub unsafe fn from_raw_parts_unchecked<'a, T, O>(
+ ptr: BitPtr<Const, T, O>,
+ len: usize,
+) -> &'a BitSlice<T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ ptr.span_unchecked(len).into_bitslice_ref()
+}
+
+#[inline]
+#[doc = include_str!("../doc/slice/from_raw_parts_unchecked_mut.md")]
+pub unsafe fn from_raw_parts_unchecked_mut<'a, T, O>(
+ ptr: BitPtr<Mut, T, O>,
+ len: usize,
+) -> &'a mut BitSlice<T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ ptr.span_unchecked(len).into_bitslice_mut()
+}
diff --git a/src/slice/api.rs b/src/slice/api.rs
new file mode 100644
index 0000000..715f169
--- /dev/null
+++ b/src/slice/api.rs
@@ -0,0 +1,2778 @@
+#![doc = include_str!("../../doc/slice/api.md")]
+
+use core::{
+ cmp,
+ ops::{
+ Range,
+ RangeFrom,
+ RangeFull,
+ RangeInclusive,
+ RangeTo,
+ RangeToInclusive,
+ },
+};
+
+use wyz::{
+ comu::{
+ Const,
+ Mut,
+ },
+ range::RangeExt,
+};
+
+use super::{
+ BitSlice,
+ Chunks,
+ ChunksExact,
+ ChunksExactMut,
+ ChunksMut,
+ Iter,
+ IterMut,
+ RChunks,
+ RChunksExact,
+ RChunksExactMut,
+ RChunksMut,
+ RSplit,
+ RSplitMut,
+ RSplitN,
+ RSplitNMut,
+ Split,
+ SplitInclusive,
+ SplitInclusiveMut,
+ SplitMut,
+ SplitN,
+ SplitNMut,
+ Windows,
+};
+#[cfg(feature = "alloc")]
+use crate::vec::BitVec;
+use crate::{
+ array::BitArray,
+ domain::Domain,
+ mem::{
+ self,
+ BitRegister,
+ },
+ order::BitOrder,
+ ptr::{
+ BitPtr,
+ BitRef,
+ BitSpan,
+ BitSpanError,
+ },
+ store::BitStore,
+};
+
+/// Port of the `[T]` inherent API.
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Gets the number of bits in the bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::len`](https://doc.rust-lang.org/std/primitive.slice.html#method.len)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert_eq!(bits![].len(), 0);
+ /// assert_eq!(bits![0; 10].len(), 10);
+ /// ```
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.as_bitspan().len()
+ }
+
+ /// Tests if the bit-slice is empty (length zero).
+ ///
+ /// ## Original
+ ///
+ /// [`slice::is_empty`](https://doc.rust-lang.org/std/primitive.slice.html#method.is_empty)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert!(bits![].is_empty());
+ /// assert!(!bits![0; 10].is_empty());
+ /// ```
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Gets a reference to the first bit of the bit-slice, or `None` if it is
+ /// empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::first`](https://doc.rust-lang.org/std/primitive.slice.html#method.first)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 0, 0];
+ /// assert_eq!(bits.first().as_deref(), Some(&true));
+ ///
+ /// assert!(bits![].first().is_none());
+ /// ```
+ #[inline]
+ pub fn first(&self) -> Option<BitRef<Const, T, O>> {
+ self.get(0)
+ }
+
+ /// Gets a mutable reference to the first bit of the bit-slice, or `None` if
+ /// it is empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::first_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.first_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`. This must be bound as `mut` in order to write
+ /// through it.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 3];
+ /// if let Some(mut first) = bits.first_mut() {
+ /// *first = true;
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 0]);
+ ///
+ /// assert!(bits![mut].first_mut().is_none());
+ /// ```
+ #[inline]
+ pub fn first_mut(&mut self) -> Option<BitRef<Mut, T, O>> {
+ self.get_mut(0)
+ }
+
+ /// Splits the bit-slice into a reference to its first bit, and the rest of
+ /// the bit-slice. Returns `None` when empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_first`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_first)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 0, 0];
+ /// let (first, rest) = bits.split_first().unwrap();
+ /// assert_eq!(first, &true);
+ /// assert_eq!(rest, bits![0; 2]);
+ /// ```
+ #[inline]
+ pub fn split_first(&self) -> Option<(BitRef<Const, T, O>, &Self)> {
+ match self.len() {
+ 0 => None,
+ _ => unsafe {
+ let (head, rest) = self.split_at_unchecked(1);
+ Some((head.get_unchecked(0), rest))
+ },
+ }
+ }
+
+ /// Splits the bit-slice into mutable references of its first bit, and the
+ /// rest of the bit-slice. Returns `None` when empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_first_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_first_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`. This must be bound as `mut` in order to write
+ /// through it.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 3];
+ /// if let Some((mut first, rest)) = bits.split_first_mut() {
+ /// *first = true;
+ /// assert_eq!(rest, bits![0; 2]);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 0]);
+ /// ```
+ #[inline]
+ pub fn split_first_mut(
+ &mut self,
+ ) -> Option<(BitRef<Mut, T::Alias, O>, &mut BitSlice<T::Alias, O>)> {
+ match self.len() {
+ 0 => None,
+ _ => unsafe {
+ let (head, rest) = self.split_at_unchecked_mut(1);
+ Some((head.get_unchecked_mut(0), rest))
+ },
+ }
+ }
+
+ /// Splits the bit-slice into a reference to its last bit, and the rest of
+ /// the bit-slice. Returns `None` when empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_last`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_last)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1];
+ /// let (last, rest) = bits.split_last().unwrap();
+ /// assert_eq!(last, &true);
+ /// assert_eq!(rest, bits![0; 2]);
+ /// ```
+ #[inline]
+ pub fn split_last(&self) -> Option<(BitRef<Const, T, O>, &Self)> {
+ match self.len() {
+ 0 => None,
+ n => unsafe {
+ let (rest, tail) = self.split_at_unchecked(n - 1);
+ Some((tail.get_unchecked(0), rest))
+ },
+ }
+ }
+
+ /// Splits the bit-slice into mutable references to its last bit, and the
+ /// rest of the bit-slice. Returns `None` when empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_last_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_last_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`. This must be bound as `mut` in order to write
+ /// through it.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 3];
+ /// if let Some((mut last, rest)) = bits.split_last_mut() {
+ /// *last = true;
+ /// assert_eq!(rest, bits![0; 2]);
+ /// }
+ /// assert_eq!(bits, bits![0, 0, 1]);
+ /// ```
+ #[inline]
+ pub fn split_last_mut(
+ &mut self,
+ ) -> Option<(BitRef<Mut, T::Alias, O>, &mut BitSlice<T::Alias, O>)> {
+ match self.len() {
+ 0 => None,
+ n => unsafe {
+ let (rest, tail) = self.split_at_unchecked_mut(n - 1);
+ Some((tail.get_unchecked_mut(0), rest))
+ },
+ }
+ }
+
+ /// Gets a reference to the last bit of the bit-slice, or `None` if it is
+ /// empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::last`](https://doc.rust-lang.org/std/primitive.slice.html#method.last)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1];
+ /// assert_eq!(bits.last().as_deref(), Some(&true));
+ ///
+ /// assert!(bits![].last().is_none());
+ /// ```
+ #[inline]
+ pub fn last(&self) -> Option<BitRef<Const, T, O>> {
+ match self.len() {
+ 0 => None,
+ n => Some(unsafe { self.get_unchecked(n - 1) }),
+ }
+ }
+
+ /// Gets a mutable reference to the last bit of the bit-slice, or `None` if
+ /// it is empty.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::last_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.last_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// `bitvec` uses a custom structure for both read-only and mutable
+ /// references to `bool`. This must be bound as `mut` in order to write
+ /// through it.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 3];
+ /// if let Some(mut last) = bits.last_mut() {
+ /// *last = true;
+ /// }
+ /// assert_eq!(bits, bits![0, 0, 1]);
+ ///
+ /// assert!(bits![mut].last_mut().is_none());
+ /// ```
+ #[inline]
+ pub fn last_mut(&mut self) -> Option<BitRef<Mut, T, O>> {
+ match self.len() {
+ 0 => None,
+ n => Some(unsafe { self.get_unchecked_mut(n - 1) }),
+ }
+ }
+
+ /// Gets a reference to a single bit or a subsection of the bit-slice,
+ /// depending on the type of `index`.
+ ///
+ /// - If given a `usize`, this produces a reference structure to the `bool`
+ /// at the position.
+ /// - If given any form of range, this produces a smaller bit-slice.
+ ///
+ /// This returns `None` if the `index` departs the bounds of `self`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::get`](https://doc.rust-lang.org/std/primitive.slice.html#method.get)
+ ///
+ /// ## API Differences
+ ///
+ /// `BitSliceIndex` uses discrete types for immutable and mutable
+ /// references, rather than a single referent type.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0];
+ /// assert_eq!(bits.get(1).as_deref(), Some(&true));
+ /// assert_eq!(bits.get(0 .. 2), Some(bits![0, 1]));
+ /// assert!(bits.get(3).is_none());
+ /// assert!(bits.get(0 .. 4).is_none());
+ /// ```
+ #[inline]
+ pub fn get<'a, I>(&'a self, index: I) -> Option<I::Immut>
+ where I: BitSliceIndex<'a, T, O> {
+ index.get(self)
+ }
+
+ /// Gets a mutable reference to a single bit or a subsection of the
+ /// bit-slice, depending on the type of `index`.
+ ///
+ /// - If given a `usize`, this produces a reference structure to the `bool`
+ /// at the position.
+ /// - If given any form of range, this produces a smaller bit-slice.
+ ///
+ /// This returns `None` if the `index` departs the bounds of `self`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::get_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.get_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// `BitSliceIndex` uses discrete types for immutable and mutable
+ /// references, rather than a single referent type.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 3];
+ ///
+ /// *bits.get_mut(0).unwrap() = true;
+ /// bits.get_mut(1 ..).unwrap().fill(true);
+ /// assert_eq!(bits, bits![1; 3]);
+ /// ```
+ #[inline]
+ pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option<I::Mut>
+ where I: BitSliceIndex<'a, T, O> {
+ index.get_mut(self)
+ }
+
+ /// Gets a reference to a single bit or to a subsection of the bit-slice,
+ /// without bounds checking.
+ ///
+ /// This has the same arguments and behavior as [`.get()`], except that it
+ /// does not check that `index` is in bounds.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::get_unchecked`](https://doc.rust-lang.org/std/primitive.slice.html#method.get_unchecked)
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `index` is within bounds (within the range `0 ..
+ /// self.len()`), or this method will introduce memory unsafety and/or
+ /// undefined behavior.
+ ///
+ /// It is library-level undefined behavior to index beyond the length of any
+ /// bit-slice, even if you **know** that the offset remains within an
+ /// allocation as measured by Rust or LLVM.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 0b0001_0010u8;
+ /// let bits = &data.view_bits::<Lsb0>()[.. 3];
+ ///
+ /// unsafe {
+ /// assert!(bits.get_unchecked(1));
+ /// assert!(bits.get_unchecked(4));
+ /// }
+ /// ```
+ ///
+ /// [`.get()`]: Self::get
+ #[inline]
+ pub unsafe fn get_unchecked<'a, I>(&'a self, index: I) -> I::Immut
+ where I: BitSliceIndex<'a, T, O> {
+ index.get_unchecked(self)
+ }
+
+ /// Gets a mutable reference to a single bit or a subsection of the
+ /// bit-slice, depending on the type of `index`.
+ ///
+ /// This has the same arguments and behavior as [`.get_mut()`], except that
+ /// it does not check that `index` is in bounds.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::get_unchecked_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.get_unchecked_mut)
+ ///
+ /// ## Safety
+ ///
+ /// You must ensure that `index` is within bounds (within the range `0 ..
+ /// self.len()`), or this method will introduce memory unsafety and/or
+ /// undefined behavior.
+ ///
+ /// It is library-level undefined behavior to index beyond the length of any
+ /// bit-slice, even if you **know** that the offset remains within an
+ /// allocation as measured by Rust or LLVM.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut data = 0u8;
+ /// let bits = &mut data.view_bits_mut::<Lsb0>()[.. 3];
+ ///
+ /// unsafe {
+ /// bits.get_unchecked_mut(1).commit(true);
+ /// bits.get_unchecked_mut(4 .. 6).fill(true);
+ /// }
+ /// assert_eq!(data, 0b0011_0010);
+ /// ```
+ ///
+ /// [`.get_mut()`]: Self::get_mut
+ #[inline]
+ pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::Mut
+ where I: BitSliceIndex<'a, T, O> {
+ index.get_unchecked_mut(self)
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitptr()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_ptr(&self) -> BitPtr<Const, T, O> {
+ self.as_bitptr()
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_mut_bitptr()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_mut_ptr(&mut self) -> BitPtr<Mut, T, O> {
+ self.as_mut_bitptr()
+ }
+
+ /// Produces a range of bit-pointers to each bit in the bit-slice.
+ ///
+ /// This is a standard-library range, which has no real functionality for
+ /// pointer types. You should prefer [`.as_bitptr_range()`] instead, as it
+ /// produces a custom structure that provides expected ranging
+ /// functionality.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::as_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr_range)
+ ///
+ /// [`.as_bitptr_range()`]: Self::as_bitptr_range
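+ ///
+ /// ## Examples
+ ///
+ /// A small sketch: the range’s endpoints delimit exactly the bits of the
+ /// slice, so the original view can be rebuilt from them.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 1];
+ /// let range = bits.as_ptr_range();
+ /// let same = unsafe {
+ /// bitvec::slice::from_raw_parts_unchecked(range.start, bits.len())
+ /// };
+ /// assert_eq!(same, bits);
+ /// ```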
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn as_ptr_range(&self) -> Range<BitPtr<Const, T, O>> {
+ self.as_bitptr_range().into_range()
+ }
+
+ /// Produces a range of mutable bit-pointers to each bit in the bit-slice.
+ ///
+ /// This is a standard-library range, which has no real functionality for
+ /// pointer types. You should prefer [`.as_mut_bitptr_range()`] instead, as
+ /// it produces a custom structure that provides expected ranging
+ /// functionality.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::as_mut_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr_range)
+ ///
+ /// [`.as_mut_bitptr_range()`]: Self::as_mut_bitptr_range
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn as_mut_ptr_range(&mut self) -> Range<BitPtr<Mut, T, O>> {
+ self.as_mut_bitptr_range().into_range()
+ }
+
+ /// Exchanges the bit values at two indices.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::swap`](https://doc.rust-lang.org/std/primitive.slice.html#method.swap)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if either `a` or `b` are out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 1];
+ /// bits.swap(0, 1);
+ /// assert_eq!(bits, bits![1, 0]);
+ /// ```
+ #[inline]
+ pub fn swap(&mut self, a: usize, b: usize) {
+ let bounds = 0 .. self.len();
+ self.assert_in_bounds(a, bounds.clone());
+ self.assert_in_bounds(b, bounds);
+ unsafe {
+ self.swap_unchecked(a, b);
+ }
+ }
+
+ /// Reverses the order of bits in a bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::reverse`](https://doc.rust-lang.org/std/primitive.slice.html#method.reverse)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 1, 1, 0, 0, 1];
+ /// bits.reverse();
+ /// assert_eq!(bits, bits![1, 0, 0, 1, 1, 0, 1, 0, 0]);
+ /// ```
+ #[inline]
+ pub fn reverse(&mut self) {
+ let mut iter = self.as_mut_bitptr_range();
+ while let (Some(a), Some(b)) = (iter.next(), iter.next_back()) {
+ unsafe {
+ crate::ptr::swap(a, b);
+ }
+ }
+ }
+
+ /// Produces an iterator over each bit in the bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::iter`](https://doc.rust-lang.org/std/primitive.slice.html#method.iter)
+ ///
+ /// ## API Differences
+ ///
+ /// This iterator yields proxy-reference structures, not `&bool`. It can be
+ /// adapted to yield `&bool` with the [`.by_refs()`] method, or `bool` with
+ /// [`.by_vals()`].
+ ///
+ /// This iterator, and its adapters, are fast. Do not try to be more clever
+ /// than them by abusing `.as_bitptr_range()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 1];
+ /// let mut iter = bits.iter();
+ ///
+ /// assert!(!iter.next().unwrap());
+ /// assert!( iter.next().unwrap());
+ /// assert!( iter.next_back().unwrap());
+ /// assert!(!iter.next_back().unwrap());
+ /// assert!( iter.next().is_none());
+ /// ```
+ ///
+ /// [`.by_refs()`]: crate::slice::Iter::by_refs
+ /// [`.by_vals()`]: crate::slice::Iter::by_vals
+ #[inline]
+ pub fn iter(&self) -> Iter<T, O> {
+ Iter::new(self)
+ }
+
+ /// Produces a mutable iterator over each bit in the bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::iter_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.iter_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// This iterator yields proxy-reference structures, not `&mut bool`. In
+ /// addition, it marks each proxy as alias-tainted.
+ ///
+ /// If you are using this in an ordinary loop and **not** keeping multiple
+ /// yielded proxy-references alive at the same scope, you may use the
+ /// [`.remove_alias()`] adapter to undo the alias marking.
+ ///
+ /// This iterator is fast. Do not try to be more clever than it by abusing
+ /// `.as_mut_bitptr_range()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 4];
+ /// let mut iter = bits.iter_mut();
+ ///
+ /// iter.nth(1).unwrap().commit(true); // index 1
+ /// iter.next_back().unwrap().commit(true); // index 3
+ ///
+ /// assert!(iter.next().is_some()); // index 2
+ /// assert!(iter.next().is_none()); // complete
+ /// assert_eq!(bits, bits![0, 1, 0, 1]);
+ /// ```
+ ///
+ /// [`.remove_alias()`]: crate::slice::IterMut::remove_alias
+ #[inline]
+ pub fn iter_mut(&mut self) -> IterMut<T, O> {
+ IterMut::new(self)
+ }
+
+ /// Iterates over consecutive windowing subslices in a bit-slice.
+ ///
+ /// Windows are overlapping views of the bit-slice. Each window advances one
+ /// bit from the previous, so in a bit-slice `[A, B, C, D, E]`, calling
+ /// `.windows(3)` will yield `[A, B, C]`, `[B, C, D]`, and `[C, D, E]`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::windows`](https://doc.rust-lang.org/std/primitive.slice.html#method.windows)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1];
+ /// let mut iter = bits.windows(3);
+ ///
+ /// assert_eq!(iter.next(), Some(bits![0, 1, 0]));
+ /// assert_eq!(iter.next(), Some(bits![1, 0, 0]));
+ /// assert_eq!(iter.next(), Some(bits![0, 0, 1]));
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[inline]
+ pub fn windows(&self, size: usize) -> Windows<T, O> {
+ Windows::new(self, size)
+ }
+
+ /// Iterates over non-overlapping subslices of a bit-slice.
+ ///
+ /// Unlike `.windows()`, the subslices this yields do not overlap with each
+ /// other. If `self.len()` is not an even multiple of `chunk_size`, then the
+ /// last chunk yielded will be shorter.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::chunks`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.chunks_mut()`] has the same division logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.chunks_exact()`] does not yield the final chunk if it is shorter
+ /// than `chunk_size`.
+ /// - [`.rchunks()`] iterates from the back of the bit-slice to the front,
+ /// with the final, possibly-shorter, segment at the front edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1];
+ /// let mut iter = bits.chunks(2);
+ ///
+ /// assert_eq!(iter.next(), Some(bits![0, 1]));
+ /// assert_eq!(iter.next(), Some(bits![0, 0]));
+ /// assert_eq!(iter.next(), Some(bits![1]));
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.chunks_exact()`]: Self::chunks_exact
+ /// [`.chunks_mut()`]: Self::chunks_mut
+ /// [`.rchunks()`]: Self::rchunks
+ #[inline]
+ pub fn chunks(&self, chunk_size: usize) -> Chunks<T, O> {
+ Chunks::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping mutable subslices of a bit-slice.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::chunks_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks_mut)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.chunks()`] has the same division logic, but each yielded bit-slice
+ /// is immutable.
+ /// - [`.chunks_exact_mut()`] does not yield the final chunk if it is
+ /// shorter than `chunk_size`.
+ /// - [`.rchunks_mut()`] iterates from the back of the bit-slice to the
+ /// front, with the final, possibly-shorter, segment at the front edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut u8, Msb0; 0; 5];
+ ///
+ /// for (idx, chunk) in unsafe {
+ /// bits.chunks_mut(2).remove_alias()
+ /// }.enumerate() {
+ /// chunk.store(idx + 1);
+ /// }
+ /// assert_eq!(bits, bits![0, 1, 1, 0, 1]);
+ /// // ^^^^ ^^^^ ^
+ /// ```
+ ///
+ /// [`.chunks()`]: Self::chunks
+ /// [`.chunks_exact_mut()`]: Self::chunks_exact_mut
+ /// [`.rchunks_mut()`]: Self::rchunks_mut
+ /// [`.remove_alias()`]: crate::slice::ChunksMut::remove_alias
+ #[inline]
+ pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T, O> {
+ ChunksMut::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping subslices of a bit-slice.
+ ///
+ /// If `self.len()` is not an even multiple of `chunk_size`, then the last
+ /// few bits are not yielded by the iterator at all. They can be accessed
+ /// with the [`.remainder()`] method if the iterator is bound to a name.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::chunks_exact`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks_exact)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.chunks()`] yields any leftover bits at the end as a shorter chunk
+ /// during iteration.
+ /// - [`.chunks_exact_mut()`] has the same division logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.rchunks_exact()`] iterates from the back of the bit-slice to the
+ /// front, with the unyielded remainder segment at the front edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1];
+ /// let mut iter = bits.chunks_exact(2);
+ ///
+ /// assert_eq!(iter.next(), Some(bits![0, 1]));
+ /// assert_eq!(iter.next(), Some(bits![0, 0]));
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), bits![1]);
+ /// ```
+ ///
+ /// [`.chunks()`]: Self::chunks
+ /// [`.chunks_exact_mut()`]: Self::chunks_exact_mut
+ /// [`.rchunks_exact()`]: Self::rchunks_exact
+ /// [`.remainder()`]: crate::slice::ChunksExact::remainder
+ #[inline]
+ pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<T, O> {
+ ChunksExact::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping mutable subslices of a bit-slice.
+ ///
+ /// If `self.len()` is not an even multiple of `chunk_size`, then the last
+ /// few bits are not yielded by the iterator at all. They can be accessed
+ /// with the [`.into_remainder()`] method if the iterator is bound to a
+ /// name.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::chunks_exact_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks_exact_mut)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.chunks_mut()`] yields any leftover bits at the end as a shorter
+ /// chunk during iteration.
+ /// - [`.chunks_exact()`] has the same division logic, but each yielded
+ /// bit-slice is immutable.
+ /// - [`.rchunks_exact_mut()`] iterates from the back of the bit-slice to the
+ /// front, with the unyielded remainder segment at the front edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut u8, Msb0; 0; 5];
+ /// let mut iter = bits.chunks_exact_mut(2);
+ ///
+ /// for (idx, chunk) in iter.by_ref().enumerate() {
+ /// chunk.store(idx + 1);
+ /// }
+ /// iter.into_remainder().store(1u8);
+ ///
+ /// assert_eq!(bits, bits![0, 1, 1, 0, 1]);
+ /// // remainder ^
+ /// ```
+ ///
+ /// [`.chunks_exact()`]: Self::chunks_exact
+ /// [`.chunks_mut()`]: Self::chunks_mut
+ /// [`.into_remainder()`]: crate::slice::ChunksExactMut::into_remainder
+ /// [`.rchunks_exact_mut()`]: Self::rchunks_exact_mut
+ /// [`.remove_alias()`]: crate::slice::ChunksExactMut::remove_alias
+ #[inline]
+ pub fn chunks_exact_mut(
+ &mut self,
+ chunk_size: usize,
+ ) -> ChunksExactMut<T, O> {
+ ChunksExactMut::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping subslices of a bit-slice, from the back
+ /// edge.
+ ///
+ /// Unlike `.chunks()`, this aligns its chunks to the back edge of `self`.
+ /// If `self.len()` is not an even multiple of `chunk_size`, then the
+ /// leftover partial chunk is `self[0 .. len % chunk_size]`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rchunks`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rchunks_mut()`] has the same division logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.rchunks_exact()`] does not yield the final chunk if it is shorter
+ /// than `chunk_size`.
+ /// - [`.chunks()`] iterates from the front of the bit-slice to the back,
+ /// with the final, possibly-shorter, segment at the back edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1];
+ /// let mut iter = bits.rchunks(2);
+ ///
+ /// assert_eq!(iter.next(), Some(bits![0, 1]));
+ /// assert_eq!(iter.next(), Some(bits![1, 0]));
+ /// assert_eq!(iter.next(), Some(bits![0]));
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.chunks()`]: Self::chunks
+ /// [`.rchunks_exact()`]: Self::rchunks_exact
+ /// [`.rchunks_mut()`]: Self::rchunks_mut
+ #[inline]
+ pub fn rchunks(&self, chunk_size: usize) -> RChunks<T, O> {
+ RChunks::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping mutable subslices of a bit-slice, from the
+ /// back edge.
+ ///
+ /// Unlike `.chunks_mut()`, this aligns its chunks to the back edge of
+ /// `self`. If `self.len()` is not an even multiple of `chunk_size`, then
+ /// the leftover partial chunk is `self[0 .. len % chunk_size]`.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded values for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rchunks_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks_mut)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rchunks()`] has the same division logic, but each yielded bit-slice
+ /// is immutable.
+ /// - [`.rchunks_exact_mut()`] does not yield the final chunk if it is
+ /// shorter than `chunk_size`.
+ /// - [`.chunks_mut()`] iterates from the front of the bit-slice to the
+ /// back, with the final, possibly-shorter, segment at the back edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut u8, Msb0; 0; 5];
+ /// for (idx, chunk) in unsafe {
+ /// bits.rchunks_mut(2).remove_alias()
+ /// }.enumerate() {
+ /// chunk.store(idx + 1);
+ /// }
+ /// assert_eq!(bits, bits![1, 1, 0, 0, 1]);
+ /// // remainder ^ ^^^^ ^^^^
+ /// ```
+ ///
+ /// [`.chunks_mut()`]: Self::chunks_mut
+ /// [`.rchunks()`]: Self::rchunks
+ /// [`.rchunks_exact_mut()`]: Self::rchunks_exact_mut
+ /// [`.remove_alias()`]: crate::slice::RChunksMut::remove_alias
+ #[inline]
+ pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<T, O> {
+ RChunksMut::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping subslices of a bit-slice, from the back
+ /// edge.
+ ///
+ /// If `self.len()` is not an even multiple of `chunk_size`, then the first
+ /// few bits are not yielded by the iterator at all. They can be accessed
+ /// with the [`.remainder()`] method if the iterator is bound to a name.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rchunks_exact`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks_exact)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rchunks()`] yields any leftover bits at the front as a shorter
+ /// chunk during iteration.
+ /// - [`.rchunks_exact_mut()`] has the same division logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.chunks_exact()`] iterates from the front of the bit-slice to the
+ /// back, with the unyielded remainder segment at the back edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1];
+ /// let mut iter = bits.rchunks_exact(2);
+ ///
+ /// assert_eq!(iter.next(), Some(bits![0, 1]));
+ /// assert_eq!(iter.next(), Some(bits![1, 0]));
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), bits![0]);
+ /// ```
+ ///
+ /// [`.chunks_exact()`]: Self::chunks_exact
+ /// [`.rchunks()`]: Self::rchunks
+ /// [`.rchunks_exact_mut()`]: Self::rchunks_exact_mut
+ /// [`.remainder()`]: crate::slice::RChunksExact::remainder
+ #[inline]
+ pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<T, O> {
+ RChunksExact::new(self, chunk_size)
+ }
+
+ /// Iterates over non-overlapping mutable subslices of a bit-slice, from the
+ /// back edge.
+ ///
+ /// If `self.len()` is not an even multiple of `chunk_size`, then the first
+ /// few bits are not yielded by the iterator at all. They can be accessed
+ /// with the [`.into_remainder()`] method if the iterator is bound to a
+ /// name.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rchunks_exact_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks_exact_mut)
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rchunks_mut()`] yields any leftover bits at the front as a shorter
+ /// chunk during iteration.
+ /// - [`.rchunks_exact()`] has the same division logic, but each yielded
+ /// bit-slice is immutable.
+ /// - [`.chunks_exact_mut()`] iterates from the front of the bit-slice to
+ /// the back, with the unyielded remainder segment at the back edge.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `chunk_size` is `0`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut u8, Msb0; 0; 5];
+ /// let mut iter = bits.rchunks_exact_mut(2);
+ ///
+ /// for (idx, chunk) in iter.by_ref().enumerate() {
+ /// chunk.store(idx + 1);
+ /// }
+ /// iter.into_remainder().store(1u8);
+ ///
+ /// assert_eq!(bits, bits![1, 1, 0, 0, 1]);
+ /// // remainder ^
+ /// ```
+ ///
+ /// [`.chunks_exact_mut()`]: Self::chunks_exact_mut
+ /// [`.into_remainder()`]: crate::slice::RChunksExactMut::into_remainder
+ /// [`.rchunks_exact()`]: Self::rchunks_exact
+ /// [`.rchunks_mut()`]: Self::rchunks_mut
+ /// [`.remove_alias()`]: crate::slice::RChunksExactMut::remove_alias
+ #[inline]
+ pub fn rchunks_exact_mut(
+ &mut self,
+ chunk_size: usize,
+ ) -> RChunksExactMut<T, O> {
+ RChunksExactMut::new(self, chunk_size)
+ }
+
+ /// Splits a bit-slice in two parts at an index.
+ ///
+ /// The returned bit-slices are `self[.. mid]` and `self[mid ..]`. `mid` is
+ /// included in the right bit-slice, not the left.
+ ///
+ /// If `mid` is `0` then the left bit-slice is empty; if it is `self.len()`
+ /// then the right bit-slice is empty.
+ ///
+ /// This method guarantees that even when either partition is empty, the
+ /// encoded bit-pointer values of the bit-slice references are `&self[0]`
+ /// and `&self[mid]`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_at`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_at)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `mid` is greater than `self.len()`. It is allowed to be
+ /// equal to the length, in which case the right bit-slice is simply empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 0, 1, 1, 1];
+ /// let base = bits.as_bitptr();
+ ///
+ /// let (a, b) = bits.split_at(0);
+ /// assert_eq!(unsafe { a.as_bitptr().offset_from(base) }, 0);
+ /// assert_eq!(unsafe { b.as_bitptr().offset_from(base) }, 0);
+ ///
+ /// let (a, b) = bits.split_at(6);
+ /// assert_eq!(unsafe { b.as_bitptr().offset_from(base) }, 6);
+ ///
+ /// let (a, b) = bits.split_at(3);
+ /// assert_eq!(a, bits![0; 3]);
+ /// assert_eq!(b, bits![1; 3]);
+ /// ```
+ #[inline]
+ pub fn split_at(&self, mid: usize) -> (&Self, &Self) {
+ self.assert_in_bounds(mid, 0 ..= self.len());
+ unsafe { self.split_at_unchecked(mid) }
+ }
+
+ /// Splits a mutable bit-slice in two parts at an index.
+ ///
+ /// The returned bit-slices are `self[.. mid]` and `self[mid ..]`. `mid` is
+ /// included in the right bit-slice, not the left.
+ ///
+ /// If `mid` is `0` then the left bit-slice is empty; if it is `self.len()`
+ /// then the right bit-slice is empty.
+ ///
+ /// This method guarantees that even when either partition is empty, the
+ /// encoded bit-pointer values of the bit-slice references are `&self[0]`
+ /// and `&self[mid]`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_at_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_at_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// The end bits of the left half and the start bits of the right half might
+ /// be stored in the same memory element. In order to avoid breaking
+ /// `bitvec`’s memory-safety guarantees, both bit-slices are marked as
+ /// `T::Alias`. This marking allows them to be used without interfering with
+ /// each other when they interact with memory.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `mid` is greater than `self.len()`. It is allowed to be
+ /// equal to the length, in which case the right bit-slice is simply empty.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut u8, Msb0; 0; 6];
+ /// let base = bits.as_mut_bitptr();
+ ///
+ /// let (a, b) = bits.split_at_mut(0);
+ /// assert_eq!(unsafe { a.as_mut_bitptr().offset_from(base) }, 0);
+ /// assert_eq!(unsafe { b.as_mut_bitptr().offset_from(base) }, 0);
+ ///
+ /// let (a, b) = bits.split_at_mut(6);
+ /// assert_eq!(unsafe { b.as_mut_bitptr().offset_from(base) }, 6);
+ ///
+ /// let (a, b) = bits.split_at_mut(3);
+ /// a.store(3);
+ /// b.store(5);
+ ///
+ /// assert_eq!(bits, bits![0, 1, 1, 1, 0, 1]);
+ /// ```
+ #[inline]
+ pub fn split_at_mut(
+ &mut self,
+ mid: usize,
+ ) -> (&mut BitSlice<T::Alias, O>, &mut BitSlice<T::Alias, O>) {
+ self.assert_in_bounds(mid, 0 ..= self.len());
+ unsafe { self.split_at_unchecked_mut(mid) }
+ }
+
+ /// Iterates over subslices separated by bits that match a predicate. The
+ /// matched bit is *not* contained in the yielded bit-slices.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split`](https://doc.rust-lang.org/std/primitive.slice.html#method.split)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.split_mut()`] has the same splitting logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.split_inclusive()`] includes the matched bit in the yielded
+ /// bit-slice.
+ /// - [`.rsplit()`] iterates from the back of the bit-slice instead of the
+ /// front.
+ /// - [`.splitn()`] times out after `n` yields.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 1, 0];
+ /// // ^
+ /// let mut iter = bits.split(|pos, _bit| pos % 3 == 2);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![0, 1]);
+ /// assert_eq!(iter.next().unwrap(), bits![0]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the first bit is matched, then an empty bit-slice will be the first
+ /// item yielded by the iterator. Similarly, if the last bit in the
+ /// bit-slice matches, then an empty bit-slice will be the last item
+ /// yielded.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1];
+ /// // ^
+ /// let mut iter = bits.split(|_pos, bit| *bit);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![0; 2]);
+ /// assert!(iter.next().unwrap().is_empty());
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If two matched bits are directly adjacent, then an empty bit-slice will
+ /// be yielded between them:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 0, 0, 1];
+ /// // ^ ^
+ /// let mut iter = bits.split(|_pos, bit| !*bit);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![1]);
+ /// assert!(iter.next().unwrap().is_empty());
+ /// assert_eq!(iter.next().unwrap(), bits![1]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.rsplit()`]: Self::rsplit
+ /// [`.splitn()`]: Self::splitn
+ /// [`.split_inclusive()`]: Self::split_inclusive
+ /// [`.split_mut()`]: Self::split_mut
+ #[inline]
+ pub fn split<F>(&self, pred: F) -> Split<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ Split::new(self, pred)
+ }
+
+ /// Iterates over mutable subslices separated by bits that match a
+ /// predicate. The matched bit is *not* contained in the yielded bit-slices.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.split()`] has the same splitting logic, but each yielded bit-slice
+ /// is immutable.
+ /// - [`.split_inclusive_mut()`] includes the matched bit in the yielded
+ /// bit-slice.
+ /// - [`.rsplit_mut()`] iterates from the back of the bit-slice instead of
+ /// the front.
+ /// - [`.splitn_mut()`] times out after `n` yields.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 1, 0];
+ /// // ^ ^
+ /// for group in bits.split_mut(|_pos, bit| *bit) {
+ /// group.set(0, true);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 1, 1, 1, 1]);
+ /// ```
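+ ///
+ /// As a sketch of the alias-removal discussed above (safe here because no
+ /// yielded bit-slice outlives its loop iteration):
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 1, 0];
+ /// for group in unsafe {
+ /// bits.split_mut(|_pos, bit| *bit).remove_alias()
+ /// } {
+ /// group.set(0, true);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 1, 1, 1, 1]);
+ /// ```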
+ ///
+ /// [`.remove_alias()`]: crate::slice::SplitMut::remove_alias
+ /// [`.rsplit_mut()`]: Self::rsplit_mut
+ /// [`.split()`]: Self::split
+ /// [`.split_inclusive_mut()`]: Self::split_inclusive_mut
+ /// [`.splitn_mut()`]: Self::splitn_mut
+ #[inline]
+ pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ SplitMut::new(self.alias_mut(), pred)
+ }
+
+ /// Iterates over subslices separated by bits that match a predicate. Unlike
+ /// `.split()`, this *does* include the matching bit as the last bit in the
+ /// yielded bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_inclusive`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_inclusive)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.split_inclusive_mut()`] has the same splitting logic, but each
+ /// yielded bit-slice is mutable.
+ /// - [`.split()`] does not include the matched bit in the yielded
+ /// bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1, 0, 1];
+ /// // ^ ^
+ /// let mut iter = bits.split_inclusive(|_pos, bit| *bit);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![0, 0, 1]);
+ /// assert_eq!(iter.next().unwrap(), bits![0, 1]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.split()`]: Self::split
+ /// [`.split_inclusive_mut()`]: Self::split_inclusive_mut
+ #[inline]
+ pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ SplitInclusive::new(self, pred)
+ }
+
+ /// Iterates over mutable subslices separated by bits that match a
+ /// predicate. Unlike `.split_mut()`, this *does* include the matching bit
+ /// as the last bit in the bit-slice.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::split_inclusive_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_inclusive_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.split_inclusive()`] has the same splitting logic, but each yielded
+ /// bit-slice is immutable.
+ /// - [`.split_mut()`] does not include the matched bit in the yielded
+ /// bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 0, 0, 0];
+ /// // ^
+ /// for group in bits.split_inclusive_mut(|pos, _bit| pos % 3 == 2) {
+ /// group.set(0, true);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 0, 1, 0]);
+ /// ```
+ ///
+ /// [`.remove_alias()`]: crate::slice::SplitInclusiveMut::remove_alias
+ /// [`.split_inclusive()`]: Self::split_inclusive
+ /// [`.split_mut()`]: Self::split_mut
+ #[inline]
+ pub fn split_inclusive_mut<F>(
+ &mut self,
+ pred: F,
+ ) -> SplitInclusiveMut<T, O, F>
+ where
+ F: FnMut(usize, &bool) -> bool,
+ {
+ SplitInclusiveMut::new(self.alias_mut(), pred)
+ }
+
+ /// Iterates over subslices separated by bits that match a predicate, from
+ /// the back edge. The matched bit is *not* contained in the yielded
+ /// bit-slices.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rsplit`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplit)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rsplit_mut()`] has the same splitting logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.split()`] iterates from the front of the bit-slice instead of the
+ /// back.
+ /// - [`.rsplitn()`] times out after `n` yields.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 1, 0];
+ /// // ^
+ /// let mut iter = bits.rsplit(|pos, _bit| pos % 3 == 2);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![0]);
+ /// assert_eq!(iter.next().unwrap(), bits![0, 1]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the last bit is matched, then an empty bit-slice will be the first
+ /// item yielded by the iterator. Similarly, if the first bit in the
+ /// bit-slice matches, then an empty bit-slice will be the last item
+ /// yielded.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1];
+ /// // ^
+ /// let mut iter = bits.rsplit(|_pos, bit| *bit);
+ ///
+ /// assert!(iter.next().unwrap().is_empty());
+ /// assert_eq!(iter.next().unwrap(), bits![0; 2]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If two matched bits are directly adjacent, then an empty bit-slice will
+ /// be yielded between them:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 0, 0, 1];
+ /// // ^ ^
+ /// let mut iter = bits.rsplit(|_pos, bit| !*bit);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![1]);
+ /// assert!(iter.next().unwrap().is_empty());
+ /// assert_eq!(iter.next().unwrap(), bits![1]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.rsplitn()`]: Self::rsplitn
+ /// [`.rsplit_mut()`]: Self::rsplit_mut
+ /// [`.split()`]: Self::split
+ #[inline]
+ pub fn rsplit<F>(&self, pred: F) -> RSplit<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ RSplit::new(self, pred)
+ }
+
+ /// Iterates over mutable subslices separated by bits that match a
+ /// predicate, from the back. The matched bit is *not* contained in the
+ /// yielded bit-slices.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rsplit_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplit_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rsplit()`] has the same splitting logic, but each yielded bit-slice
+ /// is immutable.
+ /// - [`.split_mut()`] iterates from the front of the bit-slice to the back.
+ /// - [`.rsplitn_mut()`] times out after `n` yields.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 1, 0];
+ /// // ^ ^
+ /// for group in bits.rsplit_mut(|_pos, bit| *bit) {
+ /// group.set(0, true);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 1, 1, 1, 1]);
+ /// ```
+ ///
+ /// [`.remove_alias()`]: crate::slice::RSplitMut::remove_alias
+ /// [`.rsplit()`]: Self::rsplit
+ /// [`.rsplitn_mut()`]: Self::rsplitn_mut
+ /// [`.split_mut()`]: Self::split_mut
+ #[inline]
+ pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ RSplitMut::new(self.alias_mut(), pred)
+ }
+
+ /// Iterates over subslices separated by bits that match a predicate, giving
+ /// up after yielding `n` times. The `n`th yield contains the rest of the
+ /// bit-slice. As with `.split()`, the yielded bit-slices do not contain the
+ /// matched bit.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::splitn`](https://doc.rust-lang.org/std/primitive.slice.html#method.splitn)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.splitn_mut()`] has the same splitting logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.rsplitn()`] iterates from the back of the bit-slice instead of the
+ /// front.
+ /// - [`.split()`] has the same splitting logic, but never times out.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1, 0, 1, 0];
+ /// let mut iter = bits.splitn(2, |_pos, bit| *bit);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![0, 0]);
+ /// assert_eq!(iter.next().unwrap(), bits![0, 1, 0]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.rsplitn()`]: Self::rsplitn
+ /// [`.split()`]: Self::split
+ /// [`.splitn_mut()`]: Self::splitn_mut
+ #[inline]
+ pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ SplitN::new(self, pred, n)
+ }
+
+ /// Iterates over mutable subslices separated by bits that match a
+ /// predicate, giving up after yielding `n` times. The `n`th yield contains
+ /// the rest of the bit-slice. As with `.split_mut()`, the yielded
+ /// bit-slices do not contain the matched bit.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::splitn_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.splitn_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.splitn()`] has the same splitting logic, but each yielded bit-slice
+ /// is immutable.
+ /// - [`.rsplitn_mut()`] iterates from the back of the bit-slice instead of
+ /// the front.
+ /// - [`.split_mut()`] has the same splitting logic, but never times out.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 1, 0];
+ /// for group in bits.splitn_mut(2, |_pos, bit| *bit) {
+ /// group.set(0, true);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 1, 1, 1, 0]);
+ /// ```
+ ///
+ /// [`.remove_alias()`]: crate::slice::SplitNMut::remove_alias
+ /// [`.rsplitn_mut()`]: Self::rsplitn_mut
+ /// [`.split_mut()`]: Self::split_mut
+ /// [`.splitn()`]: Self::splitn
+ #[inline]
+ pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ SplitNMut::new(self.alias_mut(), pred, n)
+ }
+
+ /// Iterates over subslices separated by bits that match a predicate, from
+ /// the back edge, giving up after yielding `n` times. The `n`th yield
+ /// contains the rest of the bit-slice. As with `.rsplit()`, the yielded
+ /// bit-slices do not contain the matched bit.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rsplitn`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplitn)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rsplitn_mut()`] has the same splitting logic, but each yielded
+ /// bit-slice is mutable.
+ /// - [`.splitn()`] iterates from the front of the bit-slice instead of the
+ /// back.
+ /// - [`.rsplit()`] has the same splitting logic, but never times out.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1, 1, 0];
+ /// // ^
+ /// let mut iter = bits.rsplitn(2, |_pos, bit| *bit);
+ ///
+ /// assert_eq!(iter.next().unwrap(), bits![0]);
+ /// assert_eq!(iter.next().unwrap(), bits![0, 0, 1]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`.rsplit()`]: Self::rsplit
+ /// [`.rsplitn_mut()`]: Self::rsplitn_mut
+ /// [`.splitn()`]: Self::splitn
+ #[inline]
+ pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ RSplitN::new(self, pred, n)
+ }
+
+ /// Iterates over mutable subslices separated by bits that match a
+ /// predicate from the back edge, giving up after yielding `n` times. The
+ /// `n`th yield contains the rest of the bit-slice. As with `.split_mut()`,
+ /// the yielded bit-slices do not contain the matched bit.
+ ///
+ /// Iterators do not require that each yielded item is destroyed before the
+ /// next is produced. This means that each bit-slice yielded must be marked
+ /// as aliased. If you are using this in a loop that does not collect
+ /// multiple yielded subslices for the same scope, then you can remove the
+ /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
+ /// the iterator.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rsplitn_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplitn_mut)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate function receives the index being tested as well as the
+ /// bit value at that index. This allows the predicate to have more than one
+ /// bit of information about the bit-slice being traversed.
+ ///
+ /// ## Sibling Methods
+ ///
+ /// - [`.rsplitn()`] has the same splitting logic, but each yielded
+ /// bit-slice is immutable.
+ /// - [`.splitn_mut()`] iterates from the front of the bit-slice instead of
+ /// the back.
+ /// - [`.rsplit_mut()`] has the same splitting logic, but never times out.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 0, 1, 0, 0, 0];
+ /// for group in bits.rsplitn_mut(2, |_idx, bit| *bit) {
+ /// group.set(0, true);
+ /// }
+ /// assert_eq!(bits, bits![1, 0, 1, 0, 0, 1, 1, 0, 0]);
+ /// // ^ group 2 ^ group 1
+ /// ```
+ ///
+ /// [`.remove_alias()`]: crate::slice::RSplitNMut::remove_alias
+ /// [`.rsplitn()`]: Self::rsplitn
+ /// [`.rsplit_mut()`]: Self::rsplit_mut
+ /// [`.splitn_mut()`]: Self::splitn_mut
+ #[inline]
+ pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<T, O, F>
+ where F: FnMut(usize, &bool) -> bool {
+ RSplitNMut::new(self.alias_mut(), pred, n)
+ }
+
+ /// Tests if the bit-slice contains the given sequence anywhere within it.
+ ///
+ /// This scans over `self.windows(other.len())` until one of the windows
+ /// matches. The search key does not need to share type parameters with the
+ /// bit-slice being tested, as the comparison is bit-wise. However, sharing
+ /// type parameters will accelerate the comparison.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::contains`](https://doc.rust-lang.org/std/primitive.slice.html#method.contains)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1, 0, 1, 1, 0, 0];
+ /// assert!( bits.contains(bits![0, 1, 1, 0]));
+ /// assert!(!bits.contains(bits![1, 0, 0, 1]));
+ /// ```
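+ ///
+ /// As a brief sketch, the needle may use different storage and ordering
+ /// type parameters, since the comparison is bit-wise:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let haystack = bits![u8, Msb0; 0, 0, 1, 0, 1, 1, 0, 0];
+ /// let needle = bits![u16, Lsb0; 0, 1, 1];
+ /// assert!(haystack.contains(needle));
+ /// ```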
+ #[inline]
+ pub fn contains<T2, O2>(&self, other: &BitSlice<T2, O2>) -> bool
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.len() >= other.len()
+ && self.windows(other.len()).any(|window| window == other)
+ }
+
+ /// Tests if the bit-slice begins with the given sequence.
+ ///
+ /// The search key does not need to share type parameters with the bit-slice
+ /// being tested, as the comparison is bit-wise. However, sharing type
+ /// parameters will accelerate the comparison.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::starts_with`](https://doc.rust-lang.org/std/primitive.slice.html#method.starts_with)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 1, 0];
+ /// assert!( bits.starts_with(bits![0, 1]));
+ /// assert!(!bits.starts_with(bits![1, 0]));
+ /// ```
+ ///
+ /// This always returns `true` if the needle is empty:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0];
+ /// let empty = bits![];
+ /// assert!(bits.starts_with(empty));
+ /// assert!(empty.starts_with(empty));
+ /// ```
+ #[inline]
+ pub fn starts_with<T2, O2>(&self, needle: &BitSlice<T2, O2>) -> bool
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.get(.. needle.len())
+ .map(|slice| slice == needle)
+ .unwrap_or(false)
+ }
+
+ /// Tests if the bit-slice ends with the given sequence.
+ ///
+ /// The search key does not need to share type parameters with the bit-slice
+ /// being tested, as the comparison is bit-wise. However, sharing type
+ /// parameters will accelerate the comparison.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::ends_with`](https://doc.rust-lang.org/std/primitive.slice.html#method.ends_with)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 1, 0];
+ /// assert!( bits.ends_with(bits![1, 0]));
+ /// assert!(!bits.ends_with(bits![0, 1]));
+ /// ```
+ ///
+ /// This always returns `true` if the needle is empty:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0];
+ /// let empty = bits![];
+ /// assert!(bits.ends_with(empty));
+ /// assert!(empty.ends_with(empty));
+ /// ```
+ #[inline]
+ pub fn ends_with<T2, O2>(&self, needle: &BitSlice<T2, O2>) -> bool
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ // Avoid arithmetic underflow when the needle is longer than `self`.
+ self.len()
+ .checked_sub(needle.len())
+ .and_then(|idx| self.get(idx ..))
+ .map(|slice| slice == needle)
+ .unwrap_or(false)
+ }
+
+ /// Removes a prefix bit-slice, if present.
+ ///
+ /// Like [`.starts_with()`], the search key does not need to share type
+ /// parameters with the bit-slice being stripped. If
+ /// `self.starts_with(prefix)`, then this returns `Some(&self[prefix.len()
+ /// ..])`, otherwise it returns `None`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::strip_prefix`](https://doc.rust-lang.org/std/primitive.slice.html#method.strip_prefix)
+ ///
+ /// ## API Differences
+ ///
+ /// `BitSlice` does not support pattern searches; instead, it permits `self`
+ /// and `prefix` to differ in type parameters.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1, 0, 1, 1, 0];
+ /// assert_eq!(bits.strip_prefix(bits![0, 1]).unwrap(), bits[2 ..]);
+ /// assert_eq!(bits.strip_prefix(bits![0, 1, 0, 0,]).unwrap(), bits[4 ..]);
+ /// assert!(bits.strip_prefix(bits![1, 0]).is_none());
+ /// ```
+ ///
+ /// [`.starts_with()`]: Self::starts_with
+ #[inline]
+ pub fn strip_prefix<T2, O2>(
+ &self,
+ prefix: &BitSlice<T2, O2>,
+ ) -> Option<&Self>
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ if self.starts_with(prefix) {
+ self.get(prefix.len() ..)
+ }
+ else {
+ None
+ }
+ }
+
+ /// Removes a suffix bit-slice, if present.
+ ///
+ /// Like [`.ends_with()`], the search key does not need to share type
+ /// parameters with the bit-slice being stripped. If
+ /// `self.ends_with(suffix)`, then this returns `Some(&self[.. self.len() -
+ /// suffix.len()])`, otherwise it returns `None`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::strip_suffix`](https://doc.rust-lang.org/std/primitive.slice.html#method.strip_suffix)
+ ///
+ /// ## API Differences
+ ///
+ /// `BitSlice` does not support pattern searches; instead, it permits `self`
+ /// and `suffix` to differ in type parameters.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1, 0, 1, 1, 0];
+ /// assert_eq!(bits.strip_suffix(bits![1, 0]).unwrap(), bits[.. 7]);
+ /// assert_eq!(bits.strip_suffix(bits![0, 1, 1, 0]).unwrap(), bits[.. 5]);
+ /// assert!(bits.strip_suffix(bits![0, 1]).is_none());
+ /// ```
+ ///
+ /// [`.ends_with()`]: Self::ends_with
+ #[inline]
+ pub fn strip_suffix<T2, O2>(
+ &self,
+ suffix: &BitSlice<T2, O2>,
+ ) -> Option<&Self>
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ if self.ends_with(suffix) {
+ self.get(.. self.len() - suffix.len())
+ }
+ else {
+ None
+ }
+ }
+
+ /// Rotates the contents of a bit-slice to the left (towards the zero
+ /// index).
+ ///
+ /// This essentially splits the bit-slice at `by`, then exchanges the two
+ /// pieces. `self[by ..]` becomes the first section, and is then followed by
+ /// `self[.. by]`.
+ ///
+ /// The implementation is batch-accelerated where possible. It should have a
+ /// runtime complexity much lower than `O(by)`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rotate_left`](https://doc.rust-lang.org/std/primitive.slice.html#method.rotate_left)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 0, 1, 0];
+ /// // split occurs here ^
+ /// bits.rotate_left(2);
+ /// assert_eq!(bits, bits![1, 0, 1, 0, 0, 0]);
+ /// ```
+ #[inline]
+ pub fn rotate_left(&mut self, mut by: usize) {
+ let len = self.len();
+ assert!(
+ by <= len,
+ "bit-slices cannot be rotated by more than their length",
+ );
+ if by == 0 || by == len {
+ return;
+ }
+ let mut tmp = BitArray::<usize, O>::ZERO;
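+ // Rotate in chunks of at most one `usize` per pass: stash the first
+ // `shamt` bits in `tmp`, slide the remainder toward the front, then
+ // write the stashed bits into the vacated back edge.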
+ while by > 0 {
+ let shamt = cmp::min(mem::bits_of::<usize>(), by);
+ unsafe {
+ let tmp_bits = tmp.get_unchecked_mut(.. shamt);
+ tmp_bits.clone_from_bitslice(self.get_unchecked(.. shamt));
+ self.copy_within_unchecked(shamt .., 0);
+ self.get_unchecked_mut(len - shamt ..)
+ .clone_from_bitslice(tmp_bits);
+ }
+ by -= shamt;
+ }
+ }
+
+ /// Rotates the contents of a bit-slice to the right (away from the zero
+ /// index).
+ ///
+ /// This essentially splits the bit-slice at `self.len() - by`, then
+ /// exchanges the two pieces. `self[len - by ..]` becomes the first section,
+ /// and is then followed by `self[.. len - by]`.
+ ///
+ /// The implementation is batch-accelerated where possible. It should have a
+ /// runtime complexity much lower than `O(by)`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::rotate_right`](https://doc.rust-lang.org/std/primitive.slice.html#method.rotate_right)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 1, 1, 0];
+ /// // split occurs here ^
+ /// bits.rotate_right(2);
+ /// assert_eq!(bits, bits![1, 0, 0, 0, 1, 1]);
+ /// ```
+ #[inline]
+ pub fn rotate_right(&mut self, mut by: usize) {
+ let len = self.len();
+ assert!(
+ by <= len,
+ "bit-slices cannot be rotated by more than their length",
+ );
+ if by == 0 || by == len {
+ return;
+ }
+ let mut tmp = BitArray::<usize, O>::ZERO;
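+ // Rotate in chunks of at most one `usize` per pass: stash the last
+ // `shamt` bits in `tmp`, slide the remainder toward the back, then
+ // write the stashed bits into the vacated front edge.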
+ while by > 0 {
+ let shamt = cmp::min(mem::bits_of::<usize>(), by);
+ let mid = len - shamt;
+ unsafe {
+ let tmp_bits = tmp.get_unchecked_mut(.. shamt);
+ tmp_bits.clone_from_bitslice(self.get_unchecked(mid ..));
+ self.copy_within_unchecked(.. mid, shamt);
+ self.get_unchecked_mut(.. shamt)
+ .clone_from_bitslice(tmp_bits);
+ }
+ by -= shamt;
+ }
+ }
+
+ /// Fills the bit-slice with a given bit.
+ ///
+ /// This is a recent stabilization in the standard library. `bitvec`
+ /// previously offered this behavior as the novel API `.set_all()`. That
+ /// method name is now removed in favor of this standard-library analogue.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::fill`](https://doc.rust-lang.org/std/primitive.slice.html#method.fill)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 5];
+ /// bits.fill(true);
+ /// assert_eq!(bits, bits![1; 5]);
+ /// ```
+ #[inline]
+ pub fn fill(&mut self, value: bool) {
+ let fill = if value { T::Mem::ALL } else { T::Mem::ZERO };
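+ // Write whole memory elements at a time: an `Enclave` is a single
+ // partially-occupied element, while a `Region` has optional partial
+ // head and tail elements around a run of fully-spanned elements.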
+ match self.domain_mut() {
+ Domain::Enclave(mut elem) => {
+ elem.store_value(fill);
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(mut elem) = head {
+ elem.store_value(fill);
+ }
+ for elem in body {
+ elem.store_value(fill);
+ }
+ if let Some(mut elem) = tail {
+ elem.store_value(fill);
+ }
+ },
+ }
+ }
+
+ /// Fills the bit-slice with bits produced by a generator function.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::fill_with`](https://doc.rust-lang.org/std/primitive.slice.html#method.fill_with)
+ ///
+ /// ## API Differences
+ ///
+ /// The generator function receives the index of the bit being initialized
+ /// as an argument.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 5];
+ /// bits.fill_with(|idx| idx % 2 == 0);
+ /// assert_eq!(bits, bits![1, 0, 1, 0, 1]);
+ /// ```
+ #[inline]
+ pub fn fill_with<F>(&mut self, mut func: F)
+ where F: FnMut(usize) -> bool {
+ for (idx, ptr) in self.as_mut_bitptr_range().enumerate() {
+ unsafe {
+ ptr.write(func(idx));
+ }
+ }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.clone_from_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn clone_from_slice<T2, O2>(&mut self, src: &BitSlice<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.clone_from_bitslice(src);
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.copy_from_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn copy_from_slice(&mut self, src: &Self) {
+ self.copy_from_bitslice(src)
+ }
+
+ /// Copies a span of bits to another location in the bit-slice.
+ ///
+ /// `src` is the range of bit-indices in the bit-slice to copy, and `dest` is
+ /// the starting index of the destination range. `src` and `dest .. dest +
+ /// src.len()` are permitted to overlap; the copy will automatically detect
+ /// and manage this. However, both `src` and `dest .. dest + src.len()`
+ /// **must** fall within the bounds of `self`.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::copy_within`](https://doc.rust-lang.org/std/primitive.slice.html#method.copy_within)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if either the source or destination range exceed
+ /// `self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0];
+ /// bits.copy_within(1 .. 5, 8);
+ /// // v v v v
+ /// assert_eq!(bits, bits![1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0]);
+ /// // ^ ^ ^ ^
+ /// ```
+ #[inline]
+ pub fn copy_within<R>(&mut self, src: R, dest: usize)
+ where R: RangeExt<usize> {
+ let len = self.len();
+ let src = src.normalize(0, len);
+ self.assert_in_bounds(src.start, 0 .. len);
+ self.assert_in_bounds(src.end, 0 ..= len);
+ self.assert_in_bounds(dest, 0 .. len);
+ self.assert_in_bounds(dest + src.len(), 0 ..= len);
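+ // All bounds are verified above; the unchecked copy handles any overlap
+ // between the source and destination ranges.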
+ unsafe {
+ self.copy_within_unchecked(src, dest);
+ }
+ }
+
+ #[inline]
+ #[deprecated = "use `.swap_with_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn swap_with_slice<T2, O2>(&mut self, other: &mut BitSlice<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.swap_with_bitslice(other);
+ }
+
+ /// Produces bit-slice view(s) with different underlying storage types.
+ ///
+ /// This may have unexpected effects, and you cannot assume that
+ /// `before[idx] == after[idx]`! Consult the [tables in the manual][layout]
+ /// for information about memory layouts.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::align_to`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to)
+ ///
+ /// ## Notes
+ ///
+ /// Unlike the standard library documentation, this explicitly guarantees
+ /// that the middle bit-slice will have maximal size. You may rely on this
+ /// property.
+ ///
+ /// ## Safety
+ ///
+ /// You may not use this to cast away alias protections. Rust does not have
+ /// support for higher-kinded types, so this cannot express the relation
+ /// `Outer<T> -> Outer<U> where Outer: BitStoreContainer`, but memory safety
+ /// does require that you respect this rule. Reälign integers to integers,
+ /// `Cell`s to `Cell`s, and atomics to atomics, but do not cross these
+ /// boundaries.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ /// let bits = bytes.view_bits::<Lsb0>();
+ /// let (pfx, mid, sfx) = unsafe {
+ /// bits.align_to::<u16>()
+ /// };
+ /// assert!(pfx.len() <= 8);
+ /// assert_eq!(mid.len(), 48);
+ /// assert!(sfx.len() <= 8);
+ /// ```
+ ///
+ /// [layout]: https://bitvecto-rs.github.io/bitvec/memory-layout.html
+ #[inline]
+ pub unsafe fn align_to<U>(&self) -> (&Self, &BitSlice<U, O>, &Self)
+ where U: BitStore {
+ let (l, c, r) = self.as_bitspan().align_to::<U>();
+ (
+ l.into_bitslice_ref(),
+ c.into_bitslice_ref(),
+ r.into_bitslice_ref(),
+ )
+ }
+
+ /// Produces bit-slice view(s) with different underlying storage types.
+ ///
+ /// This may have unexpected effects, and you cannot assume that
+ /// `before[idx] == after[idx]`! Consult the [tables in the manual][layout]
+ /// for information about memory layouts.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::align_to_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to_mut)
+ ///
+ /// ## Notes
+ ///
+ /// Unlike the standard library documentation, this explicitly guarantees
+ /// that the middle bit-slice will have maximal size. You may rely on this
+ /// property.
+ ///
+ /// ## Safety
+ ///
+ /// You may not use this to cast away alias protections. Rust does not have
+ /// support for higher-kinded types, so this cannot express the relation
+ /// `Outer<T> -> Outer<U> where Outer: BitStoreContainer`, but memory safety
+ /// does require that you respect this rule. Reälign integers to integers,
+ /// `Cell`s to `Cell`s, and atomics to atomics, but do not cross these
+ /// boundaries.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ /// let bits = bytes.view_bits_mut::<Lsb0>();
+ /// let (pfx, mid, sfx) = unsafe {
+ /// bits.align_to_mut::<u16>()
+ /// };
+ /// assert!(pfx.len() <= 8);
+ /// assert_eq!(mid.len(), 48);
+ /// assert!(sfx.len() <= 8);
+ /// ```
+ ///
+ /// [layout]: https://bitvecto-rs.github.io/bitvec/memory-layout.html
+ #[inline]
+ pub unsafe fn align_to_mut<U>(
+ &mut self,
+ ) -> (&mut Self, &mut BitSlice<U, O>, &mut Self)
+ where U: BitStore {
+ let (l, c, r) = self.as_mut_bitspan().align_to::<U>();
+ (
+ l.into_bitslice_mut(),
+ c.into_bitslice_mut(),
+ r.into_bitslice_mut(),
+ )
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ #[deprecated = "use `.to_bitvec()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn to_vec(&self) -> BitVec<T::Unalias, O> {
+ self.to_bitvec()
+ }
+
+ /// Creates a bit-vector by repeating a bit-slice `n` times.
+ ///
+ /// ## Original
+ ///
+ /// [`slice::repeat`](https://doc.rust-lang.org/std/primitive.slice.html#method.repeat)
+ ///
+ /// ## Panics
+ ///
+ /// This method panics if `self.len() * n` exceeds the `BitVec` capacity.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// assert_eq!(bits![0, 1].repeat(3), bitvec![0, 1, 0, 1, 0, 1]);
+ /// ```
+ ///
+ /// This panics by exceeding bit-vector maximum capacity:
+ ///
+ /// ```rust,should_panic
+ /// use bitvec::prelude::*;
+ ///
+ /// bits![0, 1].repeat(BitSlice::<usize, Lsb0>::MAX_BITS);
+ /// ```
+ #[inline]
+ pub fn repeat(&self, n: usize) -> BitVec<T::Unalias, O> {
+ let len = self.len();
+ let total = len.checked_mul(n).expect("capacity overflow");
+
+ let mut out = BitVec::repeat(false, total);
+
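+ // Copy `self` into each successive `len`-bit chunk. The chunks never
+ // coexist outside the loop body, so the alias marking can be removed.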
+ let iter = unsafe { out.chunks_exact_mut(len).remove_alias() };
+ for chunk in iter {
+ chunk.clone_from_bitslice(self);
+ }
+
+ out
+ }
+
+ /* As of 1.56, the `concat` and `join` methods use still-unstable traits
+ * to govern the collection of multiple subslices into one vector. These
+ * are possible to copy over and redefine locally, but unless a user asks
+ * for it, doing so is considered a low priority.
+ */
+}
+
+#[inline]
+#[allow(missing_docs, clippy::missing_docs_in_private_items)]
+#[deprecated = "use `BitSlice::from_element()` instead"]
+pub fn from_ref<T, O>(elem: &T) -> &BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ BitSlice::from_element(elem)
+}
+
+#[inline]
+#[allow(missing_docs, clippy::missing_docs_in_private_items)]
+#[deprecated = "use `BitSlice::from_element_mut()` instead"]
+pub fn from_mut<T, O>(elem: &mut T) -> &mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ BitSlice::from_element_mut(elem)
+}
+
+#[inline]
+#[doc = include_str!("../../doc/slice/from_raw_parts.md")]
+pub unsafe fn from_raw_parts<'a, T, O>(
+ data: BitPtr<Const, T, O>,
+ len: usize,
+) -> Result<&'a BitSlice<T, O>, BitSpanError<T>>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ data.span(len).map(|bp| bp.into_bitslice_ref())
+}
+
+#[inline]
+#[doc = include_str!("../../doc/slice/from_raw_parts_mut.md")]
+pub unsafe fn from_raw_parts_mut<'a, T, O>(
+ data: BitPtr<Mut, T, O>,
+ len: usize,
+) -> Result<&'a mut BitSlice<T, O>, BitSpanError<T>>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ data.span(len).map(|bp| bp.into_bitslice_mut())
+}
+
+#[doc = include_str!("../../doc/slice/BitSliceIndex.md")]
+pub trait BitSliceIndex<'a, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// The output type of immutable access.
+ type Immut;
+
+ /// The output type of mutable access.
+ type Mut;
+
+ /// Immutably indexes into a bit-slice, returning `None` if `self` is out of
+ /// bounds.
+ ///
+ /// ## Original
+ ///
+ /// [`SliceIndex::get`](core::slice::SliceIndex::get)
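+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch of using the trait directly (ordinary code calls
+ /// `BitSlice::get` instead):
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ /// use bitvec::slice::BitSliceIndex;
+ ///
+ /// let bits = bits![0, 1];
+ /// assert_eq!(1usize.get(bits).map(|bit| *bit), Some(true));
+ /// assert!(4usize.get(bits).is_none());
+ /// ```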
+ fn get(self, bits: &'a BitSlice<T, O>) -> Option<Self::Immut>;
+
+ /// Mutably indexes into a bit-slice, returning `None` if `self` is out of
+ /// bounds.
+ ///
+ /// ## Original
+ ///
+ /// [`SliceIndex::get_mut`](core::slice::SliceIndex::get_mut)
+ fn get_mut(self, bits: &'a mut BitSlice<T, O>) -> Option<Self::Mut>;
+
+ /// Immutably indexes into a bit-slice without doing any bounds checking.
+ ///
+ /// ## Original
+ ///
+ /// [`SliceIndex::get_unchecked`](core::slice::SliceIndex::get_unchecked)
+ ///
+ /// ## Safety
+ ///
+ /// If `self` is not in bounds, then memory accesses through it are illegal
+ /// and the program becomes undefined. You must ensure that `self` is
+ /// appropriately within `0 .. bits.len()` at the call site.
+ unsafe fn get_unchecked(self, bits: &'a BitSlice<T, O>) -> Self::Immut;
+
+ /// Mutably indexes into a bit-slice without doing any bounds checking.
+ ///
+ /// ## Original
+ ///
+ /// [`SliceIndex::get_unchecked_mut`][0]
+ ///
+ /// ## Safety
+ ///
+ /// If `self` is not in bounds, then memory accesses through it are illegal
+ /// and the program becomes undefined. You must ensure that `self` is
+ /// appropriately within `0 .. bits.len()` at the call site.
+ ///
+ /// [0]: core::slice::SliceIndex::get_unchecked_mut
+ unsafe fn get_unchecked_mut(self, bits: &'a mut BitSlice<T, O>)
+ -> Self::Mut;
+
+ /// Immutably indexes into a bit-slice, panicking if `self` is out of
+ /// bounds.
+ ///
+ /// ## Original
+ ///
+ /// [`SliceIndex::index`](core::slice::SliceIndex::index)
+ ///
+ /// ## Panics
+ ///
+ /// Implementations are required to panic if `self` exceeds `bits.len()` in
+ /// any way.
+ fn index(self, bits: &'a BitSlice<T, O>) -> Self::Immut;
+
+ /// Mutably indexes into a bit-slice, panicking if `self` is out of bounds.
+ ///
+ /// ## Original
+ ///
+ /// [`SliceIndex::index_mut`](core::slice::SliceIndex::index_mut)
+ ///
+ /// ## Panics
+ ///
+ /// Implementations are required to panic if `self` exceeds `bits.len()` in
+ /// any way.
+ fn index_mut(self, bits: &'a mut BitSlice<T, O>) -> Self::Mut;
+}
+
+impl<'a, T, O> BitSliceIndex<'a, T, O> for usize
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Immut = BitRef<'a, Const, T, O>;
+ type Mut = BitRef<'a, Mut, T, O>;
+
+ #[inline]
+ fn get(self, bits: &'a BitSlice<T, O>) -> Option<Self::Immut> {
+ if self < bits.len() {
+ Some(unsafe { self.get_unchecked(bits) })
+ }
+ else {
+ None
+ }
+ }
+
+ #[inline]
+ fn get_mut(self, bits: &'a mut BitSlice<T, O>) -> Option<Self::Mut> {
+ if self < bits.len() {
+ Some(unsafe { self.get_unchecked_mut(bits) })
+ }
+ else {
+ None
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, bits: &'a BitSlice<T, O>) -> Self::Immut {
+ bits.as_bitptr().add(self).as_ref().unwrap()
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(
+ self,
+ bits: &'a mut BitSlice<T, O>,
+ ) -> Self::Mut {
+ bits.as_mut_bitptr().add(self).as_mut().unwrap()
+ }
+
+ #[inline]
+ fn index(self, bits: &'a BitSlice<T, O>) -> Self::Immut {
+ self.get(bits).unwrap_or_else(|| {
+ panic!("index {} out of bounds: {}", self, bits.len())
+ })
+ }
+
+ #[inline]
+ fn index_mut(self, bits: &'a mut BitSlice<T, O>) -> Self::Mut {
+ let len = bits.len();
+ self.get_mut(bits)
+ .unwrap_or_else(|| panic!("index {} out of bounds: {}", self, len))
+ }
+}
+
+/// Implements indexing on bit-slices by various range types.
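+///
+/// Each instantiation supplies a `check` closure that validates the range
+/// against a bit-span's length, and a `select` closure that produces the
+/// chosen sub-span without any bounds checking.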
+macro_rules! range_impl {
+ ($r:ty { check $check:expr; select $select:expr; }) => {
+ #[allow(clippy::redundant_closure_call)]
+ impl<'a, T, O> BitSliceIndex<'a, T, O> for $r
+ where
+ O: BitOrder,
+ T: BitStore,
+ {
+ type Immut = &'a BitSlice<T, O>;
+ type Mut = &'a mut BitSlice<T, O>;
+
+ #[inline]
+ #[allow(
+ clippy::blocks_in_if_conditions,
+ clippy::redundant_closure_call
+ )]
+ fn get(self, bits: Self::Immut) -> Option<Self::Immut> {
+ if ($check)(self.clone(), bits.as_bitspan()) {
+ Some(unsafe { self.get_unchecked(bits) })
+ }
+ else {
+ None
+ }
+ }
+
+ #[inline]
+ #[allow(
+ clippy::blocks_in_if_conditions,
+ clippy::redundant_closure_call
+ )]
+ fn get_mut(self, bits: Self::Mut) -> Option<Self::Mut> {
+ if ($check)(self.clone(), bits.as_bitspan()) {
+ Some(unsafe { self.get_unchecked_mut(bits) })
+ }
+ else {
+ None
+ }
+ }
+
+ #[inline]
+ #[allow(clippy::redundant_closure_call)]
+ unsafe fn get_unchecked(self, bits: Self::Immut) -> Self::Immut {
+ ($select)(self, bits.as_bitspan()).into_bitslice_ref()
+ }
+
+ #[inline]
+ #[allow(clippy::redundant_closure_call)]
+ unsafe fn get_unchecked_mut(self, bits: Self::Mut) -> Self::Mut {
+ ($select)(self, bits.as_mut_bitspan()).into_bitslice_mut()
+ }
+
+ #[inline]
+ #[track_caller]
+ fn index(self, bits: Self::Immut) -> Self::Immut {
+ let r = self.clone();
+ let l = bits.len();
+ self.get(bits).unwrap_or_else(|| {
+ panic!("range {:?} out of bounds: {}", r, l)
+ })
+ }
+
+ #[inline]
+ #[track_caller]
+ fn index_mut(self, bits: Self::Mut) -> Self::Mut {
+ let r = self.clone();
+ let l = bits.len();
+ self.get_mut(bits).unwrap_or_else(|| {
+ panic!("range {:?} out of bounds: {}", r, l)
+ })
+ }
+ }
+ };
+}
+
+range_impl!(Range<usize> {
+ check |Range { start, end }, span: BitSpan<_, _, _>| {
+ let len = span.len();
+ start <= len && end <= len && start <= end
+ };
+
+ select |Range { start, end }, span: BitSpan<_, _, _>| {
+ span.to_bitptr().add(start).span_unchecked(end - start)
+ };
+});
+
+range_impl!(RangeFrom<usize> {
+ check |RangeFrom { start }, span: BitSpan<_, _, _>| {
+ start <= span.len()
+ };
+
+ select |RangeFrom { start }, span: BitSpan<_, _, _>| {
+ span.to_bitptr().add(start).span_unchecked(span.len() - start)
+ };
+});
+
+range_impl!(RangeTo<usize> {
+ check |RangeTo { end }, span: BitSpan<_, _, _>| {
+ end <= span.len()
+ };
+
+ select |RangeTo { end }, mut span: BitSpan<_, _, _>| {
+ span.set_len(end);
+ span
+ };
+});
+
+range_impl!(RangeInclusive<usize> {
+ check |range: Self, span: BitSpan<_, _, _>| {
+ let len = span.len();
+ let start = *range.start();
+ let end = *range.end();
+
+ start < len && end < len && start <= end
+ };
+
+ select |range: Self, span: BitSpan<_, _, _>| {
+ let start = *range.start();
+ let end = *range.end();
+ span.to_bitptr().add(start).span_unchecked(end + 1 - start)
+ };
+});
+
+range_impl!(RangeToInclusive<usize> {
+ check |RangeToInclusive { end }, span: BitSpan<_, _, _>| {
+ end < span.len()
+ };
+
+ select |RangeToInclusive { end }, mut span: BitSpan<_, _, _>| {
+ span.set_len(end + 1);
+ span
+ };
+});
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> BitSliceIndex<'a, T, O> for RangeFull
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Immut = &'a BitSlice<T, O>;
+ type Mut = &'a mut BitSlice<T, O>;
+
+ #[inline]
+ fn get(self, bits: Self::Immut) -> Option<Self::Immut> {
+ Some(bits)
+ }
+
+ #[inline]
+ fn get_mut(self, bits: Self::Mut) -> Option<Self::Mut> {
+ Some(bits)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, bits: Self::Immut) -> Self::Immut {
+ bits
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, bits: Self::Mut) -> Self::Mut {
+ bits
+ }
+
+ #[inline]
+ fn index(self, bits: Self::Immut) -> Self::Immut {
+ bits
+ }
+
+ #[inline]
+ fn index_mut(self, bits: Self::Mut) -> Self::Mut {
+ bits
+ }
+}
diff --git a/src/slice/iter.rs b/src/slice/iter.rs
new file mode 100644
index 0000000..a9b974b
--- /dev/null
+++ b/src/slice/iter.rs
@@ -0,0 +1,2595 @@
+#![doc = include_str!("../../doc/slice/iter.md")]
+
+use core::{
+ cmp,
+ fmt::{
+ self,
+ Debug,
+ Formatter,
+ },
+ iter::{
+ FusedIterator,
+ Map,
+ },
+ marker::PhantomData,
+ mem,
+};
+
+use wyz::comu::{
+ Const,
+ Mut,
+};
+
+use super::{
+ BitSlice,
+ BitSliceIndex,
+};
+use crate::{
+ order::{
+ BitOrder,
+ Lsb0,
+ Msb0,
+ },
+ ptr::{
+ BitPtrRange,
+ BitRef,
+ },
+ store::BitStore,
+};
+
+/// [Original](https://doc.rust-lang.org/core/iter/trait.IntoIterator.html#impl-IntoIterator-1)
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> IntoIterator for &'a BitSlice<T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ type IntoIter = Iter<'a, T, O>;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ Iter::new(self)
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/core/iter/trait.IntoIterator.html#impl-IntoIterator-3)
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> IntoIterator for &'a mut BitSlice<T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ type IntoIter = IterMut<'a, T, O>;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ IterMut::new(self)
+ }
+}
+
+#[repr(transparent)]
+#[doc = include_str!("../../doc/slice/iter/Iter.md")]
+pub struct Iter<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// A dual-pointer range of the bit-slice undergoing iteration.
+ ///
+ /// This structure stores two fully-decoded pointers to the first live and
+ /// first dead bits, trading increased size (three words instead of two) for
+ /// faster performance when iterating.
+ range: BitPtrRange<Const, T, O>,
+ /// `Iter` is semantically equivalent to a `&BitSlice`.
+ _ref: PhantomData<&'a BitSlice<T, O>>,
+}
+
+impl<'a, T, O> Iter<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a BitSlice<T, O>) -> Self {
+ Self {
+ range: slice.as_bitptr_range(),
+ _ref: PhantomData,
+ }
+ }
+
+ /// Views the currently unyielded bit-slice.
+ ///
+ /// Because the iterator is a shared view, the returned bit-slice does not
+ /// cause a lifetime conflict, and the iterator can continue to be used
+ /// while it exists.
+ ///
+ /// ## Original
+ ///
+ /// [`Iter::as_slice`](core::slice::Iter::as_slice)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1, 1];
+ /// let mut iter = bits.iter();
+ ///
+ /// assert_eq!(iter.as_bitslice(), bits![0, 0, 1, 1]);
+ /// assert!(!*iter.nth(1).unwrap());
+ /// assert_eq!(iter.as_bitslice(), bits![1, 1]);
+ /// ```
+ #[inline]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_bitslice(&self) -> &'a BitSlice<T, O> {
+ unsafe { self.range.clone().into_bitspan().into_bitslice_ref() }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_slice(&self) -> &'a BitSlice<T, O> {
+ self.as_bitslice()
+ }
+
+ /// Adapts the iterator to yield regular `&bool` references rather than the
+ /// [proxy reference][0].
+ ///
+ /// This allows the iterator to be used in APIs that expect ordinary
+ /// references. It reads from the proxy and provides an equivalent
+ /// `&'static bool`. The address value of the yielded reference is not
+ /// related to the addresses covered by the `BitSlice` buffer in any way.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1];
+ /// let mut iter = bits.iter().by_refs();
+ /// assert_eq!(iter.next(), Some(&false));
+ /// assert_eq!(iter.next(), Some(&true));
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [0]: crate::ptr::BitRef
+ #[inline]
+ pub fn by_refs(self) -> BitRefIter<'a, T, O> {
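+ // `bool` has only two possible values, so statically-allocated `true`
+ // and `false` serve as the referents for every yielded reference.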
+ self.by_vals().map(|bit| match bit {
+ true => &true,
+ false => &false,
+ })
+ }
+
+ /// Adapts the iterator to yield `bool` values rather than the
+ /// [proxy reference][0].
+ ///
+ /// This allows the iterator to be used in APIs that expect direct values.
+ /// It dereferences the proxy and yields the referent `bool` directly. It
+ /// replaces `Iterator::copied`, which is not available on this type.
+ ///
+ /// ## Original
+ ///
+ /// [`Iterator::copied`](core::iter::Iterator::copied)
+ ///
+ /// ## Performance
+ ///
+ /// This bypasses the construction of a `BitRef` for each yielded bit. Do
+ /// not use `bits.as_bitptr_range().map(|bp| unsafe { bp.read() })` in a
+ /// misguided attempt to eke out some additional performance in your code.
+ ///
+ /// This iterator is already the fastest possible walk across a bit-slice.
+ /// You do not need to beat it.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1];
+ /// let mut iter = bits.iter().by_vals();
+ /// assert_eq!(iter.next(), Some(false));
+ /// assert_eq!(iter.next(), Some(true));
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [0]: crate::ptr::BitRef
+ #[inline]
+ pub fn by_vals(self) -> BitValIter<'a, T, O> {
+ BitValIter {
+ range: self.range,
+ _life: PhantomData,
+ }
+ }
+
+ /// Yields `bool` values directly, rather than [proxy references][0].
+ ///
+ /// The original slice iterator yields ordinary `&bool` references, and as
+ /// such allows [`Iterator::copied`] to exist. This iterator does not
+ /// satisfy the bounds for that method, so `.copied()` is provided as an
+ /// inherent method in order to maintain source compatibility. Prefer
+ /// [`.by_vals()`] instead, which
+ /// avoids the name collision while still making clear that it yields `bool`
+ /// values.
+ ///
+ /// [`Iterator::copied`]: core::iter::Iterator::copied
+ /// [`.by_vals()`]: Self::by_vals
+ /// [0]: crate::ptr::BitRef
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "`Iterator::copied` does not exist on this type. Use \
+ `.by_vals()` instead"]
+ pub fn copied(self) -> BitValIter<'a, T, O> {
+ self.by_vals()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Clone)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for Iter<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self {
+ range: self.range.clone(),
+ ..*self
+ }
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-AsRef%3C%5BT%5D%3E)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitSlice<T, O>> for Iter<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Debug)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Debug for Iter<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("Iter").field(&self.as_bitslice()).finish()
+ }
+}
+
+#[repr(transparent)]
+#[doc = include_str!("../../doc/slice/iter/IterMut.md")]
+pub struct IterMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// A dual-pointer range of the bit-slice undergoing iteration.
+ ///
+ /// This structure stores two fully-decoded pointers to the first live and
+ /// first dead bits, trading increased size (three words instead of two) for
+ /// faster performance when iterating.
+ range: BitPtrRange<Mut, T::Alias, O>,
+ /// `IterMut` is semantically equivalent to an aliased `&mut BitSlice`.
+ _ref: PhantomData<&'a mut BitSlice<T::Alias, O>>,
+}
+
+impl<'a, T, O> IterMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a mut BitSlice<T, O>) -> Self {
+ Self {
+ range: slice.alias_mut().as_mut_bitptr_range(),
+ _ref: PhantomData,
+ }
+ }
+
+ /// Views the underlying bit-slice as a subslice of the original data.
+ ///
+ /// This consumes the iterator in order to avoid creating aliasing
+ /// references between the returned subslice (which has the original
+ /// lifetime, and is not borrowed from the iterator) and the proxies the
+ /// iterator produces.
+ ///
+ /// ## Original
+ ///
+ /// [`IterMut::into_slice`](core::slice::IterMut::into_slice)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0, 0, 1, 1];
+ /// let mut iter = bits.iter_mut();
+ ///
+ /// *iter.next().unwrap() = true;
+ /// assert_eq!(iter.into_bitslice(), bits![0, 1, 1]);
+ /// assert!(bits[0]);
+ /// ```
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_bitslice(self) -> &'a mut BitSlice<T::Alias, O> {
+ unsafe { self.range.into_bitspan().into_bitslice_mut() }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.into_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn into_slice(self) -> &'a mut BitSlice<T::Alias, O> {
+ self.into_bitslice()
+ }
+
+ /// Views the remaining bit-slice that has not yet been iterated.
+ ///
+ /// This borrows the iterator’s own lifetime, preventing it from being used
+ /// while the bit-slice view exists and thus ensuring that no aliasing
+ /// references are created. Bits that the iterator has already yielded are
+ /// not included in the produced bit-slice.
+ ///
+ /// ## Original
+ ///
+ /// [`IterMut::as_slice`](core::slice::IterMut::as_slice)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 4];
+ /// let mut iter = bits.iter_mut();
+ ///
+ /// *iter.next().unwrap() = true;
+ /// assert_eq!(iter.as_bitslice(), bits![0; 3]);
+ /// *iter.next().unwrap() = true;
+ /// assert_eq!(iter.as_bitslice(), bits![0; 2]);
+ ///
+ /// assert_eq!(bits, bits![1, 1, 0, 0]);
+ /// ```
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn as_bitslice(&self) -> &BitSlice<T::Alias, O> {
+ unsafe { self.range.clone().into_bitspan().into_bitslice_ref() }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_slice(&self) -> &BitSlice<T::Alias, O> {
+ self.as_bitslice()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-AsRef%3C%5BT%5D%3E)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitSlice<T::Alias, O>> for IterMut<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<T::Alias, O> {
+ self.as_bitslice()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Debug)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Debug for IterMut<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("IterMut")
+ .field(&self.as_bitslice())
+ .finish()
+ }
+}
+
+/// `Iter` and `IterMut` have very nearly the same implementation text.
+macro_rules! iter {
+ ($($iter:ident => $item:ty);+ $(;)?) => { $(
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Iterator) and
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Iterator)
+ impl<'a, T, O> Iterator for $iter<'a, T, O>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ {
+ type Item = $item;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.range.next().map(|bp| unsafe { BitRef::from_bitptr(bp) })
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.range.nth(n).map(|bp| unsafe { BitRef::from_bitptr(bp) })
+ }
+
+ easy_iter!();
+ }
+
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-DoubleEndedIterator) and
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-DoubleEndedIterator)
+ impl<'a, T, O> DoubleEndedIterator for $iter<'a, T, O>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.range
+ .next_back()
+ .map(|bp| unsafe { BitRef::from_bitptr(bp) })
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.range
+ .nth_back(n)
+ .map(|bp| unsafe { BitRef::from_bitptr(bp) })
+ }
+ }
+
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-ExactSizeIterator) and
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-ExactSizeIterator)
+ impl<T, O> ExactSizeIterator for $iter<'_, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ {
+ #[inline]
+ fn len(&self) -> usize {
+ self.range.len()
+ }
+ }
+
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-FusedIterator) and
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-FusedIterator)
+ impl<T, O> FusedIterator for $iter<'_, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ {
+ }
+
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Send) and
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Send)
+ // #[allow(clippy::non_send_fields_in_send_ty)]
+ unsafe impl<'a, T, O> Send for $iter<'a, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ &'a mut BitSlice<T, O>: Send,
+ {
+ }
+
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Sync) and
+ /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Sync)
+ unsafe impl<T, O> Sync for $iter<'_, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: Sync,
+ {
+ }
+ )+ };
+}
+
+iter! {
+ Iter => <usize as BitSliceIndex<'a, T, O>>::Immut;
+ IterMut => <usize as BitSliceIndex<'a, T::Alias, O>>::Mut;
+}
+
+/// Builds an iterator implementation for grouping iterators.
+macro_rules! group {
+ // The iterator and its yielded type.
+ ($iter:ident => $item:ty {
+ // The eponymous functions from the iterator traits.
+ $next:item
+ $nth:item
+ $next_back:item
+ $nth_back:item
+ $len:item
+ }) => {
+ impl<'a, T, O> Iterator for $iter<'a, T, O>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ {
+ type Item = $item;
+
+ #[inline]
+ $next
+
+ #[inline]
+ $nth
+
+ easy_iter!();
+ }
+
+ impl<T, O> DoubleEndedIterator for $iter<'_, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ {
+ #[inline]
+ $next_back
+
+ #[inline]
+ $nth_back
+ }
+
+ impl<T, O> ExactSizeIterator for $iter<'_, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ {
+ #[inline]
+ $len
+ }
+
+ impl<T, O> FusedIterator for $iter<'_, T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ {
+ }
+ };
+}
+
+/// An iterator over `BitSlice` that yields `&bool` directly.
+pub type BitRefIter<'a, T, O> = Map<BitValIter<'a, T, O>, fn(bool) -> &'a bool>;
+
+/// An iterator over `BitSlice` that yields `bool` directly.
+pub struct BitValIter<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The start and end bit-pointers in the iteration region.
+ range: BitPtrRange<Const, T, O>,
+ /// Holds the lifetime of the source region, so that this iterator cannot
+ /// outlive it and cause a use-after-free.
+ _life: PhantomData<&'a BitSlice<T, O>>,
+}
+
+group!(BitValIter => bool {
+ fn next(&mut self) -> Option<Self::Item> {
+ self.range.next().map(|bp| unsafe { bp.read() })
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.range.nth(n).map(|bp| unsafe { bp.read() })
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.range.next_back().map(|bp| unsafe { bp.read() })
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.range.nth_back(n).map(|bp| unsafe { bp.read() })
+ }
+
+ fn len(&self) -> usize {
+ self.range.len()
+ }
+});
+
+#[derive(Clone, Debug)]
+#[doc = include_str!("../../doc/slice/iter/Windows.md")]
+pub struct Windows<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// The width of the produced windows.
+ width: usize,
+}
+
+group!(Windows => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.width > self.slice.len() {
+ self.slice = Default::default();
+ return None;
+ }
+ unsafe {
+ let out = self.slice.get_unchecked(.. self.width);
+ self.slice = self.slice.get_unchecked(1 ..);
+ Some(out)
+ }
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, ovf) = self.width.overflowing_add(n);
+ if end > self.slice.len() || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ unsafe {
+ let out = self.slice.get_unchecked(n .. end);
+ self.slice = self.slice.get_unchecked(n + 1 ..);
+ Some(out)
+ }
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let len = self.slice.len();
+ if self.width > len {
+ self.slice = Default::default();
+ return None;
+ }
+ unsafe {
+ let out = self.slice.get_unchecked(len - self.width ..);
+ self.slice = self.slice.get_unchecked(.. len - 1);
+ Some(out)
+ }
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, ovf) = self.slice.len().overflowing_sub(n);
+ if end < self.width || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ unsafe {
+ let out = self.slice.get_unchecked(end - self.width .. end);
+ self.slice = self.slice.get_unchecked(.. end - 1);
+ Some(out)
+ }
+ }
+
+ fn len(&self) -> usize {
+ let len = self.slice.len();
+ if self.width > len {
+ 0
+ }
+ else {
+ len - self.width + 1
+ }
+ }
+});
+
+#[derive(Clone, Debug)]
+#[doc = include_str!("../../doc/slice/iter/Chunks.md")]
+pub struct Chunks<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+group!(Chunks => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let len = self.slice.len();
+ if len == 0 {
+ return None;
+ }
+ let mid = cmp::min(len, self.width);
+ let (out, rest) = unsafe { self.slice.split_at_unchecked(mid) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.slice.len();
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= len || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ let split = start.checked_add(self.width)
+ .map(|mid| cmp::min(mid, len))
+ .unwrap_or(len);
+ unsafe {
+ let (head, rest) = self.slice.split_at_unchecked(split);
+ self.slice = rest;
+ Some(head.get_unchecked(start ..))
+ }
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.slice.len() {
+ 0 => None,
+ len => {
+ // Determine if the back chunk is a remnant or a whole chunk.
+ let rem = len % self.width;
+ let size = if rem == 0 { self.width } else { rem };
+ let (rest, out)
+ = unsafe { self.slice.split_at_unchecked(len - size) };
+ self.slice = rest;
+ Some(out)
+ },
+ }
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.slice = Default::default();
+ return None;
+ }
+ let start = (len - 1 - n) * self.width;
+ // The back-most chunk may be partial, so clamp its end to the
+ // bit-slice length.
+ let end = cmp::min(start + self.width, self.slice.len());
+ let (rest, out) = unsafe {
+ self.slice
+ .get_unchecked(.. end)
+ .split_at_unchecked(start)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn len(&self) -> usize {
+ match self.slice.len() {
+ 0 => 0,
+ len => {
+ let (n, r) = (len / self.width, len % self.width);
+ n + (r > 0) as usize
+ },
+ }
+ }
+});
+
+#[derive(Debug)]
+#[doc = include_str!("../../doc/slice/iter/ChunksMut.md")]
+pub struct ChunksMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+group!(ChunksMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ if len == 0 {
+ return None;
+ }
+ let mid = cmp::min(len, self.width);
+ let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(mid) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= len || ovf {
+ return None;
+ }
+ let (out, rest) = unsafe {
+ slice
+ .get_unchecked_mut(start ..)
+ .split_at_unchecked_mut_noalias(cmp::min(len - start, self.width))
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ match slice.len() {
+ 0 => None,
+ len => {
+ let rem = len % self.width;
+ let size = if rem == 0 { self.width } else { rem };
+ let mid = len - size;
+ let (rest, out)
+ = unsafe { slice.split_at_unchecked_mut_noalias(mid) };
+ self.slice = rest;
+ Some(out)
+ },
+ }
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ let slice = mem::take(&mut self.slice);
+ if n >= len {
+ return None;
+ }
+ let start = (len - 1 - n) * self.width;
+ // The back-most chunk may be partial, so clamp its end to the
+ // bit-slice length.
+ let end = cmp::min(start + self.width, slice.len());
+ let (rest, out) = unsafe {
+ slice
+ .get_unchecked_mut(.. end)
+ .split_at_unchecked_mut_noalias(start)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn len(&self) -> usize {
+ match self.slice.len() {
+ 0 => 0,
+ len => {
+ let (n, r) = (len / self.width, len % self.width);
+ n + (r > 0) as usize
+ },
+ }
+ }
+});
+
+#[derive(Clone, Debug)]
+#[doc = include_str!("../../doc/slice/iter/ChunksExact.md")]
+pub struct ChunksExact<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// Any remnant of the source bit-slice that will not be yielded as a chunk.
+ extra: &'a BitSlice<T, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+impl<'a, T, O> ChunksExact<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a BitSlice<T, O>, width: usize) -> Self {
+ assert_ne!(width, 0, "Chunk width cannot be 0");
+ let len = slice.len();
+ let rem = len % width;
+ let (slice, extra) = unsafe { slice.split_at_unchecked(len - rem) };
+ Self {
+ slice,
+ extra,
+ width,
+ }
+ }
+
+ /// Gets the remnant bit-slice that the iterator will not yield.
+ ///
+ /// ## Original
+ ///
+ /// [`ChunksExact::remainder`](core::slice::ChunksExact::remainder)
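+ ///
+ /// ## Examples
+ ///
+ /// A brief sketch: with a chunk width of 2 over five bits, the final bit is
+ /// never yielded as a chunk and shows up here instead.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 0, 1, 1, 0];
+ /// let chunks = bits.chunks_exact(2);
+ /// assert_eq!(chunks.remainder(), bits![0]);
+ /// ```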
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn remainder(&self) -> &'a BitSlice<T, O> {
+ self.extra
+ }
+}
+
+group!(ChunksExact => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.slice.len() < self.width {
+ return None;
+ }
+ let (out, rest) = unsafe { self.slice.split_at_unchecked(self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= self.slice.len() || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ let (out, rest) = unsafe {
+ self.slice
+ .get_unchecked(start ..)
+ .split_at_unchecked(self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let len = self.slice.len();
+ if len < self.width {
+ return None;
+ }
+ let (rest, out) =
+ unsafe { self.slice.split_at_unchecked(len - self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.slice = Default::default();
+ return None;
+ }
+ let end = (len - n) * self.width;
+ let (rest, out) = unsafe {
+ self.slice
+ .get_unchecked(.. end)
+ .split_at_unchecked(end - self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.width
+ }
+});
+
+#[derive(Debug)]
+#[doc = include_str!("../../doc/slice/iter/ChunksExactMut.md")]
+pub struct ChunksExactMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// Any remnant of the source bit-slice that will not be yielded as a chunk.
+ extra: &'a mut BitSlice<T::Alias, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+impl<'a, T, O> ChunksExactMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a mut BitSlice<T, O>, width: usize) -> Self {
+ assert_ne!(width, 0, "Chunk width cannot be 0");
+ let len = slice.len();
+ let rem = len % width;
+ let (slice, extra) = unsafe { slice.split_at_unchecked_mut(len - rem) };
+ Self {
+ slice,
+ extra,
+ width,
+ }
+ }
+
+ /// Consumes the iterator, returning the remnant bit-slice that it will not
+ /// yield.
+ ///
+ /// ## Original
+ ///
+ /// [`ChunksExactMut::into_remainder`][0]
+ ///
+ /// [0]: core::slice::ChunksExactMut::into_remainder
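+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch, mirroring the immutable `ChunksExact::remainder`
+ /// accessor:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 5];
+ /// let chunks = bits.chunks_exact_mut(2);
+ /// assert_eq!(chunks.into_remainder(), bits![0]);
+ /// ```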
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_remainder(self) -> &'a mut BitSlice<T::Alias, O> {
+ self.extra
+ }
+
+ /// Takes the remnant bit-slice out of the iterator.
+ ///
+ /// The first time this is called, it will produce the remnant; on each
+ /// subsequent call, it will produce an empty bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 5];
+ /// let mut chunks = bits.chunks_exact_mut(3);
+ ///
+ /// assert_eq!(chunks.take_remainder(), bits![0; 2]);
+ /// assert!(chunks.take_remainder().is_empty());
+ /// ```
+ #[inline]
+ pub fn take_remainder(&mut self) -> &'a mut BitSlice<T::Alias, O> {
+ mem::take(&mut self.extra)
+ }
+}
+
+group!(ChunksExactMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ if slice.len() < self.width {
+ return None;
+ }
+ let (out, rest) =
+ unsafe { slice.split_at_unchecked_mut_noalias(self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let (start, ovf) = n.overflowing_mul(self.width);
+ // `slice.len()` is always a whole multiple of `self.width`, so the
+ // n-th chunk exists exactly when `start` is still in bounds.
+ if start >= slice.len() || ovf {
+ return None;
+ }
+ let (out, rest) = unsafe {
+ slice.get_unchecked_mut(start ..)
+ .split_at_unchecked_mut_noalias(self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ if len < self.width {
+ return None;
+ }
+ let (rest, out) =
+ unsafe { slice.split_at_unchecked_mut_noalias(len - self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ let slice = mem::take(&mut self.slice);
+ if n >= len {
+ return None;
+ }
+ let end = (len - n) * self.width;
+ let (rest, out) = unsafe {
+ slice.get_unchecked_mut(.. end)
+ .split_at_unchecked_mut_noalias(end - self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.width
+ }
+});
+
+#[derive(Clone, Debug)]
+#[doc = include_str!("../../doc/slice/iter/RChunks.md")]
+pub struct RChunks<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+group!(RChunks => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let len = self.slice.len();
+ if len == 0 {
+ return None;
+ }
+ let mid = len - cmp::min(len, self.width);
+ let (rest, out) = unsafe { self.slice.split_at_unchecked(mid) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.slice.len();
+ let (num, ovf) = n.overflowing_mul(self.width);
+ if num >= len || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ let end = len - num;
+ let mid = end.saturating_sub(self.width);
+ let (rest, out) = unsafe {
+ self.slice
+ .get_unchecked(.. end)
+ .split_at_unchecked(mid)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.slice.len() {
+ 0 => None,
+ n => {
+ let rem = n % self.width;
+ let len = if rem == 0 { self.width } else { rem };
+ let (out, rest) = unsafe { self.slice.split_at_unchecked(len) };
+ self.slice = rest;
+ Some(out)
+ },
+ }
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.slice = Default::default();
+ return None;
+ }
+ /* Taking from the back of a reverse iterator means taking from the
+ front of the slice.
+
+ `len` gives us the total number of subslices remaining. In order to find
+ the partition point, we need to subtract `n + 1` full subslices from
+ that count (because the back slice of the iteration might not be full),
+ compute their bit width, and offset *that* from the end of the memory
+ region. This gives us the zero-based index of the partition point
+ between what is returned and what is retained.
+
+ The `end ..` section of the slice is retained, and the very end of the
+ `.. end` section is returned. The head section is split at no more than
+ `self.width` bits below the end marker (this could be the partial
+ section, so a wrapping subtraction cannot be used), and `.. start` is
+ discarded.
+
+ Source:
+ https://doc.rust-lang.org/1.43.0/src/core/slice/mod.rs.html#5141-5156
+ */
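+ /* Worked example (assumed numbers, not taken from the source): a
+ 10-bit slice with `width == 4` holds `len() == 3` chunks. For
+ `nth_back(1)`, `from_end = (3 - 1 - 1) * 4 = 4`, `end = 10 - 4 = 6`,
+ and `start = 2`; the chunk `2 .. 6` is yielded and `self.slice`
+ retains `6 .. 10` for further forward iteration. */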
+ let from_end = (len - 1 - n) * self.width;
+ let end = self.slice.len() - from_end;
+ let start = end.saturating_sub(self.width);
+ let (out, rest) = unsafe { self.slice.split_at_unchecked(end) };
+ self.slice = rest;
+ Some(unsafe { out.get_unchecked(start ..) })
+ }
+
+ fn len(&self) -> usize {
+ match self.slice.len() {
+ 0 => 0,
+ len => {
+ let (n, r) = (len / self.width, len % self.width);
+ n + (r > 0) as usize
+ },
+ }
+ }
+});
+
+#[derive(Debug)]
+#[doc = include_str!("../../doc/slice/iter/RChunksMut.md")]
+pub struct RChunksMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+group!(RChunksMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ if len == 0 {
+ return None;
+ }
+ let mid = len - cmp::min(len, self.width);
+ let (rest, out) = unsafe { slice.split_at_unchecked_mut_noalias(mid) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ let (num, ovf) = n.overflowing_mul(self.width);
+ if num >= len || ovf {
+ return None;
+ }
+ let end = len - num;
+ let mid = end.saturating_sub(self.width);
+ let (rest, out) = unsafe {
+ slice.get_unchecked_mut(.. end)
+ .split_at_unchecked_mut_noalias(mid)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ match slice.len() {
+ 0 => None,
+ n => {
+ let rem = n % self.width;
+ let len = if rem == 0 { self.width } else { rem };
+ let (out, rest) =
+ unsafe { slice.split_at_unchecked_mut_noalias(len) };
+ self.slice = rest;
+ Some(out)
+ },
+ }
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ let slice = mem::take(&mut self.slice);
+ if n >= len {
+ return None;
+ }
+ let from_end = (len - 1 - n) * self.width;
+ let end = slice.len() - from_end;
+ let start = end.saturating_sub(self.width);
+ let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(end) };
+ self.slice = rest;
+ Some(unsafe { out.get_unchecked_mut(start ..) })
+ }
+
+ fn len(&self) -> usize {
+ match self.slice.len() {
+ 0 => 0,
+ len => {
+ let (n, r) = (len / self.width, len % self.width);
+ n + (r > 0) as usize
+ },
+ }
+ }
+});
+
+#[derive(Clone, Debug)]
+#[doc = include_str!("../../doc/slice/iter/RChunksExact.md")]
+pub struct RChunksExact<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// Any remnant of the source bit-slice that will not be yielded as a chunk.
+ extra: &'a BitSlice<T, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+impl<'a, T, O> RChunksExact<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a BitSlice<T, O>, width: usize) -> Self {
+ assert_ne!(width, 0, "Chunk width cannot be 0");
+ let (extra, slice) =
+ unsafe { slice.split_at_unchecked(slice.len() % width) };
+ Self {
+ slice,
+ extra,
+ width,
+ }
+ }
+
+ /// Gets the remnant bit-slice that the iterator will not yield.
+ ///
+ /// ## Original
+ ///
+ /// [`RChunksExact::remainder`](core::slice::RChunksExact::remainder)
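+ ///
+ /// ## Examples
+ ///
+ /// A brief sketch: `rchunks_exact` walks from the back, so the remnant is
+ /// at the front of the bit-slice.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![1, 0, 0, 1, 1];
+ /// let chunks = bits.rchunks_exact(2);
+ /// assert_eq!(chunks.remainder(), bits![1]);
+ /// ```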
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn remainder(&self) -> &'a BitSlice<T, O> {
+ self.extra
+ }
+}
+
+group!(RChunksExact => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let len = self.slice.len();
+ if len < self.width {
+ return None;
+ }
+ let (rest, out) =
+ unsafe { self.slice.split_at_unchecked(len - self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.slice.len();
+ let (split, ovf) = n.overflowing_mul(self.width);
+ if split >= len || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ let end = len - split;
+ let (rest, out) = unsafe {
+ self.slice
+ .get_unchecked(.. end)
+ .split_at_unchecked(end - self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.slice.len() < self.width {
+ return None;
+ }
+ let (out, rest) = unsafe { self.slice.split_at_unchecked(self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.slice.len();
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= len || ovf {
+ self.slice = Default::default();
+ return None;
+ }
+ // At this point, `start` is at least `self.width` less than `len`.
+ let (out, rest) = unsafe {
+ self.slice.get_unchecked(start ..).split_at_unchecked(self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.width
+ }
+});
+
+#[derive(Debug)]
+#[doc = include_str!("../../doc/slice/iter/RChunksExactMut.md")]
+pub struct RChunksExactMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// Any remnant of the source bit-slice that will not be yielded as a chunk.
+ extra: &'a mut BitSlice<T::Alias, O>,
+ /// The width of the produced chunks.
+ width: usize,
+}
+
+impl<'a, T, O> RChunksExactMut<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a mut BitSlice<T, O>, width: usize) -> Self {
+ assert_ne!(width, 0, "Chunk width cannot be 0");
+ let (extra, slice) =
+ unsafe { slice.split_at_unchecked_mut(slice.len() % width) };
+ Self {
+ slice,
+ extra,
+ width,
+ }
+ }
+
+ /// Consumes the iterator, returning the remnant bit-slice that it will not
+ /// yield.
+ ///
+ /// ## Original
+ ///
+ /// [`RChunksExactMut::into_remainder`][0]
+ ///
+ /// [0]: core::slice::RChunksExactMut::into_remainder
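+ ///
+ /// ## Examples
+ ///
+ /// A minimal sketch; as with `RChunksExact::remainder`, the remnant is the
+ /// front of the bit-slice.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 5];
+ /// let chunks = bits.rchunks_exact_mut(2);
+ /// assert_eq!(chunks.into_remainder(), bits![0]);
+ /// ```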
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn into_remainder(self) -> &'a mut BitSlice<T::Alias, O> {
+ self.extra
+ }
+
+ /// Takes the remnant bit-slice out of the iterator.
+ ///
+ /// The first time this is called, it will produce the remnant; on each
+ /// subsequent call, it will produce an empty bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 5];
+ /// let mut chunks = bits.rchunks_exact_mut(3);
+ ///
+ /// assert_eq!(chunks.take_remainder(), bits![0; 2]);
+ /// assert!(chunks.take_remainder().is_empty());
+ /// ```
+ #[inline]
+ pub fn take_remainder(&mut self) -> &'a mut BitSlice<T::Alias, O> {
+ mem::take(&mut self.extra)
+ }
+}
+
+group!(RChunksExactMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ if len < self.width {
+ return None;
+ }
+ let (rest, out) =
+ unsafe { slice.split_at_unchecked_mut_noalias(len - self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ let (split, ovf) = n.overflowing_mul(self.width);
+ if split >= len || ovf {
+ return None;
+ }
+ let end = len - split;
+ let (rest, out) = unsafe {
+ slice.get_unchecked_mut(.. end)
+ .split_at_unchecked_mut_noalias(end - self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ if slice.len() < self.width {
+ return None;
+ }
+ let (out, rest) =
+ unsafe { slice.split_at_unchecked_mut_noalias(self.width) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let slice = mem::take(&mut self.slice);
+ let len = slice.len();
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= len || ovf {
+ return None;
+ }
+ // At this point, `start` is at least `self.width` less than `len`.
+ let (out, rest) = unsafe {
+ slice.get_unchecked_mut(start ..)
+ .split_at_unchecked_mut_noalias(self.width)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.width
+ }
+});
+
+/// Creates the `new` function for the easy grouping iterators.
+macro_rules! new_group {
+ ($($t:ident $($m:ident)? $(.$a:ident())?),+ $(,)?) => { $(
+ impl<'a, T, O> $t<'a, T, O>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ {
+ #[inline]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(
+ slice: &'a $($m)? BitSlice<T, O>,
+ width: usize,
+ ) -> Self {
+ assert_ne!(width, 0, "view width cannot be 0");
+ let slice = slice$(.$a())?;
+ Self { slice, width }
+ }
+ }
+ )+ };
+}
+
+new_group! {
+ Windows,
+ Chunks,
+ ChunksMut mut .alias_mut(),
+ RChunks,
+ RChunksMut mut .alias_mut(),
+}
+
+/// Creates splitting iterators.
+macro_rules! split {
+ (
+ $iter:ident =>
+ $item:ty
+ $(where $alias:ident)? { $next:item $next_back:item }
+ ) => {
+ impl<'a, T, O, P> $iter<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ pub(super) fn new(slice: $item, pred: P) -> Self {
+ Self {
+ slice,
+ pred,
+ done: false,
+ }
+ }
+ }
+
+ impl<T, O, P> Debug for $iter<'_, T, O, P>
+ where
+ T: BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_struct(stringify!($iter))
+ .field("slice", &self.slice)
+ .field("done", &self.done)
+ .finish()
+ }
+ }
+
+ impl<'a, T, O, P> Iterator for $iter<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ type Item = $item;
+
+ #[inline]
+ $next
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.done {
+ (0, Some(0))
+ }
+ else {
+ (1, Some(self.slice.len() + 1))
+ }
+ }
+ }
+
+ impl<'a, T, O, P> DoubleEndedIterator for $iter<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ #[inline]
+ $next_back
+ }
+
+ impl<'a, T, O, P> FusedIterator for $iter<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ }
+
+ impl<'a, T, O, P> SplitIter for $iter<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ #[inline]
+ fn finish(&mut self) -> Option<Self::Item> {
+ if self.done {
+ None
+ }
+ else {
+ self.done = true;
+ Some(mem::take(&mut self.slice))
+ }
+ }
+ }
+ };
+}
+
+#[derive(Clone)]
+#[doc = include_str!("../../doc/slice/iter/Split.md")]
+pub struct Split<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The [`BitSlice`] being split.
+ ///
+ /// [`BitSlice`]: crate::slice::BitSlice
+ slice: &'a BitSlice<T, O>,
+ /// The function used to test whether a split should occur.
+ pred: P,
+ /// Whether the split is finished.
+ done: bool,
+}
+
+split!(Split => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ match self.slice
+ .iter()
+ .by_refs()
+ .enumerate()
+ .position(|(idx, bit)| (self.pred)(idx, bit))
+ {
+ None => self.finish(),
+ Some(idx) => unsafe {
+ let out = self.slice.get_unchecked(.. idx);
+ self.slice = self.slice.get_unchecked(idx + 1 ..);
+ Some(out)
+ },
+ }
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ match self.slice
+ .iter()
+ .by_refs()
+ .enumerate()
+ .rposition(|(idx, bit)| (self.pred)(idx, bit))
+ {
+ None => self.finish(),
+ Some(idx) => unsafe {
+ let out = self.slice.get_unchecked(idx + 1 ..);
+ self.slice = self.slice.get_unchecked(.. idx);
+ Some(out)
+ },
+ }
+ }
+});
+
+#[doc = include_str!("../../doc/slice/iter/SplitMut.md")]
+pub struct SplitMut<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// The function that tests each bit for whether it is a split point.
+ pred: P,
+ /// Marks whether iteration has concluded, without emptying the `slice`.
+ done: bool,
+}
+
+split!(SplitMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ let idx_opt = {
+ let pred = &mut self.pred;
+ self.slice
+ .iter()
+ .by_refs()
+ .enumerate()
+ .position(|(idx, bit)| (pred)(idx, bit))
+ };
+ match idx_opt {
+ None => self.finish(),
+ Some(idx) => unsafe {
+ let slice = mem::take(&mut self.slice);
+ let (out, rest) = slice.split_at_unchecked_mut_noalias(idx);
+ self.slice = rest.get_unchecked_mut(1 ..);
+ Some(out)
+ },
+ }
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ let idx_opt = {
+ let pred = &mut self.pred;
+ self.slice
+ .iter()
+ .by_refs()
+ .enumerate()
+ .rposition(|(idx, bit)| (pred)(idx, bit))
+ };
+ match idx_opt {
+ None => self.finish(),
+ Some(idx) => unsafe {
+ let slice = mem::take(&mut self.slice);
+ let (rest, out) = slice.split_at_unchecked_mut_noalias(idx);
+ self.slice = rest;
+ Some(out.get_unchecked_mut(1 ..))
+ },
+ }
+ }
+});
+
+#[derive(Clone)]
+#[doc = include_str!("../../doc/slice/iter/SplitInclusive.md")]
+pub struct SplitInclusive<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// The function that tests each bit for whether it is a split point.
+ pred: P,
+ /// Marks whether iteration has concluded, without emptying the `slice`.
+ done: bool,
+}
+
+split!(SplitInclusive => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ let len = self.slice.len();
+ let idx = self.slice.iter()
+ .by_refs()
+ .enumerate()
+ .position(|(idx, bit)| (self.pred)(idx, bit))
+ .map(|idx| idx + 1)
+ .unwrap_or(len);
+ if idx == len {
+ self.done = true;
+ }
+ let (out, rest) = unsafe { self.slice.split_at_unchecked(idx) };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+
+ let idx = if self.slice.is_empty() {
+ 0
+ }
+ else {
+ unsafe { self.slice.get_unchecked(.. self.slice.len() - 1) }
+ .iter()
+ .by_refs()
+ .enumerate()
+ .rposition(|(idx, bit)| (self.pred)(idx, bit))
+ .map(|idx| idx + 1)
+ .unwrap_or(0)
+ };
+ if idx == 0 {
+ self.done = true;
+ }
+ let (rest, out) = unsafe { self.slice.split_at_unchecked(idx) };
+ self.slice = rest;
+ Some(out)
+ }
+});
+
+#[doc = include_str!("../../doc/slice/iter/SplitInclusiveMut.md")]
+pub struct SplitInclusiveMut<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// The function that tests each bit for whether it is a split point.
+ pred: P,
+ /// Marks whether iteration has concluded, without emptying the `slice`.
+ done: bool,
+}
+
+split!(SplitInclusiveMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ let pred = &mut self.pred;
+ let len = self.slice.len();
+ let idx = self.slice.iter()
+ .by_refs()
+ .enumerate()
+ .position(|(idx, bit)| (pred)(idx, bit))
+ .map(|idx| idx + 1)
+ .unwrap_or(len);
+ if idx == len {
+ self.done = true;
+ }
+ let (out, rest) = unsafe {
+ mem::take(&mut self.slice)
+ .split_at_unchecked_mut_noalias(idx)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.done {
+ return None;
+ }
+ let pred = &mut self.pred;
+ let idx = if self.slice.is_empty() {
+ 0
+ }
+ else {
+ unsafe { self.slice.get_unchecked(.. self.slice.len() - 1) }
+ .iter()
+ .by_refs()
+ .enumerate()
+ .rposition(|(idx, bit)| (pred)(idx, bit))
+ .map(|idx| idx + 1)
+ .unwrap_or(0)
+ };
+ if idx == 0 {
+ self.done = true;
+ }
+ let (rest, out) = unsafe {
+ mem::take(&mut self.slice)
+ .split_at_unchecked_mut_noalias(idx)
+ };
+ self.slice = rest;
+ Some(out)
+ }
+});
+
+#[derive(Clone)]
+#[doc = include_str!("../../doc/slice/iter/RSplit.md")]
+pub struct RSplit<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The source bit-slice.
+ slice: &'a BitSlice<T, O>,
+ /// The function that tests each bit for whether it is a split point.
+ pred: P,
+ /// Marks whether iteration has concluded, without emptying the `slice`.
+ done: bool,
+}
+
+split!(RSplit => &'a BitSlice<T, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let mut split = Split::<'a, T, O, &mut P> {
+ slice: mem::take(&mut self.slice),
+ pred: &mut self.pred,
+ done: self.done,
+ };
+ let out = split.next_back();
+ let Split { slice, done, .. } = split;
+ self.slice = slice;
+ self.done = done;
+ out
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let mut split = Split::<'a, T, O, &mut P> {
+ slice: mem::take(&mut self.slice),
+ pred: &mut self.pred,
+ done: self.done,
+ };
+ let out = split.next();
+ let Split { slice, done, .. } = split;
+ self.slice = slice;
+ self.done = done;
+ out
+ }
+});
+
+#[doc = include_str!("../../doc/slice/iter/RSplitMut.md")]
+pub struct RSplitMut<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The source bit-slice, marked with the alias tainting.
+ slice: &'a mut BitSlice<T::Alias, O>,
+ /// The function that tests each bit for whether it is a split point.
+ pred: P,
+ /// Marks whether iteration has concluded, without emptying the `slice`.
+ done: bool,
+}
+
+split!(RSplitMut => &'a mut BitSlice<T::Alias, O> {
+ fn next(&mut self) -> Option<Self::Item> {
+ let mut split = SplitMut::<'a, T, O, &mut P> {
+ slice: mem::take(&mut self.slice),
+ pred: &mut self.pred,
+ done: self.done,
+ };
+ let out = split.next_back();
+ let SplitMut { slice, done, .. } = split;
+ self.slice = slice;
+ self.done = done;
+ out
+ }
+
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let mut split = SplitMut::<'a, T, O, &mut P> {
+ slice: mem::take(&mut self.slice),
+ pred: &mut self.pred,
+ done: self.done,
+ };
+ let out = split.next();
+ let SplitMut { slice, done, .. } = split;
+ self.slice = slice;
+ self.done = done;
+ out
+ }
+});
+
+/// [Original](https://github.com/rust-lang/rust/blob/95750ae/library/core/src/slice/iter.rs#L318-L325)
+trait SplitIter: DoubleEndedIterator {
+ /// Marks the underlying iterator as complete, and extracts the remaining
+ /// portion of the bit-slice.
+ fn finish(&mut self) -> Option<Self::Item>;
+}
+
+#[derive(Clone)]
+#[doc = include_str!("../../doc/slice/iter/SplitN.md")]
+pub struct SplitN<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The interior splitter.
+ inner: Split<'a, T, O, P>,
+ /// The number of permissible splits remaining.
+ count: usize,
+}
+
+#[doc = include_str!("../../doc/slice/iter/SplitNMut.md")]
+pub struct SplitNMut<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The interior splitter.
+ inner: SplitMut<'a, T, O, P>,
+ /// The number of permissible splits remaining.
+ count: usize,
+}
+
+#[derive(Clone)]
+#[doc = include_str!("../../doc/slice/iter/RSplitN.md")]
+pub struct RSplitN<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The interior splitter.
+ inner: RSplit<'a, T, O, P>,
+ /// The number of permissible splits remaining.
+ count: usize,
+}
+
+#[doc = include_str!("../../doc/slice/iter/RSplitNMut.md")]
+pub struct RSplitNMut<'a, T, O, P>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+{
+ /// The interior splitter.
+ inner: RSplitMut<'a, T, O, P>,
+ /// The number of permissible splits remaining.
+ count: usize,
+}
+
+/// Creates a splitting iterator with a maximum number of attempts.
+macro_rules! split_n {
+ ($(
+ $outer:ident => $inner:ident => $item:ty $(where $alias:ident)?
+ );+ $(;)?) => { $(
+ impl<'a, T, O, P> $outer<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ {
+ #[inline]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(
+ slice: $item,
+ pred: P,
+ count: usize,
+ ) -> Self {
+ Self {
+ inner: <$inner<'a, T, O, P>>::new(slice, pred),
+ count,
+ }
+ }
+ }
+
+ impl<T, O, P> Debug for $outer<'_, T, O, P>
+ where
+ T: BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool
+ {
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_struct(stringify!($outer))
+ .field("slice", &self.inner.slice)
+ .field("count", &self.count)
+ .finish()
+ }
+ }
+
+ impl<'a, T, O, P> Iterator for $outer<'a, T, O, P>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ $( T::$alias: radium::Radium<<<T as BitStore>::Alias as BitStore>::Mem>, )?
+ {
+ type Item = <$inner <'a, T, O, P> as Iterator>::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
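+ // `count` is the number of pieces still permitted. At `1`, the
+ // rest of the bit-slice is yielded whole via `finish()`; at `0`,
+ // the iterator is exhausted.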
+ match self.count {
+ 0 => None,
+ 1 => {
+ self.count -= 1;
+ self.inner.finish()
+ },
+ _ => {
+ self.count -= 1;
+ self.inner.next()
+ },
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (low, hi) = self.inner.size_hint();
+ (low, hi.map(|h| cmp::min(h, self.count)).or(Some(self.count)))
+ }
+ }
+
+ impl<T, O, P> FusedIterator for $outer<'_, T, O, P>
+ where
+ T: BitStore,
+ O: BitOrder,
+ P: FnMut(usize, &bool) -> bool,
+ $( T::$alias: radium::Radium<<<T as BitStore>::Alias as BitStore>::Mem>, )?
+ {
+ }
+ )+ };
+}
+
+split_n! {
+ SplitN => Split => &'a BitSlice<T, O>;
+ SplitNMut => SplitMut => &'a mut BitSlice<T::Alias, O>;
+ RSplitN => RSplit => &'a BitSlice<T, O>;
+ RSplitNMut => RSplitMut => &'a mut BitSlice<T::Alias, O>;
+}
+
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+#[doc = include_str!("../../doc/slice/iter/IterOnes.md")]
+pub struct IterOnes<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The remaining bit-slice whose `1` bits are to be found.
+ inner: &'a BitSlice<T, O>,
+ /// The offset from the front of the original bit-slice to the current
+ /// `.inner`.
+ front: usize,
+}
+
+impl<'a, T, O> IterOnes<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a BitSlice<T, O>) -> Self {
+ Self {
+ inner: slice,
+ front: 0,
+ }
+ }
+}
+
+impl<T, O> Default for IterOnes<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self {
+ inner: Default::default(),
+ front: 0,
+ }
+ }
+}
+
+impl<T, O> Iterator for IterOnes<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Item = usize;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
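+ // Use the batch-accelerated search when the ordering is statically
+ // known to be `Lsb0` or `Msb0`; otherwise, fall back to a
+ // bit-by-bit scan.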
+ let pos = if let Some(bits) = self.inner.coerce::<T, Lsb0>() {
+ bits.sp_first_one()
+ }
+ else if let Some(bits) = self.inner.coerce::<T, Msb0>() {
+ bits.sp_first_one()
+ }
+ else {
+ self.inner.iter().by_vals().position(|b| b)
+ };
+
+ match pos {
+ Some(n) => {
+ // Split at the index *past* the discovered bit. This is always
+ // safe, as `split_at(len)` produces `(self, [])`.
+ let (_, rest) = unsafe { self.inner.split_at_unchecked(n + 1) };
+ self.inner = rest;
+ let out = self.front + n;
+ // Search resumes from the next index after the found position.
+ self.front = out + 1;
+ Some(out)
+ },
+ None => {
+ *self = Default::default();
+ None
+ },
+ }
+ }
+}
+
+impl<T, O> DoubleEndedIterator for IterOnes<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let pos = if let Some(bits) = self.inner.coerce::<T, Lsb0>() {
+ bits.sp_last_one()
+ }
+ else if let Some(bits) = self.inner.coerce::<T, Msb0>() {
+ bits.sp_last_one()
+ }
+ else {
+ self.inner.iter().by_vals().rposition(|b| b)
+ };
+
+ match pos {
+ Some(n) => {
+ let (rest, _) = unsafe { self.inner.split_at_unchecked(n) };
+ self.inner = rest;
+ Some(self.front + n)
+ },
+ None => {
+ *self = Default::default();
+ None
+ },
+ }
+ }
+}
+
+impl<T, O> ExactSizeIterator for IterOnes<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.count_ones()
+ }
+}
+
+impl<T, O> FusedIterator for IterOnes<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[doc = include_str!("../../doc/slice/iter/IterZeros.md")]
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub struct IterZeros<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// The remaining bit-slice whose `0` bits are to be found.
+ inner: &'a BitSlice<T, O>,
+ /// The offset from the front of the original bit-slice to the current
+ /// `.inner`.
+ front: usize,
+}
+
+impl<'a, T, O> IterZeros<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub(super) fn new(slice: &'a BitSlice<T, O>) -> Self {
+ Self {
+ inner: slice,
+ front: 0,
+ }
+ }
+}
+
+impl<T, O> Default for IterZeros<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self {
+ inner: Default::default(),
+ front: 0,
+ }
+ }
+}
+
+impl<T, O> Iterator for IterZeros<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Item = usize;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ let pos = if let Some(bits) = self.inner.coerce::<T, Lsb0>() {
+ bits.sp_first_zero()
+ }
+ else if let Some(bits) = self.inner.coerce::<T, Msb0>() {
+ bits.sp_first_zero()
+ }
+ else {
+ self.inner.iter().by_vals().position(|b| !b)
+ };
+
+ match pos {
+ Some(n) => {
+ let (_, rest) = unsafe { self.inner.split_at_unchecked(n + 1) };
+ self.inner = rest;
+ let out = self.front + n;
+ self.front = out + 1;
+ Some(out)
+ },
+ None => {
+ *self = Default::default();
+ None
+ },
+ }
+ }
+}
+
+impl<T, O> DoubleEndedIterator for IterZeros<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let pos = if let Some(bits) = self.inner.coerce::<T, Lsb0>() {
+ bits.sp_last_zero()
+ }
+ else if let Some(bits) = self.inner.coerce::<T, Msb0>() {
+ bits.sp_last_zero()
+ }
+ else {
+ self.inner.iter().by_vals().rposition(|b| !b)
+ };
+
+ match pos {
+ Some(n) => {
+ let (rest, _) = unsafe { self.inner.split_at_unchecked(n) };
+ self.inner = rest;
+ Some(self.front + n)
+ },
+ None => {
+ *self = Default::default();
+ None
+ },
+ }
+ }
+}
+
+impl<T, O> ExactSizeIterator for IterZeros<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.count_zeros()
+ }
+}
+
+impl<T, O> FusedIterator for IterZeros<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/* This macro has some very obnoxious call syntax that is necessary to handle
+the different iteration protocols used above.
+
+The `Split` iterators are not `DoubleEndedIterator` or `ExactSizeIterator`, and
+must be excluded from those implementations. However, bounding on `DEI` causes
+`.next_back()` and `.nth_back()` to return opaque associated types, rather than
+the return type from the directly-resolved signatures. As such, the item type of
+the source iterator must also be provided so that methods on it can be named.
+*/
+/// Creates wrappers that unsafely remove one layer of `::Alias` tainting.
+macro_rules! noalias {
+ ($(
+ $from:ident $(($p:ident))?
+ => $alias:ty
+ => $to:ident
+ => $item:ty
+ => $map:path;
+ )+) => { $(
+ #[repr(transparent)]
+ #[doc = include_str!("../../doc/slice/iter/NoAlias.md")]
+ pub struct $to<'a, T, O$(, $p)?>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ $($p: FnMut(usize, &bool) -> bool,)?
+ {
+ /// The actual iterator that this wraps.
+ inner: $from<'a, T, O$(, $p)?>,
+ }
+
+ impl<'a, T, O$(, $p)?> $from<'a, T, O$(, $p)?>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ $($p: FnMut(usize, &bool) -> bool,)?
+ {
+ /// Removes a layer of `::Alias` tainting from the yielded item.
+ ///
+ /// ## Safety
+ ///
+ /// You *must* consume the adapted iterator in a loop that does not
+ /// allow multiple yielded items to exist in the same scope. Each
+ /// yielded item must have a completely non-overlapping lifetime
+ /// from all the others.
+ ///
+ /// The items yielded by this iterator will not have an additional
+ /// alias marker applied to them, so their use in an iteration
+ /// sequence will not be penalized when the surrounding code ensures
+ /// that each item yielded by the iterator is destroyed before the
+ /// next is produced.
+ ///
+ /// This adapter does **not** convert the iterator to use the
+ /// [`T::Mem`] raw underlying type, as it can be applied to an
+ /// iterator over an already-aliased bit-slice and must preserve the
+ /// initial condition. Its *only* effect is to remove the additional
+ /// [`T::Alias`] marker imposed by the mutable iterators.
+ ///
+ /// Violating this requirement causes memory-unsafety and breaks
+ /// Rust’s data-race guarantees.
+ ///
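+ /// ## Examples
+ ///
+ /// A sketch of sound usage through `ChunksMut` (the same pattern holds
+ /// for the other mutable iterators): each yielded chunk is dropped
+ /// before the next one is produced.
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![mut 0; 8];
+ /// for chunk in unsafe { bits.chunks_mut(2).remove_alias() } {
+ /// chunk.fill(true);
+ /// }
+ /// assert_eq!(bits, bits![1; 8]);
+ /// ```
+ ///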
+ /// [`T::Alias`]: crate::store::BitStore::Alias
+ /// [`T::Mem`]: crate::store::BitStore::Mem
+ #[inline]
+ #[must_use = "You must consume this object, preferably immediately \
+ upon creation"]
+ pub unsafe fn remove_alias(self) -> $to<'a, T, O$(, $p)?> {
+ $to { inner: self }
+ }
+ }
+
+ impl<'a, T, O$(, $p)?> Iterator for $to<'a, T, O$(, $p)?>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ $($p: FnMut(usize, &bool) -> bool,)?
+ {
+ type Item = $item;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next().map(|item| unsafe { $map(item) })
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.inner.nth(n).map(|item| unsafe { $map(item) })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.inner.count()
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ self.inner.last().map(|item| unsafe { $map(item) })
+ }
+ }
+
+ impl<'a, T, O$(, $p)?> DoubleEndedIterator for $to<'a, T, O$(, $p)?>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ $($p: FnMut(usize, &bool) -> bool,)?
+ $from<'a, T, O$(, $p)?>: DoubleEndedIterator<Item = $alias>,
+ {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.inner.next_back().map(|item| unsafe { $map(item) })
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.inner.nth_back(n).map(|item| unsafe { $map(item) })
+ }
+ }
+
+ impl<'a, T, O$(, $p)?> ExactSizeIterator for $to<'a, T, O$(, $p)?>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ $($p: FnMut(usize, &bool) -> bool,)?
+ $from<'a, T, O$(, $p)?>: ExactSizeIterator,
+ {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+ }
+
+ impl<'a, T, O$(, $p)?> FusedIterator for $to<'a, T, O$(, $p)?>
+ where
+ T: 'a + BitStore,
+ O: BitOrder,
+ $($p: FnMut(usize, &bool) -> bool,)?
+ $from<'a, T, O$(, $p)?>: FusedIterator,
+ {
+ }
+ )+ };
+}
+
+noalias! {
+ IterMut => <usize as BitSliceIndex<'a, T::Alias, O>>::Mut
+ => IterMutNoAlias => <usize as BitSliceIndex<'a, T, O>>::Mut
+ => BitRef::remove_alias;
+
+ ChunksMut => &'a mut BitSlice<T::Alias, O>
+ => ChunksMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ ChunksExactMut => &'a mut BitSlice<T::Alias, O>
+ => ChunksExactMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ RChunksMut => &'a mut BitSlice<T::Alias, O>
+ => RChunksMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ RChunksExactMut => &'a mut BitSlice<T::Alias, O>
+ => RChunksExactMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ SplitMut (P) => &'a mut BitSlice<T::Alias, O>
+ => SplitMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ SplitInclusiveMut (P) => &'a mut BitSlice<T::Alias, O>
+ => SplitInclusiveMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ RSplitMut (P) => &'a mut BitSlice<T::Alias, O>
+ => RSplitMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ SplitNMut (P) => &'a mut BitSlice<T::Alias, O>
+ => SplitNMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+
+ RSplitNMut (P) => &'a mut BitSlice<T::Alias, O>
+ => RSplitNMutNoAlias => &'a mut BitSlice<T, O>
+ => BitSlice::unalias_mut;
+}
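+
+// An illustrative sketch of the `remove_alias` contract, using only the
+// public prelude: every yielded chunk is consumed and dropped within a
+// single loop iteration, so no two de-aliased references are ever live at
+// the same time.
+//
+//     use bitvec::prelude::*;
+//     let bits = bits![mut u8, Lsb0; 0; 16];
+//     for chunk in unsafe { bits.chunks_mut(4).remove_alias() } {
+//         chunk.fill(true);
+//     }
+//     assert!(bits.all());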
+
+impl<'a, T, O> ChunksExactMutNoAlias<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// See [`ChunksExactMut::into_remainder()`][0].
+ ///
+ /// [0]: crate::slice::ChunksExactMut::into_remainder
+ #[inline]
+ pub fn into_remainder(self) -> &'a mut BitSlice<T, O> {
+ unsafe { BitSlice::unalias_mut(self.inner.into_remainder()) }
+ }
+
+ /// See [`ChunksExactMut::take_remainder()`][0]
+ ///
+ /// [0]: crate::slice::ChunksExactMut::take_remainder
+ #[inline]
+ pub fn take_remainder(&mut self) -> &'a mut BitSlice<T, O> {
+ unsafe { BitSlice::unalias_mut(self.inner.take_remainder()) }
+ }
+}
+
+impl<'a, T, O> RChunksExactMutNoAlias<'a, T, O>
+where
+ T: 'a + BitStore,
+ O: BitOrder,
+{
+ /// See [`RChunksExactMut::into_remainder()`][0]
+ ///
+ /// [0]: crate::slice::RChunksExactMut::into_remainder
+ #[inline]
+ pub fn into_remainder(self) -> &'a mut BitSlice<T, O> {
+ unsafe { BitSlice::unalias_mut(self.inner.into_remainder()) }
+ }
+
+ /// See [`RChunksExactMut::take_remainder()`][0]
+ ///
+ /// [0]: crate::slice::RChunksExactMut::take_remainder
+ #[inline]
+ pub fn take_remainder(&mut self) -> &'a mut BitSlice<T, O> {
+ unsafe { BitSlice::unalias_mut(self.inner.take_remainder()) }
+ }
+}
diff --git a/src/slice/ops.rs b/src/slice/ops.rs
new file mode 100644
index 0000000..03f86ed
--- /dev/null
+++ b/src/slice/ops.rs
@@ -0,0 +1,239 @@
+#![doc = include_str!("../../doc/slice/ops.md")]
+
+use core::ops::{
+ BitAnd,
+ BitAndAssign,
+ BitOr,
+ BitOrAssign,
+ BitXor,
+ BitXorAssign,
+ Index,
+ IndexMut,
+ Not,
+ Range,
+ RangeFrom,
+ RangeFull,
+ RangeInclusive,
+ RangeTo,
+ RangeToInclusive,
+};
+
+use super::{
+ BitSlice,
+ BitSliceIndex,
+};
+use crate::{
+ domain::Domain,
+ order::{
+ BitOrder,
+ Lsb0,
+ Msb0,
+ },
+ store::BitStore,
+};
+
+impl<T1, T2, O1, O2> BitAndAssign<&BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ #[doc = include_str!("../../doc/slice/bitop_assign.md")]
+ fn bitand_assign(&mut self, rhs: &BitSlice<T2, O2>) {
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>())
+ {
+ return this.sp_bitop_assign(that, BitAnd::bitand, BitAnd::bitand);
+ }
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T1, Msb0>(), rhs.coerce::<T1, Msb0>())
+ {
+ return this.sp_bitop_assign(that, BitAnd::bitand, BitAnd::bitand);
+ }
+ for (this, that) in self.as_mut_bitptr_range().zip(rhs.as_bitptr_range())
+ {
+ unsafe {
+ this.write(this.read() & that.read());
+ }
+ }
+ if let Some(rem) = self.get_mut(rhs.len() ..) {
+ rem.fill(false);
+ }
+ }
+}
+
+impl<T1, T2, O1, O2> BitOrAssign<&BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ #[doc = include_str!("../../doc/slice/bitop_assign.md")]
+ fn bitor_assign(&mut self, rhs: &BitSlice<T2, O2>) {
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>())
+ {
+ return this.sp_bitop_assign(that, BitOr::bitor, BitOr::bitor);
+ }
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T1, Msb0>(), rhs.coerce::<T1, Msb0>())
+ {
+ return this.sp_bitop_assign(that, BitOr::bitor, BitOr::bitor);
+ }
+ for (this, that) in self.as_mut_bitptr_range().zip(rhs.as_bitptr_range())
+ {
+ unsafe {
+ this.write(this.read() | that.read());
+ }
+ }
+ }
+}
+
+impl<T1, T2, O1, O2> BitXorAssign<&BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ #[doc = include_str!("../../doc/slice/bitop_assign.md")]
+ fn bitxor_assign(&mut self, rhs: &BitSlice<T2, O2>) {
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>())
+ {
+ return this.sp_bitop_assign(that, BitXor::bitxor, BitXor::bitxor);
+ }
+ if let (Some(this), Some(that)) =
+ (self.coerce_mut::<T1, Msb0>(), rhs.coerce::<T1, Msb0>())
+ {
+ return this.sp_bitop_assign(that, BitXor::bitxor, BitXor::bitxor);
+ }
+ for (this, that) in self.as_mut_bitptr_range().zip(rhs.as_bitptr_range())
+ {
+ unsafe {
+ this.write(this.read() ^ that.read());
+ }
+ }
+ }
+}
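+
+// An illustrative sketch of the assigning operators, using only the public
+// prelude: the operands may differ in storage type and bit-ordering, and a
+// shorter right-hand side is treated as zero-extended, so `&=` clears the
+// excess bits of the left-hand side while `|=` and `^=` leave them unchanged.
+//
+//     use bitvec::prelude::*;
+//     let lhs = bits![mut u8, Msb0; 1, 1, 1, 1];
+//     let rhs = bits![u16, Lsb0; 1, 0];
+//     *lhs &= rhs;
+//     assert_eq!(lhs, bits![1, 0, 0, 0]);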
+
+impl<T, O> Index<usize> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Output = bool;
+
+ /// Looks up a single bit by its semantic index.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![u8, Msb0; 0, 1, 0];
+ /// assert!(!bits[0]); // -----^ | |
+ /// assert!( bits[1]); // --------^ |
+ /// assert!(!bits[2]); // -----------^
+ /// ```
+ ///
+ /// If the index is greater than or equal to the length, indexing will
+ /// panic.
+ ///
+ /// The below test will panic when accessing index 1, as only index 0 is
+ /// valid.
+ ///
+ /// ```rust,should_panic
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, ];
+ /// bits[1]; // --------^
+ /// ```
+ #[inline]
+ fn index(&self, index: usize) -> &Self::Output {
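+ // Single bits have no address of their own, so the returned reference
+ // points at one of two promoted `bool` constants instead.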
+ match *index.index(self) {
+ true => &true,
+ false => &false,
+ }
+ }
+}
+
+/// Implements `Index` and `IndexMut` for each of the given range types.
+macro_rules! index {
+ ($($t:ty),+ $(,)?) => { $(
+ impl<T, O> Index<$t> for BitSlice<T, O>
+ where
+ O: BitOrder,
+ T: BitStore,
+ {
+ type Output = Self;
+
+ #[inline]
+ #[track_caller]
+ fn index(&self, index: $t) -> &Self::Output {
+ index.index(self)
+ }
+ }
+
+ impl<T, O> IndexMut<$t> for BitSlice<T, O>
+ where
+ O: BitOrder,
+ T: BitStore,
+ {
+ #[inline]
+ #[track_caller]
+ fn index_mut(&mut self, index: $t) -> &mut Self::Output {
+ index.index_mut(self)
+ }
+ }
+ )+ };
+}
+
+index! {
+ Range<usize>,
+ RangeFrom<usize>,
+ RangeFull,
+ RangeInclusive<usize>,
+ RangeTo<usize>,
+ RangeToInclusive<usize>,
+}
+
+/** Inverts each bit in the bit-slice.
+
+Unlike the `&=`, `|=`, and `^=` operators, this implementation is guaranteed
+to update each memory element only once, and is not required to traverse every
+live bit in the underlying region.
+**/
+impl<'a, T, O> Not for &'a mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Output = Self;
+
+ #[inline]
+ fn not(self) -> Self::Output {
+ match self.domain_mut() {
+ Domain::Enclave(mut elem) => {
+ elem.invert();
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(mut elem) = head {
+ elem.invert();
+ }
+ for elem in body {
+ elem.store_value(!elem.load_value());
+ }
+ if let Some(mut elem) = tail {
+ elem.invert();
+ }
+ },
+ }
+ self
+ }
+}
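+
+// An illustrative sketch, using only the public prelude: `!` takes the
+// `&mut BitSlice`, inverts it in place, and returns the same reference.
+//
+//     use bitvec::prelude::*;
+//     let bits = bits![mut u8, Msb0; 0, 1, 0, 0, 1];
+//     let bits = !bits;
+//     assert_eq!(bits, bits![1, 0, 1, 1, 0]);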
diff --git a/src/slice/specialization.rs b/src/slice/specialization.rs
new file mode 100644
index 0000000..0dc5161
--- /dev/null
+++ b/src/slice/specialization.rs
@@ -0,0 +1,81 @@
+#![doc = include_str!("../../doc/slice/specialization.md")]
+
+use funty::Integral;
+
+use super::BitSlice;
+use crate::{
+ devel as dvl,
+ mem,
+ order::BitOrder,
+ store::BitStore,
+};
+
+mod lsb0;
+mod msb0;
+
+/// Processor width, used for chunking.
+const WORD_BITS: usize = mem::bits_of::<usize>();
+
+/// Tests whether the masked portion of an integer has a `0` bit in it.
+fn has_zero<T>(val: T, mask: T) -> bool
+where T: Integral {
+ val | !mask != !T::ZERO
+}
+
+/// Tests whether the masked portion of an integer has a `1` bit in it.
+fn has_one<T>(val: T, mask: T) -> bool
+where T: Integral {
+ val & mask != T::ZERO
+}
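+
+// Worked example: with `val = 0b1111_0110u8` and `mask = 0b0000_1111`,
+// `has_one` computes `val & mask == 0b0110 != 0` and `has_zero` computes
+// `val | !mask == 0b1111_0110 != 0b1111_1111`, so both report `true` for the
+// masked low nibble.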
+
+impl<T, O> BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Forces the storage type parameter to be its accessor type.
+ ///
+ /// Functions must use this when working with maybe-overlapping regions
+ /// within a single bit-slice, as the accessor is always tolerant of
+ /// aliasing.
+ #[inline]
+ fn as_accessor(&mut self) -> &BitSlice<T::Access, O> {
+ unsafe { &*(self as *const Self as *const BitSlice<T::Access, O>) }
+ }
+
+ /// Attempts to change a bit-slice reference to caller-supplied type
+ /// parameters.
+ ///
+ /// If `<T, O>` is identical to `<T2, O2>`, this returns `Some` with the
+ /// bit-slice reference unchanged in value but changed in type. If the types
+ /// differ, it returns `None`. This is useful for creating statically-known
+ /// bit-slice types within generic contexts.
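+ /// For example, the `BitAndAssign`/`BitOrAssign`/`BitXorAssign`
+ /// implementations in `ops.rs` use this (and `coerce_mut`) to detect when
+ /// both operands are already `<T1, Lsb0>` or `<T1, Msb0>` and can take the
+ /// specialized batch path.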
+ pub(crate) fn coerce<T2, O2>(&self) -> Option<&BitSlice<T2, O2>>
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ if dvl::match_types::<T, O, T2, O2>() {
+ Some(unsafe { &*(self as *const Self as *const BitSlice<T2, O2>) })
+ }
+ else {
+ None
+ }
+ }
+
+ /// See [`.coerce()`].
+ ///
+ /// [`.coerce()`]: Self::coerce
+ pub(crate) fn coerce_mut<T2, O2>(&mut self) -> Option<&mut BitSlice<T2, O2>>
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ if dvl::match_types::<T, O, T2, O2>() {
+ Some(unsafe { &mut *(self as *mut Self as *mut BitSlice<T2, O2>) })
+ }
+ else {
+ None
+ }
+ }
+}
diff --git a/src/slice/specialization/lsb0.rs b/src/slice/specialization/lsb0.rs
new file mode 100644
index 0000000..2dde4c9
--- /dev/null
+++ b/src/slice/specialization/lsb0.rs
@@ -0,0 +1,310 @@
+//! Specializations for `BitSlice<_, Lsb0>`.
+
+use core::iter;
+
+use funty::Integral;
+use wyz::{
+ bidi::BidiIterator,
+ range::RangeExt,
+};
+
+use super::{
+ has_one,
+ has_zero,
+ WORD_BITS,
+};
+use crate::{
+ domain::Domain,
+ field::BitField,
+ mem::bits_of,
+ order::Lsb0,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+impl<T> BitSlice<T, Lsb0>
+where T: BitStore
+{
+ /// Accelerates Boolean arithmetic.
+ ///
+ /// This applies a Boolean-arithmetic function across all the bits in a
+ /// pair. The secondary bit-slice is zero-extended if it expires before
+ /// `self` does.
+ ///
+ /// Because the two bit-slices share the same types, this is able to
+ /// batch-load `usize` chunks from each, apply the arithmetic to them, and
+ /// write the result back into `self`. Any leftover bits are handled
+ /// individually.
+ pub(crate) fn sp_bitop_assign(
+ &mut self,
+ rhs: &Self,
+ word_op: fn(usize, usize) -> usize,
+ bool_op: fn(bool, bool) -> bool,
+ ) {
+ let (mut this, mut that) = (self, rhs);
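+ // Drain whole processor words from the front of both bit-slices,
+ // combining them with one `usize` load/store per step; any remaining
+ // bits are handled individually below.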
+ while this.len() >= WORD_BITS && that.len() >= WORD_BITS {
+ unsafe {
+ let (l, left) = this.split_at_unchecked_mut_noalias(WORD_BITS);
+ let (r, right) = that.split_at_unchecked(WORD_BITS);
+ this = left;
+ that = right;
+ let (a, b) = (l.load_le::<usize>(), r.load_le::<usize>());
+ l.store_le(word_op(a, b));
+ }
+ }
+ // Note: it might actually be possible to do a partial-word load/store
+ // to exhaust the shorter bit-slice. Investigate further.
+ for (l, r) in this
+ .as_mut_bitptr_range()
+ .zip(that.iter().by_vals().chain(iter::repeat(false)))
+ {
+ unsafe {
+ l.write(bool_op(l.read(), r));
+ }
+ }
+ }
+
+ /// Accelerates copies between disjoint bit-slices with batch loads.
+ pub(crate) fn sp_copy_from_bitslice(&mut self, src: &Self) {
+ assert_eq!(
+ self.len(),
+ src.len(),
+ "copying between bit-slices requires equal lengths",
+ );
+
+ for (to, from) in unsafe { self.chunks_mut(WORD_BITS).remove_alias() }
+ .zip(src.chunks(WORD_BITS))
+ {
+ to.store_le::<usize>(from.load_le::<usize>());
+ }
+ }
+
+ /// Accelerates possibly-overlapping copies within a single bit-slice with
+ /// batch loads.
+ pub(crate) unsafe fn sp_copy_within_unchecked(
+ &mut self,
+ src: impl RangeExt<usize>,
+ dest: usize,
+ ) {
+ let source = src.normalize(None, self.len());
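+ // When the destination overlaps the source, walk the chunks from the
+ // back so that no source chunk is overwritten before it is read.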
+ let rev = source.contains(&dest);
+ let dest = dest .. dest + source.len();
+
+ let this = self.as_accessor();
+ let from = this
+ .get_unchecked(source)
+ .chunks(WORD_BITS)
+ .map(|bits| bits as *const BitSlice<T::Access, Lsb0>);
+ let to = this.get_unchecked(dest).chunks(WORD_BITS).map(|bits| {
+ bits as *const BitSlice<T::Access, Lsb0>
+ as *mut BitSlice<T::Access, Lsb0>
+ });
+ for (from, to) in from.zip(to).bidi(rev) {
+ let value = (*from).load_le::<usize>();
+ (*to).store_le::<usize>(value);
+ }
+ }
+
+ /// Accelerates equality checking with batch loads.
+ pub(crate) fn sp_eq(&self, other: &Self) -> bool {
+ self.len() == other.len()
+ && self
+ .chunks(WORD_BITS)
+ .zip(other.chunks(WORD_BITS))
+ .all(|(a, b)| a.load_le::<usize>() == b.load_le::<usize>())
+ }
+
+ /// Seeks the index of the first `1` bit in the bit-slice.
+ pub(crate) fn sp_first_one(&self) -> Option<usize> {
+ let mut accum = 0;
+
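+ // `Lsb0` places the first bit of each element in the least significant
+ // position, so `trailing_zeros` counts the bits skipped before the
+ // first `1`. Partial elements load with their dead bits masked to zero,
+ // so the head index is subtracted to discount them.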
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value();
+ if has_one(val, elem.mask().into_inner()) {
+ accum += val.trailing_zeros() as usize
+ - elem.head().into_inner() as usize;
+ return Some(accum);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ let val = elem.load_value();
+ accum += val.trailing_zeros() as usize
+ - elem.head().into_inner() as usize;
+ if has_one(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value) {
+ accum += val.trailing_zeros() as usize;
+ if has_one(val, !<T::Mem as Integral>::ZERO) {
+ return Some(accum);
+ }
+ }
+
+ if let Some(elem) = tail {
+ let val = elem.load_value();
+ if has_one(val, elem.mask().into_inner()) {
+ accum += val.trailing_zeros() as usize;
+ return Some(accum);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Seeks the index of the last `1` bit in the bit-slice.
+ pub(crate) fn sp_last_one(&self) -> Option<usize> {
+ let mut out = self.len();
+
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ if has_one(val, elem.mask().into_inner()) {
+ out -= val.leading_zeros() as usize - dead_bits as usize;
+ return Some(out - 1);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = tail {
+ let val = elem.load_value();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ out -= val.leading_zeros() as usize - dead_bits;
+ if has_one(val, elem.mask().into_inner()) {
+ return Some(out - 1);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value).rev() {
+ out -= val.leading_zeros() as usize;
+ if has_one(val, !<T::Mem as Integral>::ZERO) {
+ return Some(out - 1);
+ }
+ }
+
+ if let Some(elem) = head {
+ let val = elem.load_value();
+ if has_one(val, elem.mask().into_inner()) {
+ out -= val.leading_zeros() as usize;
+ return Some(out - 1);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Seeks the index of the first `0` bit in the bit-slice.
+ pub(crate) fn sp_first_zero(&self) -> Option<usize> {
+ let mut accum = 0;
+
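+ // Dead bits are forced to `1` by OR-ing with the inverted mask, so the
+ // `trailing_ones` counts skip over them and they are never reported as
+ // zeros.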
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ accum += val.trailing_ones() as usize
+ - elem.head().into_inner() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ let val = elem.load_value() | !elem.mask().into_inner();
+
+ accum += val.trailing_ones() as usize
+ - elem.head().into_inner() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value) {
+ accum += val.trailing_ones() as usize;
+ if has_zero(val, !<T::Mem as Integral>::ZERO) {
+ return Some(accum);
+ }
+ }
+
+ if let Some(elem) = tail {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ accum += val.trailing_ones() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Seeks the index of the last `0` bit in the bit-slice.
+ pub(crate) fn sp_last_zero(&self) -> Option<usize> {
+ let mut out = self.len();
+
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ out -= val.leading_ones() as usize - dead_bits as usize;
+ return Some(out - 1);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = tail {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ out -= val.leading_ones() as usize - dead_bits;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(out - 1);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value).rev() {
+ out -= val.leading_ones() as usize;
+ if has_zero(val, !<T::Mem as Integral>::ZERO) {
+ return Some(out - 1);
+ }
+ }
+
+ if let Some(elem) = head {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ if has_zero(val, elem.mask().into_inner()) {
+ out -= val.leading_ones() as usize;
+ return Some(out - 1);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Accelerates swapping memory.
+ pub(crate) fn sp_swap_with_bitslice(&mut self, other: &mut Self) {
+ for (this, that) in unsafe {
+ self.chunks_mut(WORD_BITS)
+ .remove_alias()
+ .zip(other.chunks_mut(WORD_BITS).remove_alias())
+ } {
+ let (a, b) = (this.load_le::<usize>(), that.load_le::<usize>());
+ this.store_le(b);
+ that.store_le(a);
+ }
+ }
+}
diff --git a/src/slice/specialization/msb0.rs b/src/slice/specialization/msb0.rs
new file mode 100644
index 0000000..62513f1
--- /dev/null
+++ b/src/slice/specialization/msb0.rs
@@ -0,0 +1,305 @@
+//! Specializations for `BitSlice<_, Msb0>`.
+
+use core::iter;
+
+use funty::Integral;
+use wyz::{
+ bidi::BidiIterator,
+ range::RangeExt,
+};
+
+use super::{
+ has_one,
+ has_zero,
+ WORD_BITS,
+};
+use crate::{
+ domain::Domain,
+ field::BitField,
+ mem::bits_of,
+ order::Msb0,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+impl<T> BitSlice<T, Msb0>
+where T: BitStore
+{
+ /// Accelerates Boolean arithmetic.
+ ///
+ /// This applies a Boolean-arithmetic function across all the bits in a
+ /// pair. The secondary bit-slice is zero-extended if it expires before
+ /// `self` does.
+ ///
+ /// Because the two bit-slices share the same types, this is able to
+ /// batch-load `usize` chunks from each, apply the arithmetic to them, and
+ /// write the result back into `self`. Any leftover bits are handled
+ /// individually.
+ pub(crate) fn sp_bitop_assign(
+ &mut self,
+ rhs: &Self,
+ word_op: fn(usize, usize) -> usize,
+ bool_op: fn(bool, bool) -> bool,
+ ) {
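+ // This mirrors the `Lsb0` specialization, but uses the big-endian
+ // (`_be`) element loads and stores that match `Msb0` ordering.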
+ let (mut this, mut that) = (self, rhs);
+ while this.len() >= WORD_BITS && that.len() >= WORD_BITS {
+ unsafe {
+ let (l, left) = this.split_at_unchecked_mut_noalias(WORD_BITS);
+ let (r, right) = that.split_at_unchecked(WORD_BITS);
+ this = left;
+ that = right;
+ let (a, b) = (l.load_be::<usize>(), r.load_be::<usize>());
+ l.store_be(word_op(a, b));
+ }
+ }
+ for (l, r) in this
+ .as_mut_bitptr_range()
+ .zip(that.iter().by_vals().chain(iter::repeat(false)))
+ {
+ unsafe {
+ l.write(bool_op(l.read(), r));
+ }
+ }
+ }
+
+ /// Accelerates copies between disjoint bit-slices with batch loads.
+ pub(crate) fn sp_copy_from_bitslice(&mut self, src: &Self) {
+ assert_eq!(
+ self.len(),
+ src.len(),
+ "copying between bit-slices requires equal lengths",
+ );
+
+ for (to, from) in unsafe { self.chunks_mut(WORD_BITS).remove_alias() }
+ .zip(src.chunks(WORD_BITS))
+ {
+ to.store_be::<usize>(from.load_be::<usize>());
+ }
+ }
+
+ /// Accelerates possibly-overlapping copies within a single bit-slice with
+ /// batch loads.
+ pub(crate) unsafe fn sp_copy_within_unchecked(
+ &mut self,
+ src: impl RangeExt<usize>,
+ dest: usize,
+ ) {
+ let source = src.normalize(None, self.len());
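+ // When the destination overlaps the source, walk the chunks from the
+ // back so that no source chunk is overwritten before it is read.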
+ let rev = source.contains(&dest);
+ let dest = dest .. dest + source.len();
+
+ let this = self.as_accessor();
+ let from = this
+ .get_unchecked(source)
+ .chunks(WORD_BITS)
+ .map(|bits| bits as *const BitSlice<T::Access, Msb0>);
+ let to = this.get_unchecked(dest).chunks(WORD_BITS).map(|bits| {
+ bits as *const BitSlice<T::Access, Msb0>
+ as *mut BitSlice<T::Access, Msb0>
+ });
+ for (from, to) in from.zip(to).bidi(rev) {
+ let value = (*from).load_be::<usize>();
+ (*to).store_be::<usize>(value);
+ }
+ }
+
+ /// Accelerates equality checking with batch loads.
+ pub(crate) fn sp_eq(&self, other: &Self) -> bool {
+ self.len() == other.len()
+ && self
+ .chunks(WORD_BITS)
+ .zip(other.chunks(WORD_BITS))
+ .all(|(a, b)| a.load_be::<usize>() == b.load_be::<usize>())
+ }
+
+ /// Seeks the index of the first `1` bit in the bit-slice.
+ pub(crate) fn sp_first_one(&self) -> Option<usize> {
+ let mut accum = 0;
+
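+ // The mirror of the `Lsb0` search: the first bit of each `Msb0` element
+ // is its most significant, so `leading_zeros` replaces the trailing
+ // count.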
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value();
+ accum += val.leading_zeros() as usize
+ - elem.head().into_inner() as usize;
+ if has_one(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ let val = elem.load_value();
+ accum += val.leading_zeros() as usize
+ - elem.head().into_inner() as usize;
+ if has_one(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value) {
+ accum += val.leading_zeros() as usize;
+ if has_one(val, !<T::Mem as Integral>::ZERO) {
+ return Some(accum);
+ }
+ }
+
+ if let Some(elem) = tail {
+ let val = elem.load_value();
+ accum += val.leading_zeros() as usize;
+ if has_one(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Seeks the index of the last `1` bit in the bit-slice.
+ pub(crate) fn sp_last_one(&self) -> Option<usize> {
+ let mut out = self.len().checked_sub(1)?;
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ if has_one(val, elem.mask().into_inner()) {
+ out -= val.trailing_zeros() as usize - dead_bits as usize;
+ return Some(out);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = tail {
+ let val = elem.load_value();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ out -= val.trailing_zeros() as usize - dead_bits;
+ if has_one(val, elem.mask().into_inner()) {
+ return Some(out);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value).rev() {
+ out -= val.trailing_zeros() as usize;
+ if has_one(val, !<T::Mem as Integral>::ZERO) {
+ return Some(out);
+ }
+ }
+
+ if let Some(elem) = head {
+ let val = elem.load_value();
+ if has_one(val, elem.mask().into_inner()) {
+ out -= val.trailing_zeros() as usize;
+ return Some(out);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Seeks the index of the first `0` bit in the bit-slice.
+ pub(crate) fn sp_first_zero(&self) -> Option<usize> {
+ let mut accum = 0;
+
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ accum += val.leading_ones() as usize
+ - elem.head().into_inner() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ accum += val.leading_ones() as usize
+ - elem.head().into_inner() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value) {
+ accum += val.leading_ones() as usize;
+ if has_zero(val, !<T::Mem as Integral>::ZERO) {
+ return Some(accum);
+ }
+ }
+
+ if let Some(elem) = tail {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ accum += val.leading_ones() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(accum);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Seeks the index of the last `0` bit in the bit-slice.
+ pub(crate) fn sp_last_zero(&self) -> Option<usize> {
+ let mut out = self.len().checked_sub(1)?;
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ if has_zero(val, elem.mask().into_inner()) {
+ out -= val.trailing_ones() as usize - dead_bits;
+ return Some(out);
+ }
+ None
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = tail {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ let dead_bits =
+ bits_of::<T::Mem>() - elem.tail().into_inner() as usize;
+ out -= val.trailing_ones() as usize - dead_bits;
+ if has_zero(val, elem.mask().into_inner()) {
+ return Some(out);
+ }
+ }
+
+ for val in body.iter().map(BitStore::load_value).rev() {
+ out -= val.trailing_ones() as usize;
+ if has_zero(val, !<T::Mem as Integral>::ZERO) {
+ return Some(out);
+ }
+ }
+
+ if let Some(elem) = head {
+ let val = elem.load_value() | !elem.mask().into_inner();
+ if has_zero(val, elem.mask().into_inner()) {
+ out -= val.trailing_ones() as usize;
+ return Some(out);
+ }
+ }
+
+ None
+ },
+ }
+ }
+
+ /// Accelerates swapping memory.
+ pub(crate) fn sp_swap_with_bitslice(&mut self, other: &mut Self) {
+ for (this, that) in unsafe {
+ self.chunks_mut(WORD_BITS)
+ .remove_alias()
+ .zip(other.chunks_mut(WORD_BITS).remove_alias())
+ } {
+ let (a, b) = (this.load_be::<usize>(), that.load_be::<usize>());
+ this.store_be(b);
+ that.store_be(a);
+ }
+ }
+}
diff --git a/src/slice/tests.rs b/src/slice/tests.rs
new file mode 100644
index 0000000..dd03835
--- /dev/null
+++ b/src/slice/tests.rs
@@ -0,0 +1,273 @@
+//! Unit tests for bit-slices.
+
+#![cfg(test)]
+
+use core::cell::Cell;
+
+use rand::random;
+
+use crate::{
+ order::HiLo,
+ prelude::*,
+};
+
+mod api;
+mod iter;
+mod ops;
+mod traits;
+
+#[test]
+#[allow(clippy::many_single_char_names)]
+fn copying() {
+ let a = bits![mut u8, Lsb0; 0; 4];
+ let b = bits![u16, Msb0; 0, 1, 0, 1];
+ a.clone_from_bitslice(b);
+ assert_eq!(a, b);
+
+ let mut a = random::<[u32; 3]>();
+ let b = random::<[u32; 3]>();
+
+ a.view_bits_mut::<Lsb0>()[4 .. 92]
+ .copy_from_bitslice(&b.view_bits::<Lsb0>()[4 .. 92]);
+ assert_eq!([a[0] & 0xFF_FF_FF_F0, a[1], a[2] & 0x0F_FF_FF_FF], [
+ b[0] & 0xFF_FF_FF_F0,
+ b[1],
+ b[2] & 0x0F_FF_FF_FF
+ ],);
+
+ let mut c = random::<u32>();
+ let d = random::<u32>();
+ c.view_bits_mut::<Msb0>()[4 .. 28]
+ .copy_from_bitslice(&d.view_bits::<Msb0>()[4 .. 28]);
+ assert_eq!(c & 0x0F_FF_FF_F0, d & 0x0F_FF_FF_F0);
+
+ let mut e = 0x01_23_45_67u32;
+ let f = 0x89_AB_CD_EFu32;
+ e.view_bits_mut::<HiLo>()[.. 28]
+ .copy_from_bitslice(&f.view_bits::<HiLo>()[4 ..]);
+ assert_eq!(e, 0x91_B8_DA_FC);
+ // 28 .. 32 ^
+
+ let mut g = random::<[u32; 3]>();
+ let mut h = random::<[u32; 3]>();
+ let i = g;
+ let j = h;
+ g.view_bits_mut::<Lsb0>()
+ .swap_with_bitslice(h.view_bits_mut::<Lsb0>());
+ assert_eq!((g, h), (j, i));
+ g.view_bits_mut::<Msb0>()
+ .swap_with_bitslice(h.view_bits_mut::<Msb0>());
+ assert_eq!((g, h), (i, j));
+ g.view_bits_mut::<Lsb0>()
+ .swap_with_bitslice(h.view_bits_mut::<Msb0>());
+ assert_eq!(g.view_bits::<Lsb0>(), j.view_bits::<Msb0>());
+ assert_eq!(h.view_bits::<Msb0>(), i.view_bits::<Lsb0>());
+
+ let mut k = random::<[u32; 3]>();
+ let j = k;
+ unsafe {
+ k.view_bits_mut::<Lsb0>().copy_within_unchecked(32 .., 0);
+ assert_eq!(k, [j[1], j[2], j[2]]);
+ k.view_bits_mut::<Msb0>().copy_within_unchecked(.. 64, 32);
+ assert_eq!(k, [j[1], j[1], j[2]]);
+ k.view_bits_mut::<HiLo>().copy_within_unchecked(32 .., 0);
+ assert_eq!(k, [j[1], j[2], j[2]]);
+ }
+}
+
+#[test]
+fn writing() {
+ let bits = bits![mut 0; 2];
+
+ bits.set(0, true);
+ unsafe {
+ bits.set_unchecked(1, true);
+ }
+ assert_eq!(bits, bits![1;2]);
+
+ assert!(bits.replace(0, false));
+ assert!(unsafe { bits.replace_unchecked(1, false) });
+ assert_eq!(bits, bits![0;2]);
+}
+
+#[test]
+fn bit_counting() {
+ let data = [0x12u8, 0xFE, 0x34, 0xDC];
+ let lsb0 = data.view_bits::<Lsb0>();
+ let msb0 = data.view_bits::<Msb0>();
+
+ assert_eq!(lsb0[2 .. 6].count_ones(), 1);
+ assert_eq!(lsb0[2 .. 6].count_zeros(), 3);
+ assert_eq!(msb0[2 .. 30].count_ones(), 17);
+ assert_eq!(msb0[2 .. 30].count_zeros(), 11);
+
+ assert!(!bits![].any());
+ assert!(!bits![0, 0].any());
+ assert!(bits![0, 1].any());
+
+ assert!(bits![].all());
+ assert!(!bits![0, 1].all());
+ assert!(bits![1, 1].all());
+
+ assert!(bits![].not_any());
+ assert!(bits![0, 0].not_any());
+ assert!(!bits![0, 1].not_any());
+
+ assert!(!bits![].not_all());
+ assert!(bits![0, 1].not_all());
+ assert!(!bits![1, 1].not_all());
+
+ assert!(!bits![0; 2].some());
+ assert!(bits![0, 1].some());
+ assert!(!bits![1; 2].some());
+
+ assert!(bits![usize, Lsb0;].first_one().is_none());
+ assert!(bits![usize, Msb0;].first_one().is_none());
+ assert!(bits![usize, Lsb0;].last_one().is_none());
+ assert!(bits![usize, Msb0;].last_one().is_none());
+ assert!(bits![usize, Lsb0;].first_zero().is_none());
+ assert!(bits![usize, Msb0;].first_zero().is_none());
+ assert!(bits![usize, Lsb0;].last_zero().is_none());
+ assert!(bits![usize, Msb0;].last_zero().is_none());
+
+ assert!([0u8; 1].view_bits::<Lsb0>()[1 .. 7].first_one().is_none());
+ assert!([0u8; 3].view_bits::<Lsb0>()[1 .. 23].first_one().is_none());
+ assert!([0u8; 1].view_bits::<Msb0>()[1 .. 7].first_one().is_none());
+ assert!([0u8; 3].view_bits::<Msb0>()[1 .. 23].first_one().is_none());
+
+ assert!([0u8; 1].view_bits::<Lsb0>()[1 .. 7].last_one().is_none());
+ assert!([0u8; 3].view_bits::<Lsb0>()[1 .. 23].last_one().is_none());
+ assert!([0u8; 1].view_bits::<Msb0>()[1 .. 7].last_one().is_none());
+ assert!([0u8; 3].view_bits::<Msb0>()[1 .. 23].last_one().is_none());
+
+ assert!([!0u8; 1].view_bits::<Lsb0>()[1 .. 7].first_zero().is_none());
+ assert!(
+ [!0u8; 3].view_bits::<Lsb0>()[1 .. 23]
+ .first_zero()
+ .is_none()
+ );
+ assert!([!0u8; 1].view_bits::<Msb0>()[1 .. 7].first_zero().is_none());
+ assert!(
+ [!0u8; 3].view_bits::<Msb0>()[1 .. 23]
+ .first_zero()
+ .is_none()
+ );
+
+ assert!([!0u8; 1].view_bits::<Lsb0>()[1 .. 7].last_zero().is_none());
+ assert!([!0u8; 3].view_bits::<Lsb0>()[1 .. 23].last_zero().is_none());
+ assert!([!0u8; 1].view_bits::<Msb0>()[1 .. 7].last_zero().is_none());
+ assert!([!0u8; 3].view_bits::<Msb0>()[1 .. 23].last_zero().is_none());
+
+ let data = 0b0100_0100u8;
+ assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].first_one(), Some(1));
+ assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].last_one(), Some(5));
+ assert_eq!(data.view_bits::<Msb0>()[1 .. 7].first_one(), Some(0));
+ assert_eq!(data.view_bits::<Msb0>()[1 .. 7].last_one(), Some(4));
+
+ let data = 0b1011_1011u8;
+ assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].first_zero(), Some(1));
+ assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].last_zero(), Some(5));
+ assert_eq!(data.view_bits::<Msb0>()[1 .. 7].first_zero(), Some(0));
+ assert_eq!(data.view_bits::<Msb0>()[1 .. 7].last_zero(), Some(4));
+
+ let data = [0u8, 0b1001_0110, 0];
+ assert_eq!(data.view_bits::<Lsb0>()[12 ..].first_one(), Some(0));
+ assert_eq!(data.view_bits::<Lsb0>()[4 ..].first_one(), Some(5));
+ assert_eq!(data.view_bits::<Lsb0>()[.. 12].first_one(), Some(9));
+ assert_eq!(data.view_bits::<Msb0>()[12 ..].first_one(), Some(1));
+ assert_eq!(data.view_bits::<Msb0>()[4 ..].first_one(), Some(4));
+ assert_eq!(data.view_bits::<Msb0>()[.. 12].first_one(), Some(8));
+
+ assert_eq!(data.view_bits::<Lsb0>()[12 ..].last_one(), Some(3));
+ assert_eq!(data.view_bits::<Lsb0>()[4 ..].last_one(), Some(11));
+ assert_eq!(data.view_bits::<Lsb0>()[.. 12].last_one(), Some(10));
+ assert_eq!(data.view_bits::<Msb0>()[12 ..].last_one(), Some(2));
+ assert_eq!(data.view_bits::<Msb0>()[4 ..].last_one(), Some(10));
+ assert_eq!(data.view_bits::<Msb0>()[.. 12].last_one(), Some(11));
+
+ let data = [!0u8, 0b1001_0110, !0];
+ assert_eq!(data.view_bits::<Lsb0>()[12 ..].first_zero(), Some(1));
+ assert_eq!(data.view_bits::<Lsb0>()[4 ..].first_zero(), Some(4));
+ assert_eq!(data.view_bits::<Lsb0>()[.. 12].first_zero(), Some(8));
+ assert_eq!(data.view_bits::<Msb0>()[12 ..].first_zero(), Some(0));
+ assert_eq!(data.view_bits::<Msb0>()[4 ..].first_zero(), Some(5));
+ assert_eq!(data.view_bits::<Msb0>()[.. 12].first_zero(), Some(9));
+
+ assert_eq!(data.view_bits::<Lsb0>()[12 ..].last_zero(), Some(2));
+ assert_eq!(data.view_bits::<Lsb0>()[4 ..].last_zero(), Some(10));
+ assert_eq!(data.view_bits::<Lsb0>()[.. 12].last_zero(), Some(11));
+ assert_eq!(data.view_bits::<Msb0>()[12 ..].last_zero(), Some(3));
+ assert_eq!(data.view_bits::<Msb0>()[4 ..].last_zero(), Some(11));
+ assert_eq!(data.view_bits::<Msb0>()[.. 12].last_zero(), Some(10));
+
+ assert_eq!(15u8.view_bits::<Lsb0>().leading_ones(), 4);
+ assert_eq!(15u8.view_bits::<Msb0>().leading_ones(), 0);
+
+ assert_eq!(15u8.view_bits::<Lsb0>().leading_zeros(), 0);
+ assert_eq!(15u8.view_bits::<Msb0>().leading_zeros(), 4);
+
+ assert_eq!(15u8.view_bits::<Lsb0>().trailing_ones(), 0);
+ assert_eq!(15u8.view_bits::<Msb0>().trailing_ones(), 4);
+
+ assert_eq!(15u8.view_bits::<Lsb0>().trailing_zeros(), 4);
+ assert_eq!(15u8.view_bits::<Msb0>().trailing_zeros(), 0);
+}
+
+#[test]
+fn shunting() {
+ let bits = bits![mut 0, 1, 0, 0, 1];
+ bits.shift_left(0);
+ bits.shift_right(0);
+ assert_eq!(bits, bits![0, 1, 0, 0, 1]);
+
+ let bits = bits![mut 1;5];
+ bits.shift_left(1);
+ bits.shift_right(2);
+ bits.shift_left(1);
+ assert_eq!(bits, bits![0, 1, 1, 1, 0]);
+}
+
+#[test]
+fn aliasing() {
+ let bits = bits![Cell<u32>, Lsb0; 0];
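+ // `Cell` storage provides interior mutability, so two shared copies of
+ // the same bit-slice handle may both write to it without `unsafe`.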
+
+ let (a, b) = (bits, bits);
+ a.set_aliased(0, true);
+ assert!(bits[0]);
+ b.set_aliased(0, false);
+ assert!(!bits[0]);
+}
+
+#[test]
+fn cooking() {
+ use core::convert::TryFrom;
+
+ use crate::{
+ ptr::BitPtr,
+ slice,
+ };
+
+ let mut data = [0usize; 80];
+ let len = crate::mem::bits_of::<usize>() * 80;
+ let ref_ptr = data.as_ptr();
+ let mut_ptr = data.as_mut_ptr();
+
+ unsafe {
+ assert_eq!(
+ slice::from_raw_parts_unchecked(
+ BitPtr::try_from(ref_ptr).unwrap(),
+ len
+ )
+ .as_bitspan(),
+ data.view_bits::<Lsb0>().as_bitspan(),
+ );
+ assert_eq!(
+ slice::from_raw_parts_unchecked_mut(
+ BitPtr::try_from(mut_ptr).unwrap(),
+ len
+ )
+ .as_bitspan(),
+ data.view_bits_mut::<Msb0>().as_bitspan(),
+ );
+ }
+}
diff --git a/src/slice/tests/api.rs b/src/slice/tests/api.rs
new file mode 100644
index 0000000..0fcfcb3
--- /dev/null
+++ b/src/slice/tests/api.rs
@@ -0,0 +1,139 @@
+#![cfg(test)]
+
+use crate::prelude::*;
+
+#[test]
+fn properties() {
+ let empty = bits![];
+ assert_eq!(empty.len(), 0);
+ assert!(empty.is_empty());
+
+ let bits = bits![0, 1, 0, 0, 1];
+ assert_eq!(bits.len(), 5);
+ assert!(!bits.is_empty());
+}
+
+#[test]
+fn getters() {
+ let empty = bits![mut];
+ let bits = bits![mut 0, 1, 0, 0, 1];
+
+ assert!(empty.first().is_none());
+ assert!(empty.first_mut().is_none());
+ assert!(empty.last().is_none());
+ assert!(empty.last_mut().is_none());
+ assert!(empty.split_first().is_none());
+ assert!(empty.split_first_mut().is_none());
+ assert!(empty.split_last().is_none());
+ assert!(empty.split_last_mut().is_none());
+ assert!(!bits.first().unwrap());
+ assert!(bits.last().unwrap());
+
+ *bits.first_mut().unwrap() = true;
+ *bits.last_mut().unwrap() = false;
+
+ let (first, rest) = bits.split_first().unwrap();
+ assert!(*first);
+ assert_eq!(rest, bits![1, 0, 0, 0]);
+ let (last, rest) = bits.split_last().unwrap();
+ assert!(!*last);
+ assert_eq!(rest, bits![1, 1, 0, 0]);
+ drop(first);
+ drop(last);
+
+ let (first, _) = bits.split_first_mut().unwrap();
+ first.commit(false);
+ let (last, _) = bits.split_last_mut().unwrap();
+ last.commit(true);
+
+ *bits.get_mut(2).unwrap() = true;
+ unsafe {
+ assert!(*bits.get_unchecked(2));
+ bits.get_unchecked_mut(2).commit(false);
+ }
+
+ bits.swap(0, 4);
+ bits[1 .. 4].reverse();
+ assert_eq!(bits, bits![1, 0, 0, 1, 0]);
+}
+
+#[test]
+fn splitters() {
+ type Bsl<T> = BitSlice<T, Lsb0>;
+ let mut data = 0xF0u8;
+ let bits = data.view_bits_mut::<Lsb0>();
+
+ let (l, r): (&Bsl<u8>, &Bsl<u8>) = bits.split_at(4);
+ assert_eq!(l, bits![0; 4]);
+ assert_eq!(r, bits![1; 4]);
+
+ let (l, r): (
+ &mut Bsl<<u8 as BitStore>::Alias>,
+ &mut Bsl<<u8 as BitStore>::Alias>,
+ ) = bits.split_at_mut(4);
+ l.fill(true);
+ r.fill(false);
+ assert_eq!(data, 0x0Fu8);
+
+ let bits = bits![0, 1, 0, 0, 1];
+
+ assert!(bits.strip_prefix(bits![1, 0]).is_none());
+ assert_eq!(bits.strip_prefix(bits![0, 1]), Some(bits![0, 0, 1]));
+
+ assert!(bits.strip_suffix(bits![1, 0]).is_none());
+ assert_eq!(bits.strip_suffix(bits![0, 1]), Some(bits![0, 1, 0]));
+}
+
+#[test]
+fn rotators() {
+ let bits = bits![mut 0, 1, 0, 0, 1];
+
+ bits.rotate_left(2);
+ assert_eq!(bits, bits![0, 0, 1, 0, 1]);
+ bits.rotate_right(2);
+ assert_eq!(bits, bits![0, 1, 0, 0, 1]);
+
+ bits.rotate_left(0);
+ bits.rotate_right(0);
+ bits.rotate_left(5);
+ bits.rotate_right(5);
+}
+
+#[test]
+#[should_panic]
+fn rotate_too_far_left() {
+ bits![mut 0, 1].rotate_left(3);
+}
+
+#[test]
+#[should_panic]
+fn rotate_too_far_right() {
+ bits![mut 0, 1].rotate_right(3);
+}
+
+#[test]
+fn fillers() {
+ let bits = bits![mut 0; 5];
+
+ bits.fill(true);
+ assert_eq!(bits, bits![1; 5]);
+ bits.fill_with(|idx| idx % 2 == 0);
+ assert_eq!(bits, bits![1, 0, 1, 0, 1]);
+
+ bits.copy_within(1 .., 0);
+ assert_eq!(bits, bits![0, 1, 0, 1, 1]);
+}
+
+#[test]
+fn inspectors() {
+ let bits = bits![0, 1, 0, 0, 1, 0, 1, 1, 0, 1];
+
+ assert!(bits.contains(bits![0, 1, 0, 1]));
+ assert!(!bits.contains(bits![0; 4]));
+
+ assert!(bits.starts_with(bits![0, 1, 0, 0]));
+ assert!(!bits.starts_with(bits![0, 1, 1]));
+
+ assert!(bits.ends_with(bits![1, 0, 1]));
+ assert!(!bits.ends_with(bits![0, 0, 1]));
+}
diff --git a/src/slice/tests/iter.rs b/src/slice/tests/iter.rs
new file mode 100644
index 0000000..5dbc562
--- /dev/null
+++ b/src/slice/tests/iter.rs
@@ -0,0 +1,703 @@
+#![cfg(test)]
+
+use crate::prelude::*;
+
+#[test]
+fn iter() {
+ let bits = bits![0, 1, 0, 1, 0, 1];
+ let mut iter = bits.iter().by_refs();
+
+ assert!(!*iter.next().unwrap());
+ assert!(!*iter.nth(1).unwrap());
+ assert!(*iter.next_back().unwrap());
+ assert!(*iter.nth_back(1).unwrap());
+
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+ assert!(iter.next_back().is_none());
+ assert!(iter.nth(1).is_none());
+ assert!(iter.nth_back(1).is_none());
+}
+
+#[test]
+fn iter_mut() {
+ let bits = bits![mut 0, 1, 0, 0, 1];
+ let mut iter = bits.iter_mut();
+ while let Some(mut bit) = iter.nth(1) {
+ *bit = !*bit;
+ }
+ assert_eq!(bits, bits![0, 0, 0, 1, 1]);
+}
+
+#[test]
+fn windows() {
+ let bits = bits![0, 1, 0, 1, 1, 0, 0, 1, 1, 1];
+ let base = bits.as_bitptr();
+ let mut windows = bits.windows(4);
+ assert_eq!(windows.len(), 7);
+
+ let next = windows.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 1, 0, 1]);
+
+ let next_back = windows.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 6);
+ assert_eq!(next_back, bits![0, 1, 1, 1]);
+
+ let nth = windows.nth(2).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 3);
+ assert_eq!(nth, bits![1, 1, 0, 0]);
+
+ let nth_back = windows.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(nth_back, bits![1, 0, 0, 1]);
+
+ assert_eq!(windows.len(), 0);
+ assert!(windows.next().is_none());
+ assert!(windows.next_back().is_none());
+ assert!(windows.nth(1).is_none());
+ assert!(windows.nth_back(1).is_none());
+}
+
+#[test]
+fn chunks() {
+ let bits = bits![0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0];
+ // ^^^^^ ^^^^^ ^^^^^ ^
+ let base = bits.as_bitptr();
+ let mut chunks = bits.chunks(2);
+ assert_eq!(chunks.len(), 6);
+
+ let next = chunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0]);
+
+ let next_back = chunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 10);
+ assert_eq!(next_back, bits![0]);
+
+ let nth = chunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(nth, bits![0, 1]);
+
+ let nth_back = chunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 6);
+ assert_eq!(nth_back, bits![1, 0]);
+
+ assert_eq!(chunks.len(), 0);
+ assert!(chunks.next().is_none());
+ assert!(chunks.next_back().is_none());
+ assert!(chunks.nth(1).is_none());
+ assert!(chunks.nth_back(1).is_none());
+
+ assert_eq!(bits![0; 2].chunks(3).next().unwrap().len(), 2);
+ assert_eq!(bits![0; 5].chunks(3).next().unwrap().len(), 3);
+ assert_eq!(bits![0; 5].chunks(3).nth(1).unwrap().len(), 2);
+ assert_eq!(bits![0; 8].chunks(3).nth(1).unwrap().len(), 3);
+
+ assert_eq!(bits![0; 5].chunks(3).next_back().unwrap().len(), 2);
+ assert_eq!(bits![0; 6].chunks(3).next_back().unwrap().len(), 3);
+ assert_eq!(bits![0; 5].chunks(3).nth_back(1).unwrap().len(), 3);
+}
+
+#[test]
+fn chunks_mut() {
+ let bits = bits![mut 1; 11];
+ let base = bits.as_bitptr();
+ let mut chunks = bits.chunks_mut(2);
+ assert_eq!(chunks.len(), 6);
+
+ let next = chunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ next.fill(false);
+
+ let next_back = chunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 10);
+ next_back.fill(false);
+
+ let nth = chunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 4);
+ nth.set(0, false);
+
+ let nth_back = chunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 6);
+ nth_back.set(1, false);
+
+ assert_eq!(chunks.len(), 0);
+ assert!(chunks.next().is_none());
+ assert!(chunks.next_back().is_none());
+ assert!(chunks.nth(1).is_none());
+ assert!(chunks.nth_back(1).is_none());
+
+ assert_eq!(bits, bits![0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0]);
+
+ assert_eq!(bits![mut 0; 2].chunks_mut(3).next().unwrap().len(), 2);
+ assert_eq!(bits![mut 0; 5].chunks_mut(3).next().unwrap().len(), 3);
+ assert_eq!(bits![mut 0; 5].chunks_mut(3).nth(1).unwrap().len(), 2);
+ assert_eq!(bits![mut 0; 8].chunks_mut(3).nth(1).unwrap().len(), 3);
+
+ assert_eq!(bits![mut 0; 5].chunks_mut(3).next_back().unwrap().len(), 2);
+ assert_eq!(bits![mut 0; 6].chunks_mut(3).next_back().unwrap().len(), 3);
+ assert_eq!(bits![mut 0; 5].chunks_mut(3).nth_back(1).unwrap().len(), 3);
+}
+
+#[test]
+fn chunks_exact() {
+ let bits = bits![
+ 0, 0, 0, 1, 1, 1, 0, 0, 1, // next and nth(1)
+ 1, 0, 0, 1, 1, 1, 0, 1, 0, // next_back and nth_back(1)
+ 1, 1, // remainder
+ ];
+ let base = bits.as_bitptr();
+ let mut chunks = bits.chunks_exact(3);
+ assert_eq!(chunks.len(), 6);
+
+ let next = chunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0, 0]);
+
+ let nth = chunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 6);
+ assert_eq!(nth, bits![0, 0, 1]);
+
+ let next_back = chunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 15);
+ assert_eq!(next_back, bits![0, 1, 0]);
+
+ let nth_back = chunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 9);
+ assert_eq!(nth_back, bits![1, 0, 0]);
+
+ let remainder = chunks.remainder();
+ assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 18);
+ assert_eq!(remainder, bits![1, 1]);
+
+ assert_eq!(chunks.len(), 0);
+ assert!(chunks.next().is_none());
+ assert!(chunks.next_back().is_none());
+ assert!(chunks.nth(1).is_none());
+ assert!(chunks.nth_back(1).is_none());
+}
+
+#[test]
+fn chunks_exact_mut() {
+ let bits = bits![mut 0; 20];
+ let base = bits.as_bitptr();
+ let mut chunks = bits.chunks_exact_mut(3);
+
+ let next = chunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ next.fill(true);
+
+ let next_back = chunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 15);
+ next_back.fill(true);
+
+ let nth = chunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 6);
+ nth.set(2, true);
+
+ let nth_back = chunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 9);
+ nth_back.set(0, true);
+
+ assert_eq!(chunks.len(), 0);
+ assert!(chunks.next().is_none());
+ assert!(chunks.next_back().is_none());
+ assert!(chunks.nth(1).is_none());
+ assert!(chunks.nth_back(1).is_none());
+
+ assert_eq!(bits, bits![
+ 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0,
+ ]);
+ bits.fill(false);
+
+ let mut chunks = bits.chunks_exact_mut(3);
+ let remainder = chunks.take_remainder();
+ assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 18);
+ remainder.fill(true);
+ assert!(chunks.take_remainder().is_empty());
+ assert!(chunks.into_remainder().is_empty());
+ assert!(bits.ends_with(bits![0, 0, 1, 1]));
+}
+
+#[test]
+fn rchunks() {
+ let bits = bits![1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0];
+ // ^^ ^^^^^ ^^^^^ ^^^^
+ let base = bits.as_bitptr();
+ let mut rchunks = bits.rchunks(2);
+
+ let next = rchunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 9);
+ assert_eq!(next, bits![0, 0]);
+
+ let next_back = rchunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next_back, bits![1]);
+
+ let nth = rchunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 5);
+ assert_eq!(nth, bits![0, 1]);
+
+ let nth_back = rchunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 3);
+ assert_eq!(nth_back, bits![1, 0]);
+
+ assert_eq!(rchunks.len(), 0);
+ assert!(rchunks.next().is_none());
+ assert!(rchunks.next_back().is_none());
+ assert!(rchunks.nth(1).is_none());
+ assert!(rchunks.nth_back(1).is_none());
+
+ assert_eq!(bits![0; 5].rchunks(3).next().unwrap().len(), 3);
+ assert_eq!(bits![0; 5].rchunks(3).nth(1).unwrap().len(), 2);
+ assert_eq!(bits![0; 5].rchunks(3).next_back().unwrap().len(), 2);
+ assert_eq!(bits![0; 5].rchunks(3).nth_back(1).unwrap().len(), 3);
+}
+
+#[test]
+fn rchunks_mut() {
+ let bits = bits![mut 0; 11];
+ let base = bits.as_bitptr();
+ let mut rchunks = bits.rchunks_mut(2);
+ assert_eq!(rchunks.len(), 6);
+
+ let next = rchunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 9);
+ next.fill(true);
+
+ let next_back = rchunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0);
+ next_back.fill(true);
+
+ let nth = rchunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 5);
+ nth.set(0, true);
+
+ let nth_back = rchunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 3);
+ nth_back.set(1, true);
+
+ assert_eq!(rchunks.len(), 0);
+ assert!(rchunks.next().is_none());
+ assert!(rchunks.next_back().is_none());
+ assert!(rchunks.nth(1).is_none());
+ assert!(rchunks.nth_back(1).is_none());
+
+ assert_eq!(bits, bits![1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1]);
+
+ assert_eq!(bits![mut 0; 5].rchunks_mut(3).next().unwrap().len(), 3);
+ assert_eq!(bits![mut 0; 5].rchunks_mut(3).nth(1).unwrap().len(), 2);
+ assert_eq!(bits![mut 0; 5].rchunks_mut(3).next_back().unwrap().len(), 2);
+ assert_eq!(bits![mut 0; 5].rchunks_mut(3).nth_back(1).unwrap().len(), 3);
+}
+
+#[test]
+fn rchunks_exact() {
+ let bits = bits![
+ 1, 1, // remainder
+ 0, 1, 0, 1, 1, 1, 0, 0, 1, // nth_back(1) and next
+ 1, 0, 0, 1, 1, 1, 0, 0, 0, // nth(1) and next
+ ];
+ let base = bits.as_bitptr();
+ let mut rchunks = bits.rchunks_exact(3);
+ assert_eq!(rchunks.len(), 6);
+
+ let next = rchunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 17);
+ assert_eq!(next, bits![0, 0, 0]);
+
+ let nth = rchunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 11);
+ assert_eq!(nth, bits![1, 0, 0]);
+
+ let next_back = rchunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 2);
+ assert_eq!(next_back, bits![0, 1, 0]);
+
+ let nth_back = rchunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 8);
+ assert_eq!(nth_back, bits![0, 0, 1]);
+
+ let remainder = rchunks.remainder();
+ assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(remainder, bits![1, 1]);
+
+ assert_eq!(rchunks.len(), 0);
+ assert!(rchunks.next().is_none());
+ assert!(rchunks.next_back().is_none());
+ assert!(rchunks.nth(1).is_none());
+ assert!(rchunks.nth_back(1).is_none());
+}
+
+#[test]
+fn rchunks_exact_mut() {
+ let bits = bits![mut 0; 20];
+ let base = bits.as_bitptr();
+ let mut rchunks = bits.rchunks_exact_mut(3);
+
+ let next = rchunks.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 17);
+ next.fill(true);
+
+ let next_back = rchunks.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 2);
+ next_back.fill(true);
+
+ let nth = rchunks.nth(1).unwrap();
+ assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 11);
+ nth.set(2, true);
+
+ let nth_back = rchunks.nth_back(1).unwrap();
+ assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 8);
+ nth_back.set(0, true);
+
+ assert_eq!(rchunks.len(), 0);
+ assert!(rchunks.next().is_none());
+ assert!(rchunks.next_back().is_none());
+ assert!(rchunks.nth(1).is_none());
+ assert!(rchunks.nth_back(1).is_none());
+
+ assert_eq!(bits, bits![
+ 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1
+ ]);
+ bits.fill(false);
+
+ let mut chunks = bits.rchunks_exact_mut(3);
+ let remainder = chunks.take_remainder();
+ assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 0);
+ remainder.fill(true);
+ assert!(chunks.take_remainder().is_empty());
+ assert!(chunks.into_remainder().is_empty());
+ assert!(bits.starts_with(bits![1, 1, 0, 0]));
+}
+
+#[test]
+fn split() {
+ let bits = bits![0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut split = bits.split(|_, &bit| bit);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0]);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3);
+ assert!(next.is_empty());
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 8);
+ assert!(next_back.is_empty());
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7);
+ assert!(next_back.is_empty());
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(next_back, bits![0, 0]);
+
+ assert!(split.next().is_none());
+ assert!(split.next_back().is_none());
+}
+
+#[test]
+fn split_mut() {
+ let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut split = bits.split_mut(|_, &bit| bit);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0]);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3);
+ assert!(next.is_empty());
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 8);
+ assert!(next_back.is_empty());
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7);
+ assert!(next_back.is_empty());
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(next_back, bits![0, 0]);
+
+ assert!(split.next().is_none());
+ assert!(split.next_back().is_none());
+
+ let bits = bits![mut 0];
+ let mut split = bits.split_mut(|_, &bit| bit);
+ assert_eq!(split.next().unwrap(), bits![0]);
+ assert!(split.next().is_none());
+}
+
+#[test]
+fn split_inclusive() {
+ let bits = bits![0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut split = bits.split_inclusive(|_, &bit| bit);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0, 1]);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3);
+ assert_eq!(next, bits![1]);
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7);
+ assert_eq!(next_back, bits![1]);
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(next_back, bits![0, 0, 1]);
+
+ assert!(split.next().is_none());
+ assert!(split.next_back().is_none());
+
+ let bits = bits![0, 1];
+ let mut split = bits.split_inclusive(|_, &bit| bit);
+ assert_eq!(split.next(), Some(bits![0, 1]));
+ assert!(split.next().is_none());
+ let mut split = bits.split_inclusive(|_, &bit| bit);
+ assert_eq!(split.next_back(), Some(bits![0, 1]));
+ assert!(split.next_back().is_none());
+
+ assert_eq!(
+ bits![].split_inclusive(|_, &bit| bit).next_back(),
+ Some(bits![]),
+ );
+}
+
+#[test]
+fn split_inclusive_mut() {
+ let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut split = bits.split_inclusive_mut(|_, &bit| bit);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0, 1]);
+
+ let next = split.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3);
+ assert_eq!(next, bits![1]);
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7);
+ assert_eq!(next_back, bits![1]);
+
+ let next_back = split.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(next_back, bits![0, 0, 1]);
+
+ assert!(split.next().is_none());
+ assert!(split.next_back().is_none());
+
+ let bits = bits![mut 0, 1];
+ let mut split = bits.split_inclusive_mut(|_, &bit| bit);
+ assert_eq!(split.next().unwrap(), bits![0, 1]);
+ assert!(split.next().is_none());
+ let mut split = bits.split_inclusive_mut(|_, &bit| bit);
+ assert_eq!(split.next_back().unwrap(), bits![0, 1]);
+ assert!(split.next_back().is_none());
+
+ assert_eq!(
+ bits![mut]
+ .split_inclusive_mut(|_, &bit| bit)
+ .next_back()
+ .unwrap(),
+ bits![],
+ );
+}
+
+#[test]
+fn rsplit() {
+ let bits = bits![0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut rsplit = bits.rsplit(|_, &bit| bit);
+
+ let next = rsplit.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8);
+ assert!(next.is_empty());
+
+ let next = rsplit.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 7);
+ assert!(next.is_empty());
+
+ let next_back = rsplit.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next_back, bits![0, 0]);
+
+ let next_back = rsplit.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 3);
+ assert!(next_back.is_empty());
+
+ let next_back = rsplit.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(next_back, bits![0, 0]);
+
+ assert!(rsplit.next().is_none());
+ assert!(rsplit.next_back().is_none());
+}
+
+#[test]
+fn rsplit_mut() {
+ let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut rsplit = bits.rsplit_mut(|_, &bit| bit);
+
+ let next = rsplit.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8);
+ assert!(next.is_empty());
+
+ let next = rsplit.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 7);
+ assert!(next.is_empty());
+
+ let next_back = rsplit.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next_back, bits![0, 0]);
+
+ let next_back = rsplit.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 3);
+ assert!(next_back.is_empty());
+
+ let next_back = rsplit.next_back().unwrap();
+ assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4);
+ assert_eq!(next_back, bits![0, 0]);
+
+ assert!(rsplit.next().is_none());
+ assert!(rsplit.next_back().is_none());
+
+ let bits = bits![mut 0];
+ let mut rsplit = bits.rsplit_mut(|_, &bit| bit);
+ assert_eq!(rsplit.next().unwrap(), bits![0]);
+ assert!(rsplit.next().is_none());
+}
+
+#[test]
+fn splitn() {
+ let bits = bits![0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut splitn = bits.splitn(2, |_, &bit| bit);
+
+ let next = splitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0]);
+
+ let next = splitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3);
+ assert_eq!(next, bits[3 ..]);
+
+ assert!(splitn.next().is_none());
+}
+
+#[test]
+fn splitn_mut() {
+ let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut splitn = bits.splitn_mut(2, |_, &bit| bit);
+
+ let next = splitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0]);
+
+ let next = splitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3);
+ assert_eq!(next, bits![1, 0, 0, 1, 1]);
+
+ assert!(splitn.next().is_none());
+}
+
+#[test]
+fn rsplitn() {
+ let bits = bits![0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut rsplitn = bits.rsplitn(2, |_, &bit| bit);
+
+ let next = rsplitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8);
+ assert!(next.is_empty());
+
+ let next = rsplitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0, 1, 1, 0, 0, 1]);
+}
+
+#[test]
+fn rsplitn_mut() {
+ let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1];
+ let base = bits.as_bitptr();
+ let mut rsplitn = bits.rsplitn_mut(2, |_, &bit| bit);
+
+ let next = rsplitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8);
+ assert!(next.is_empty());
+
+ let next = rsplitn.next().unwrap();
+ assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0);
+ assert_eq!(next, bits![0, 0, 1, 1, 0, 0, 1]);
+
+ assert!(rsplitn.next().is_none());
+}
+
+#[test]
+fn iter_ones() {
+ use crate::order::HiLo;
+
+ let bits = 0b0100_1001u8.view_bits::<HiLo>();
+ // ordering: 3210 7654
+ let mut ones = bits.iter_ones();
+ assert_eq!(ones.len(), 3);
+ assert_eq!(ones.next(), Some(2));
+ assert_eq!(ones.next_back(), Some(7));
+ assert_eq!(ones.next(), Some(4));
+ assert!(ones.next().is_none());
+}
+
+#[test]
+fn iter_zeros() {
+ use crate::order::HiLo;
+
+ let bits = 0b1011_0110u8.view_bits::<HiLo>();
+ // ordering: 3210 7654
+ let mut zeros = bits.iter_zeros();
+ assert_eq!(zeros.len(), 3);
+ assert_eq!(zeros.next(), Some(2));
+ assert_eq!(zeros.next_back(), Some(7));
+ assert_eq!(zeros.next(), Some(4));
+ assert!(zeros.next().is_none());
+}
+
+#[test]
+fn trait_impls() {
+ use core::iter::FusedIterator;
+
+ use static_assertions::*;
+
+ use crate::slice::iter::{
+ BitRefIter,
+ BitValIter,
+ };
+
+ assert_impl_all!(
+ BitRefIter<'static, usize, Lsb0>: Iterator,
+ DoubleEndedIterator,
+ ExactSizeIterator,
+ FusedIterator
+ );
+ assert_impl_all!(
+ BitValIter<'static, usize, Lsb0>: Iterator,
+ DoubleEndedIterator,
+ ExactSizeIterator,
+ FusedIterator
+ );
+}
diff --git a/src/slice/tests/ops.rs b/src/slice/tests/ops.rs
new file mode 100644
index 0000000..21bd43b
--- /dev/null
+++ b/src/slice/tests/ops.rs
@@ -0,0 +1,128 @@
+use rand::random;
+
+use crate::{
+ prelude::*,
+ slice::BitSliceIndex,
+};
+
+#[test]
+fn bitand() {
+ let a = random::<[u32; 3]>();
+ let b = random::<[u32; 3]>();
+ let c = [a[0] & b[0], a[1] & b[1], a[2] & b[2]];
+
+ let mut d = a;
+ *d.view_bits_mut::<Lsb0>() &= b.view_bits::<Lsb0>();
+ assert_eq!(c, d);
+
+ d = a;
+ *d.view_bits_mut::<Msb0>() &= b.view_bits::<Msb0>();
+ assert_eq!(c, d);
+
+ let d = random::<[u8; 6]>();
+ let e = random::<[u16; 3]>();
+
+ let mut f = d;
+ *f.view_bits_mut::<Lsb0>() &= e.view_bits::<Msb0>();
+ for ((d, e), f) in (d
+ .view_bits::<Lsb0>()
+ .iter()
+ .by_vals()
+ .zip(e.view_bits::<Msb0>().iter().by_vals()))
+ .zip(f.view_bits::<Lsb0>())
+ {
+ assert_eq!(d & e, f);
+ }
+}
+
+#[test]
+fn bitor() {
+ let a = random::<[u32; 3]>();
+ let b = random::<[u32; 3]>();
+ let c = [a[0] | b[0], a[1] | b[1], a[2] | b[2]];
+
+ let mut d = a;
+ *d.view_bits_mut::<Lsb0>() |= b.view_bits::<Lsb0>();
+ assert_eq!(c, d);
+
+ d = a;
+ *d.view_bits_mut::<Msb0>() |= b.view_bits::<Msb0>();
+ assert_eq!(c, d);
+
+ let d = random::<[u8; 6]>();
+ let e = random::<[u16; 3]>();
+
+ let mut f = d;
+ *f.view_bits_mut::<Lsb0>() |= e.view_bits::<Msb0>();
+ for ((d, e), f) in (d
+ .view_bits::<Lsb0>()
+ .iter()
+ .by_vals()
+ .zip(e.view_bits::<Msb0>().iter().by_vals()))
+ .zip(f.view_bits::<Lsb0>())
+ {
+ assert_eq!(d | e, f);
+ }
+}
+
+#[test]
+fn bitxor() {
+ let a = random::<[u32; 3]>();
+ let b = random::<[u32; 3]>();
+ let c = [a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2]];
+
+ let mut d = a;
+ *d.view_bits_mut::<Lsb0>() ^= b.view_bits::<Lsb0>();
+ assert_eq!(c, d);
+
+ d = a;
+ *d.view_bits_mut::<Msb0>() ^= b.view_bits::<Msb0>();
+ assert_eq!(c, d);
+
+ let d = random::<[u8; 6]>();
+ let e = random::<[u16; 3]>();
+
+ let mut f = d;
+ *f.view_bits_mut::<Lsb0>() ^= e.view_bits::<Msb0>();
+ for ((d, e), f) in (d
+ .view_bits::<Lsb0>()
+ .iter()
+ .by_vals()
+ .zip(e.view_bits::<Msb0>().iter().by_vals()))
+ .zip(f.view_bits::<Lsb0>())
+ {
+ assert_eq!(d ^ e, f);
+ }
+}
+
+#[test]
+fn bit_not() {
+ let a = random::<[u32; 3]>();
+ let mut b = a;
+ let _ = !b.view_bits_mut::<Msb0>();
+ assert_eq!([!a[0], !a[1], !a[2]], b);
+
+ let mut c = [0u32; 3];
+ let d = !&mut c.view_bits_mut::<Lsb0>()[16 .. 80];
+ let _ = !&mut d[24 .. 40];
+ assert_eq!(c, [0xFF_FF_00_00, 0xFF_00_00_FF, 0x00_00_FF_FF]);
+}
+
+#[test]
+fn indexing() {
+ let bits = bits![mut 0, 1, 0, 0, 1];
+
+ assert!(bits[1]);
+ assert!(!bits[2]);
+ assert_eq!(bits[1 ..= 2], bits![1, 0]);
+
+ assert!((10 .. 12).get(bits).is_none());
+ assert!((10 .. 12).get_mut(bits).is_none());
+}
+
+#[test]
+#[should_panic = "index 10 out of bounds: 5"]
+fn index_mut_usize() {
+ let bits = bits![mut 0, 1, 0, 0, 1];
+ 10.index_mut(bits);
+}
diff --git a/src/slice/tests/traits.rs b/src/slice/tests/traits.rs
new file mode 100644
index 0000000..8a4d418
--- /dev/null
+++ b/src/slice/tests/traits.rs
@@ -0,0 +1,349 @@
+use core::{
+ cmp,
+ convert::TryFrom,
+};
+
+use static_assertions::*;
+
+use crate::prelude::*;
+
+#[test]
+fn core_impls() {
+ use core::{
+ cmp::{
+ Eq,
+ Ord,
+ },
+ fmt::Debug,
+ hash::Hash,
+ ops::{
+ Index,
+ Range,
+ },
+ panic::{
+ RefUnwindSafe,
+ UnwindSafe,
+ },
+ };
+
+ assert_impl_all!(BitSlice<usize, Lsb0>:
+ AsRef<BitSlice<usize, Lsb0>>,
+ AsMut<BitSlice<usize, Lsb0>>,
+ Debug,
+ Eq,
+ Hash,
+ Index<usize>,
+ Index<Range<usize>>,
+ Ord,
+ PartialEq<BitSlice<u32, Msb0>>,
+ PartialOrd<BitSlice<u32, Msb0>>,
+ Send,
+ Sync,
+ Unpin,
+ UnwindSafe,
+ RefUnwindSafe,
+ );
+ assert_impl_all!(&BitSlice<usize, Lsb0>:
+ Default,
+ IntoIterator,
+ );
+ assert_impl_all!(&mut BitSlice<usize, Lsb0>:
+ Default,
+ IntoIterator,
+ );
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn alloc_impls() {
+ use alloc::borrow::ToOwned;
+
+ assert_impl_all!(BitSlice<usize, Lsb0>:
+ ToOwned,
+ );
+}
+
+#[test]
+#[allow(deprecated)]
+#[cfg(feature = "std")]
+fn std_impls() {
+ use std::{
+ ascii::AsciiExt,
+ io::{
+ BufRead,
+ Read,
+ Write,
+ },
+ };
+
+ assert_impl_all!(&BitSlice<usize, Lsb0>:
+ Read,
+ );
+ assert_impl_all!(&mut BitSlice<usize, Lsb0>:
+ Write,
+ );
+ assert_not_impl_any!(BitSlice<u8, Lsb0>:
+ AsciiExt,
+ BufRead,
+ );
+}
+
+#[test]
+fn cmp() {
+ let a = bits![0, 1];
+ let b = bits![1, 0];
+ let c = bits![u8, Msb0; 1, 0];
+ let d = bits![u8, Msb0; 1, 1];
+
+ assert_eq!(a.cmp(b), cmp::Ordering::Less);
+ assert_ne!(a, b);
+ assert_eq!(b, c);
+ assert_ne!(c, d);
+}
+
+#[test]
+fn conv() -> Result<(), ()> {
+ let mut a = [0u8, 1, 2, 3];
+ let _ = <&BitSlice<_, Lsb0>>::try_from(&a[..]).map_err(drop)?;
+ let _ = <&mut BitSlice<_, Lsb0>>::try_from(&mut a[..]).map_err(drop)?;
+ Ok(())
+}
+
+#[cfg(feature = "alloc")]
+mod format {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+
+ use crate::prelude::*;
+
+ #[test]
+ fn binary() {
+ let data = [0u8, 0x0F, !0];
+ let bits = data.view_bits::<Msb0>();
+
+ assert_eq!(format!("{:b}", &bits[.. 0]), "[]");
+ assert_eq!(format!("{:#b}", &bits[.. 0]), "[]");
+
+ assert_eq!(format!("{:b}", &bits[9 .. 15]), "[000111]");
+ assert_eq!(
+ format!("{:#b}", &bits[9 .. 15]),
+ "[
+ 0b000111,
+]"
+ );
+
+ assert_eq!(format!("{:b}", &bits[4 .. 20]), "[0000, 00001111, 1111]");
+ assert_eq!(
+ format!("{:#b}", &bits[4 .. 20]),
+ "[
+ 0b0000,
+ 0b00001111,
+ 0b1111,
+]"
+ );
+
+ assert_eq!(format!("{:b}", &bits[4 ..]), "[0000, 00001111, 11111111]");
+ assert_eq!(
+ format!("{:#b}", &bits[4 ..]),
+ "[
+ 0b0000,
+ 0b00001111,
+ 0b11111111,
+]"
+ );
+
+ assert_eq!(format!("{:b}", &bits[.. 20]), "[00000000, 00001111, 1111]");
+ assert_eq!(
+ format!("{:#b}", &bits[.. 20]),
+ "[
+ 0b00000000,
+ 0b00001111,
+ 0b1111,
+]"
+ );
+
+ assert_eq!(format!("{:b}", bits), "[00000000, 00001111, 11111111]");
+ assert_eq!(
+ format!("{:#b}", bits),
+ "[
+ 0b00000000,
+ 0b00001111,
+ 0b11111111,
+]"
+ );
+ }
+
+ #[test]
+ fn octal() {
+ let data = [0u8, 0x0F, !0];
+ let bits = data.view_bits::<Msb0>();
+
+ assert_eq!(format!("{:o}", &bits[.. 0]), "[]");
+ assert_eq!(format!("{:#o}", &bits[.. 0]), "[]");
+
+ assert_eq!(format!("{:o}", &bits[9 .. 15]), "[07]");
+ assert_eq!(
+ format!("{:#o}", &bits[9 .. 15]),
+ "[
+ 0o07,
+]"
+ );
+
+ // …0_000 00_001_111 1_111…
+ assert_eq!(format!("{:o}", &bits[4 .. 20]), "[00, 017, 17]");
+ assert_eq!(
+ format!("{:#o}", &bits[4 .. 20]),
+ "[
+ 0o00,
+ 0o017,
+ 0o17,
+]"
+ );
+
+ assert_eq!(format!("{:o}", &bits[4 ..]), "[00, 017, 377]");
+ assert_eq!(
+ format!("{:#o}", &bits[4 ..]),
+ "[
+ 0o00,
+ 0o017,
+ 0o377,
+]"
+ );
+
+ assert_eq!(format!("{:o}", &bits[.. 20]), "[000, 017, 17]");
+ assert_eq!(
+ format!("{:#o}", &bits[.. 20]),
+ "[
+ 0o000,
+ 0o017,
+ 0o17,
+]"
+ );
+
+ assert_eq!(format!("{:o}", bits), "[000, 017, 377]");
+ assert_eq!(
+ format!("{:#o}", bits),
+ "[
+ 0o000,
+ 0o017,
+ 0o377,
+]"
+ );
+ }
+
+ #[test]
+ fn hex_lower() {
+ let data = [0u8, 0x0F, !0];
+ let bits = data.view_bits::<Msb0>();
+
+ assert_eq!(format!("{:x}", &bits[.. 0]), "[]");
+ assert_eq!(format!("{:#x}", &bits[.. 0]), "[]");
+
+ // …00_0111 …
+ assert_eq!(format!("{:x}", &bits[9 .. 15]), "[07]");
+ assert_eq!(
+ format!("{:#x}", &bits[9 .. 15]),
+ "[
+ 0x07,
+]"
+ );
+
+ // …0000 00001111 1111…
+ assert_eq!(format!("{:x}", &bits[4 .. 20]), "[0, 0f, f]");
+ assert_eq!(
+ format!("{:#x}", &bits[4 .. 20]),
+ "[
+ 0x0,
+ 0x0f,
+ 0xf,
+]"
+ );
+
+ assert_eq!(format!("{:x}", &bits[4 ..]), "[0, 0f, ff]");
+ assert_eq!(
+ format!("{:#x}", &bits[4 ..]),
+ "[
+ 0x0,
+ 0x0f,
+ 0xff,
+]"
+ );
+
+ assert_eq!(format!("{:x}", &bits[.. 20]), "[00, 0f, f]");
+ assert_eq!(
+ format!("{:#x}", &bits[.. 20]),
+ "[
+ 0x00,
+ 0x0f,
+ 0xf,
+]"
+ );
+
+ assert_eq!(format!("{:x}", bits), "[00, 0f, ff]");
+ assert_eq!(
+ format!("{:#x}", bits),
+ "[
+ 0x00,
+ 0x0f,
+ 0xff,
+]"
+ );
+ }
+
+ #[test]
+ fn hex_upper() {
+ let data = [0u8, 0x0F, !0];
+ let bits = data.view_bits::<Msb0>();
+
+ assert_eq!(format!("{:X}", &bits[.. 0]), "[]");
+ assert_eq!(format!("{:#X}", &bits[.. 0]), "[]");
+
+ assert_eq!(format!("{:X}", &bits[9 .. 15]), "[07]");
+ assert_eq!(
+ format!("{:#X}", &bits[9 .. 15]),
+ "[
+ 0x07,
+]"
+ );
+
+ assert_eq!(format!("{:X}", &bits[4 .. 20]), "[0, 0F, F]");
+ assert_eq!(
+ format!("{:#X}", &bits[4 .. 20]),
+ "[
+ 0x0,
+ 0x0F,
+ 0xF,
+]"
+ );
+
+ assert_eq!(format!("{:X}", &bits[4 ..]), "[0, 0F, FF]");
+ assert_eq!(
+ format!("{:#X}", &bits[4 ..]),
+ "[
+ 0x0,
+ 0x0F,
+ 0xFF,
+]"
+ );
+
+ assert_eq!(format!("{:X}", &bits[.. 20]), "[00, 0F, F]");
+ assert_eq!(
+ format!("{:#X}", &bits[.. 20]),
+ "[
+ 0x00,
+ 0x0F,
+ 0xF,
+]"
+ );
+
+ assert_eq!(format!("{:X}", bits), "[00, 0F, FF]");
+ assert_eq!(
+ format!("{:#X}", bits),
+ "[
+ 0x00,
+ 0x0F,
+ 0xFF,
+]"
+ );
+ }
+}
diff --git a/src/slice/traits.rs b/src/slice/traits.rs
new file mode 100644
index 0000000..749c527
--- /dev/null
+++ b/src/slice/traits.rs
@@ -0,0 +1,582 @@
+#![doc = include_str!("../../doc/slice/traits.md")]
+
+#[cfg(feature = "alloc")]
+use alloc::borrow::ToOwned;
+use core::{
+ cmp,
+ convert::TryFrom,
+ fmt::{
+ self,
+ Binary,
+ Debug,
+ Display,
+ Formatter,
+ LowerHex,
+ Octal,
+ Pointer,
+ UpperHex,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ str,
+};
+
+use wyz::fmt::FmtForward;
+
+use super::BitSlice;
+#[cfg(feature = "alloc")]
+use crate::vec::BitVec;
+use crate::{
+ domain::Domain,
+ mem,
+ order::{
+ BitOrder,
+ Lsb0,
+ Msb0,
+ },
+ store::BitStore,
+ view::BitView,
+};
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-AsRef%3C%5BT%5D%3E)
+impl<T, O> AsRef<Self> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &Self {
+ self
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-AsMut%3C%5BT%5D%3E)
+impl<T, O> AsMut<Self> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut Self {
+ self
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Eq)
+impl<T, O> Eq for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Ord)
+impl<T, O> Ord for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn cmp(&self, rhs: &Self) -> cmp::Ordering {
+ self.partial_cmp(rhs)
+ .expect("BitSlice has a total ordering")
+ }
+}
+
+/** Tests if two `BitSlice`s are semantically — not representationally — equal.
+
+It is valid to compare slices of different ordering or memory types.
+
+The equality condition requires that they have the same length and that at each
+index, the two slices have the same bit value.
+
+[Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-PartialEq%3C%5BB%5D%3E)
+**/
+impl<T1, T2, O1, O2> PartialEq<BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, rhs: &BitSlice<T2, O2>) -> bool {
+ if let (Some(this), Some(that)) =
+ (self.coerce::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>())
+ {
+ this.sp_eq(that)
+ }
+ else if let (Some(this), Some(that)) =
+ (self.coerce::<T1, Msb0>(), rhs.coerce::<T1, Msb0>())
+ {
+ this.sp_eq(that)
+ }
+ else {
+ self.len() == rhs.len()
+ && self
+ .iter()
+ .by_vals()
+ .zip(rhs.iter().by_vals())
+ .all(|(l, r)| l == r)
+ }
+ }
+}
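
Because this comparison is purely semantic, bit-slices over different storage types and orderings are equal whenever their bit sequences match. A minimal sketch of that behavior, using only the public prelude:

```rust
use bitvec::prelude::*;

// Same bit sequence, different storage and ordering parameters.
let a = bits![u8, Msb0; 0, 1, 1, 0];
let b = bits![u16, Lsb0; 0, 1, 1, 0];
assert_eq!(a, b);

// A length mismatch is never equal, regardless of shared prefix.
assert_ne!(a, bits![0, 1, 1]);
```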
+
+// ref-to-val equality
+
+impl<T1, T2, O1, O2> PartialEq<BitSlice<T2, O2>> for &BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, rhs: &BitSlice<T2, O2>) -> bool {
+ **self == rhs
+ }
+}
+
+impl<T1, T2, O1, O2> PartialEq<BitSlice<T2, O2>> for &mut BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, rhs: &BitSlice<T2, O2>) -> bool {
+ **self == rhs
+ }
+}
+
+// val-to-ref equality
+
+impl<T1, T2, O1, O2> PartialEq<&BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, rhs: &&BitSlice<T2, O2>) -> bool {
+ *self == **rhs
+ }
+}
+
+impl<T1, T2, O1, O2> PartialEq<&mut BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, rhs: &&mut BitSlice<T2, O2>) -> bool {
+ *self == **rhs
+ }
+}
+
+/** Compares two `BitSlice`s by semantic — not representational — ordering.
+
+The comparison sorts by testing at each index if one slice has a high bit where
+the other has a low. At the first index where the slices differ, the slice with
+the high bit is greater. If the slices are equal until at least one terminates,
+then they are compared by length.
+
+[Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-PartialOrd%3C%5BT%5D%3E)
+**/
+impl<T1, T2, O1, O2> PartialOrd<BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ for (l, r) in self.iter().by_vals().zip(rhs.iter().by_vals()) {
+ match (l, r) {
+ (true, false) => return Some(cmp::Ordering::Greater),
+ (false, true) => return Some(cmp::Ordering::Less),
+ _ => continue,
+ }
+ }
+ self.len().partial_cmp(&rhs.len())
+ }
+}
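
A short sketch of the ordering rule described above: the first differing index decides, and otherwise the comparison falls back to length.

```rust
use core::cmp::Ordering;
use bitvec::prelude::*;

// Index 0 differs, and the right-hand slice holds the high bit there.
assert!(bits![0, 1, 1] < bits![1, 0, 0]);
// Equal prefixes are ranked by length.
assert_eq!(bits![0, 1].partial_cmp(bits![0, 1, 0]), Some(Ordering::Less));
```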
+
+// ref-to-val ordering
+
+impl<T1, T2, O1, O2> PartialOrd<BitSlice<T2, O2>> for &BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ (*self).partial_cmp(rhs)
+ }
+}
+
+impl<T1, T2, O1, O2> PartialOrd<BitSlice<T2, O2>> for &mut BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(rhs)
+ }
+}
+
+// val-to-ref ordering
+
+impl<T1, T2, O1, O2> PartialOrd<&BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &&BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ (*self).partial_cmp(&**rhs)
+ }
+}
+
+impl<T1, T2, O1, O2> PartialOrd<&mut BitSlice<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &&mut BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ (*self).partial_cmp(&**rhs)
+ }
+}
+
+// &mut-to-& ordering
+
+impl<T1, T2, O1, O2> PartialOrd<&mut BitSlice<T2, O2>> for &BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &&mut BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(&**rhs)
+ }
+}
+
+impl<T1, T2, O1, O2> PartialOrd<&BitSlice<T2, O2>> for &mut BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, rhs: &&BitSlice<T2, O2>) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(&**rhs)
+ }
+}
+
+/** Calls [`BitSlice::try_from_slice`], but returns the original Rust slice on
+error instead of the failure event.
+
+This only fails if `slice.len()` exceeds `BitSlice::MAX_ELTS`.
+
+[`BitSlice::try_from_slice`]: crate::slice::BitSlice::try_from_slice
+**/
+impl<'a, T, O> TryFrom<&'a [T]> for &'a BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Error = &'a [T];
+
+ #[inline]
+ fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> {
+ BitSlice::try_from_slice(slice).map_err(|_| slice)
+ }
+}
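
In practice this conversion only fails for impossibly large source slices, so for ordinary inputs the fallible form behaves like a plain re-typing. A brief sketch:

```rust
use core::convert::TryFrom;
use bitvec::prelude::*;

let raw = [0u8, 1, 2, 3];
let bits = <&BitSlice<u8, Msb0>>::try_from(&raw[..]).unwrap();
assert_eq!(bits.len(), 32);
```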
+
+/** Calls [`BitSlice::try_from_slice_mut`], but returns the original Rust slice
+on error instead of the failure event.
+
+This only fails if `slice.len()` exceeds `BitSlice::MAX_ELTS`.
+
+[`BitSlice::try_from_slice_mut`]: crate::slice::BitSlice::try_from_slice_mut
+**/
+impl<'a, T, O> TryFrom<&'a mut [T]> for &'a mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Error = &'a mut [T];
+
+ #[inline]
+ fn try_from(slice: &'a mut [T]) -> Result<Self, Self::Error> {
+ let slice_ptr = slice as *mut [T];
+ BitSlice::try_from_slice_mut(slice)
+ .map_err(|_| unsafe { &mut *slice_ptr })
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Default-1)
+impl<T, O> Default for &BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ BitSlice::empty()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Default)
+impl<T, O> Default for &mut BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ BitSlice::empty_mut()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Debug)
+impl<T, O> Debug for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ self.as_bitspan().render(fmt, "Slice", None)?;
+ fmt.write_str(" ")?;
+ Display::fmt(self, fmt)
+ }
+}
+
+impl<T, O> Display for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_list()
+ .entries(self.iter().by_vals().map(|b| if b { 1 } else { 0 }))
+ .finish()
+ }
+}
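
The `Display` implementation above renders each bit as a `0` or `1` in a list, and `Debug` prepends the bit-span description to that same text. A sketch of the `Display` output only, since the `Debug` prefix depends on the slice's address:

```rust
use bitvec::prelude::*;

let bits = bits![u8, Msb0; 0, 1, 1, 0];
assert_eq!(format!("{}", bits), "[0, 1, 1, 0]");
```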
+
+impl<T, O> Pointer for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ Pointer::fmt(&self.as_bitspan(), fmt)
+ }
+}
+
+/// Encodes a short bit-slice into an ASCII b36 value.
+#[inline(always)]
+fn bits_to_ascii<T, O>(bits: &BitSlice<T, O>, alpha: u8) -> u8
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ let mut val = 0u8;
+ for bit in bits.iter().by_vals() {
+ val <<= 1;
+ val |= bit as u8;
+ }
+ match val {
+ v @ 0 ..= 9 => b'0' + v,
+ v @ 10 ..= 35 => alpha - 10 + v,
+ _ => unreachable!(
+ "bit-slices wider than five bits cannot be rendered to ASCII b36"
+ ),
+ }
+}
+
+/** Encodes an arbitrary bit-slice into an ASCII b36 string.
+
+## Parameters
+
+- `bits`: the bit-slice to encode.
+- `into`: a provided buffer into which the bit-slice is encoded.
+- `radix`: the bit width of each digit (log2 of its radix).
+- `skip`: the number of bytes to skip before beginning the write.
+- `alpha`: one of `b'a'` or `b'A'`.
+
+## Returns
+
+A subset of `into` that is now initialized to the ASCII encoding.
+**/
+#[inline(always)]
+fn encode_ascii<'a, T, O>(
+ bits: &BitSlice<T, O>,
+ into: &'a mut [u8],
+ radix: usize,
+ mut skip: usize,
+ alpha: u8,
+) -> &'a str
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ for (chunk, slot) in
+ bits.rchunks(radix).rev().zip(into.iter_mut().skip(skip))
+ {
+ *slot = bits_to_ascii(chunk, alpha);
+ skip += 1;
+ }
+ unsafe { str::from_utf8_unchecked(&into[.. skip]) }
+}
+
+/// Constructs the numeric formatting implementations.
+macro_rules! fmt {
+ ($($trait:ident: $alpha:expr, $pfx:expr, $radix:expr;)+) => { $(
+ #[doc = include_str!("../../doc/slice/format.md")]
+ impl<T, O> $trait for BitSlice<T, O>
+ where
+ T: BitStore,
+ O: BitOrder,
+ {
+ #[inline]
+ #[allow(clippy::modulo_one)] // I know what I’m doing.
+ // TODO(myrrlyn): See if Binary codegen ditches the loops.
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ const D: usize = mem::bits_of::<usize>() / $radix;
+ const M: usize = mem::bits_of::<usize>() % $radix;
+ const W: usize = D + (M != 0) as usize;
+ let mut txt: [u8; W + 2] = [b'0'; W + 2];
+ txt[1] = $pfx;
+
+ let start = if fmt.alternate() { 0 } else { 2 };
+ let mut seq = fmt.debug_list();
+ match self.domain() {
+ Domain::Enclave(elem) => {
+ seq.entry(&encode_ascii(
+ elem.into_bitslice(),
+ &mut txt[start ..],
+ $radix,
+ 2 - start,
+ $alpha,
+ ).fmt_display());
+ },
+ Domain::Region { head, body, tail } => {
+ if let Some(elem) = head {
+ seq.entry(&encode_ascii(
+ elem.into_bitslice(),
+ &mut txt[start ..],
+ $radix,
+ 2 - start,
+ $alpha,
+ ).fmt_display());
+ }
+ for elem in body.iter().map(BitStore::load_value) {
+ seq.entry(&encode_ascii(
+ elem.view_bits::<O>(),
+ &mut txt[start ..],
+ $radix,
+ 2 - start,
+ $alpha,
+ ).fmt_display());
+ }
+ if let Some(elem) = tail {
+ seq.entry(&encode_ascii(
+ elem.into_bitslice(),
+ &mut txt[start ..],
+ $radix,
+ 2 - start,
+ $alpha,
+ ).fmt_display());
+ }
+ },
+ }
+ seq.finish()
+ }
+ }
+ )+ };
+}
+
+fmt! {
+ Binary: b'0', b'b', 1;
+ Octal: b'0', b'o', 3;
+ LowerHex: b'a', b'x', 4;
+ UpperHex: b'A', b'x', 4;
+}
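
The generated implementations group their output by memory element, exactly as the formatting tests earlier in this patch show. A condensed sketch of the observable behavior:

```rust
use bitvec::prelude::*;

let data = [0u8, 0x0F];
let bits = data.view_bits::<Msb0>();
assert_eq!(format!("{:b}", bits), "[00000000, 00001111]");
assert_eq!(format!("{:x}", bits), "[00, 0f]");
```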
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Hash)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Hash for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, hasher: &mut H)
+ where H: Hasher {
+ self.iter().by_vals().for_each(|bit| bit.hash(hasher));
+ }
+}
+
+#[doc = include_str!("../../doc/slice/threadsafe.md")]
+unsafe impl<T, O> Send for BitSlice<T, O>
+where
+ T: BitStore + Sync,
+ O: BitOrder,
+{
+}
+
+#[doc = include_str!("../../doc/slice/threadsafe.md")]
+unsafe impl<T, O> Sync for BitSlice<T, O>
+where
+ T: BitStore + Sync,
+ O: BitOrder,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Unpin)
+impl<T, O> Unpin for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-ToOwned)
+#[cfg(feature = "alloc")]
+#[cfg(not(tarpaulin_include))]
+impl<T, O> ToOwned for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Owned = BitVec<T, O>;
+
+ #[inline]
+ fn to_owned(&self) -> Self::Owned {
+ BitVec::from_bitslice(self)
+ }
+}
diff --git a/src/store.rs b/src/store.rs
new file mode 100644
index 0000000..00b52af
--- /dev/null
+++ b/src/store.rs
@@ -0,0 +1,329 @@
+#![doc = include_str!("../doc/store.md")]
+
+use core::{
+ cell::Cell,
+ fmt::Debug,
+};
+
+use funty::Integral;
+
+use crate::{
+ access::*,
+ index::BitIdx,
+ mem::{
+ self,
+ BitRegister,
+ },
+ order::BitOrder,
+};
+
+#[doc = include_str!("../doc/store/BitStore.md")]
+pub trait BitStore: 'static + Debug {
+ /// The element type used in the memory region underlying a `BitSlice`. It
+ /// is *always* one of the unsigned integer fundamentals.
+ type Mem: BitRegister + BitStore<Mem = Self::Mem>;
+ /// A type that selects the appropriate load/store instructions when
+ /// accessing the memory bus. It determines what instructions are used when
+ /// moving a `Self::Mem` value between the processor and the memory system.
+ ///
+ /// This must be *at least* able to manage aliasing.
+ type Access: BitAccess<Item = Self::Mem> + BitStore<Mem = Self::Mem>;
+ /// A sibling `BitStore` implementor that is known to be alias-safe. It is
+ /// used when a `BitSlice` introduces multiple handles that view the same
+ /// memory location, and at least one of them has write capabilities to it.
+ /// It must have the same underlying memory type, and can only change access
+ /// patterns or public-facing usage.
+ type Alias: BitStore<Mem = Self::Mem>;
+ /// The inverse of `::Alias`. It is used when a `BitSlice` removes the
+ /// conditions that required a `T -> T::Alias` transition.
+ type Unalias: BitStore<Mem = Self::Mem>;
+
+ /// The zero constant.
+ const ZERO: Self;
+
+ /// Wraps a raw memory value as a `BitStore` type.
+ fn new(value: Self::Mem) -> Self;
+
+ /// Loads a value out of the memory system according to the `::Access`
+ /// rules. This may be called when the value is aliased by a write-capable
+ /// reference.
+ fn load_value(&self) -> Self::Mem;
+
+ /// Stores a value into the memory system. This is only called when there
+ /// are no other handles to the value, and it may bypass `::Access`
+ /// constraints.
+ fn store_value(&mut self, value: Self::Mem);
+
+ /// Reads a single bit out of the memory system according to the `::Access`
+ /// rules. This is lifted from [`BitAccess`] so that it can be used
+ /// elsewhere without additional casts.
+ ///
+ /// ## Type Parameters
+ ///
+ /// - `O`: The ordering of bits within `Self::Mem` governing the lookup.
+ ///
+ /// ## Parameters
+ ///
+ /// - `index`: The semantic index of a bit in `*self`.
+ ///
+ /// ## Returns
+ ///
+ /// The value of the bit in `*self` at `BitOrder::at(index)`.
+ ///
+ /// [`BitAccess`]: crate::access::BitAccess
+ #[inline]
+ fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool
+ where O: BitOrder {
+ self.load_value() & index.select::<O>().into_inner()
+ != <Self::Mem as Integral>::ZERO
+ }
+
+ /// All implementors are required to have their alignment match their size.
+ ///
+ /// Use [`mem::aligned_to_size::<Self>()`][0] to prove this.
+ ///
+ /// [0]: crate::mem::aligned_to_size
+ const ALIGNED_TO_SIZE: [(); 1];
+
+ /// All implementors are required to have `Self` and `Self::Alias` be equal
+ /// in representation. This is true by fiat for all types except the
+ /// unsigned integers.
+ ///
+ /// Use [`mem::layout_eq::<Self, Self::Alias>()`][0] to prove this.
+ ///
+ /// [0]: crate::mem::layout_eq
+ const ALIAS_WIDTH: [(); 1];
+}
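
All implementors expose the same whole-element load/store surface; the `Access`/`Alias` machinery is only selected internally when bit-slices overlap. A small sketch mirroring the crate's own `load_store` test further down this file:

```rust
use core::cell::Cell;
use bitvec::store::BitStore;

// Plain integers are the common, unaliased storage type.
let mut plain = 0u8;
plain.store_value(0xA5);
assert_eq!(plain.load_value(), 0xA5);

// `Cell` storage allows shared mutation while keeping the same interface.
let mut shared = Cell::new(0u8);
shared.store_value(0x5A);
assert_eq!(shared.load_value(), 0x5A);
```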
+
+/// Generates `BitStore` implementations for ordinary integers and `Cell`s.
+macro_rules! store {
+ ($($base:ty => $safe:ty);+ $(;)?) => { $(
+ impl BitStore for $base {
+ type Mem = Self;
+ /// The unsigned integers will only be `BitStore` type parameters
+ /// for handles to unaliased memory, following the normal Rust
+ /// reference rules.
+ type Access = Cell<Self>;
+ type Alias = $safe;
+ type Unalias = Self;
+
+ const ZERO: Self = 0;
+
+ #[inline]
+ fn new(value: Self::Mem) -> Self { value }
+
+ #[inline]
+ fn load_value(&self) -> Self::Mem {
+ *self
+ }
+
+ #[inline]
+ fn store_value(&mut self, value: Self::Mem) {
+ *self = value;
+ }
+
+ const ALIGNED_TO_SIZE: [(); 1]
+ = [(); mem::aligned_to_size::<Self>() as usize];
+
+ const ALIAS_WIDTH: [(); 1]
+ = [(); mem::layout_eq::<Self, Self::Alias>() as usize];
+ }
+
+ impl BitStore for $safe {
+ type Mem = $base;
+ type Access = <Self as BitSafe>::Rad;
+ type Alias = Self;
+ type Unalias = $base;
+
+ const ZERO: Self = <Self as BitSafe>::ZERO;
+
+ #[inline]
+ fn new(value: Self::Mem) -> Self { <Self>::new(value) }
+
+ #[inline]
+ fn load_value(&self) -> Self::Mem {
+ self.load()
+ }
+
+ #[inline]
+ fn store_value(&mut self, value: Self::Mem) {
+ *self = Self::new(value);
+ }
+
+ const ALIGNED_TO_SIZE: [(); 1]
+ = [(); mem::aligned_to_size::<Self>() as usize];
+
+ const ALIAS_WIDTH: [(); 1] = [()];
+ }
+
+ impl BitStore for Cell<$base> {
+ type Mem = $base;
+ type Access = Self;
+ type Alias = Self;
+ type Unalias = Self;
+
+ const ZERO: Self = Self::new(0);
+
+ #[inline]
+ fn new(value: Self::Mem) -> Self { <Self>::new(value) }
+
+ #[inline]
+ fn load_value(&self) -> Self::Mem {
+ self.get()
+ }
+
+ #[inline]
+ fn store_value(&mut self, value: Self::Mem) {
+ *self = Self::new(value);
+ }
+
+ const ALIGNED_TO_SIZE: [(); 1]
+ = [(); mem::aligned_to_size::<Self>() as usize];
+
+ const ALIAS_WIDTH: [(); 1] = [()];
+ }
+ )+ };
+}
+
+store! {
+ u8 => BitSafeU8;
+ u16 => BitSafeU16;
+ u32 => BitSafeU32;
+}
+
+#[cfg(target_pointer_width = "64")]
+store!(u64 => BitSafeU64);
+
+store!(usize => BitSafeUsize);
+
+/// Generates `BitStore` implementations for atomic types.
+macro_rules! atomic {
+ ($($size:tt, $base:ty => $atom:ident);+ $(;)?) => { $(
+ radium::if_atomic!(if atomic($size) {
+ use core::sync::atomic::$atom;
+
+ impl BitStore for $atom {
+ type Mem = $base;
+ type Access = Self;
+ type Alias = Self;
+ type Unalias = Self;
+
+ const ZERO: Self = <Self>::new(0);
+
+ #[inline]
+ fn new(value: Self::Mem) -> Self { <Self>::new(value) }
+
+ #[inline]
+ fn load_value(&self) -> Self::Mem {
+ self.load(core::sync::atomic::Ordering::Relaxed)
+ }
+
+ #[inline]
+ fn store_value(&mut self, value: Self::Mem) {
+ *self = Self::new(value);
+ }
+
+ const ALIGNED_TO_SIZE: [(); 1]
+ = [(); mem::aligned_to_size::<Self>() as usize];
+
+ const ALIAS_WIDTH: [(); 1] = [()];
+ }
+ });
+ )+ };
+}
+
+atomic! {
+ 8, u8 => AtomicU8;
+ 16, u16 => AtomicU16;
+ 32, u32 => AtomicU32;
+}
+
+#[cfg(target_pointer_width = "64")]
+atomic!(64, u64 => AtomicU64);
+
+atomic!(size, usize => AtomicUsize);
+
+#[cfg(test)]
+mod tests {
+ use static_assertions::*;
+
+ use super::*;
+ use crate::prelude::*;
+
+ #[test]
+ fn load_store() {
+ let mut word = 0usize;
+
+ word.store_value(39);
+ assert_eq!(word.load_value(), 39);
+
+ let mut safe = BitSafeUsize::new(word);
+ safe.store_value(57);
+ assert_eq!(safe.load_value(), 57);
+
+ let mut cell = Cell::new(0usize);
+ cell.store_value(39);
+ assert_eq!(cell.load_value(), 39);
+
+ radium::if_atomic!(if atomic(size) {
+ let mut atom = AtomicUsize::new(0);
+ atom.store_value(57);
+ assert_eq!(atom.load_value(), 57);
+ });
+ }
+
+ /// Unaliased `BitSlice`s are universally threadsafe, because they satisfy
+ /// Rust’s unsynchronized mutation rules.
+ #[test]
+ fn unaliased_send_sync() {
+ assert_impl_all!(BitSlice<u8, LocalBits>: Send, Sync);
+ assert_impl_all!(BitSlice<u16, LocalBits>: Send, Sync);
+ assert_impl_all!(BitSlice<u32, LocalBits>: Send, Sync);
+ assert_impl_all!(BitSlice<usize, LocalBits>: Send, Sync);
+
+ #[cfg(target_pointer_width = "64")]
+ assert_impl_all!(BitSlice<u64, LocalBits>: Send, Sync);
+ }
+
+ #[test]
+ fn cell_unsend_unsync() {
+ assert_not_impl_any!(BitSlice<Cell<u8>, LocalBits>: Send, Sync);
+ assert_not_impl_any!(BitSlice<Cell<u16>, LocalBits>: Send, Sync);
+ assert_not_impl_any!(BitSlice<Cell<u32>, LocalBits>: Send, Sync);
+ assert_not_impl_any!(BitSlice<Cell<usize>, LocalBits>: Send, Sync);
+
+ #[cfg(target_pointer_width = "64")]
+ assert_not_impl_any!(BitSlice<Cell<u64>, LocalBits>: Send, Sync);
+ }
+
+ /// In non-atomic builds, aliased `BitSlice`s become universally
+ /// thread-unsafe. An `&mut BitSlice` is an `&Cell`, and `&Cell` cannot be
+ /// sent across threads.
+ ///
+ /// This test cannot be meaningfully expressed in atomic builds, because the
+ /// atomicity of a `BitSafeUN` type is target-specific, and expressed in
+ /// `radium` rather than in `bitvec`.
+ #[test]
+ #[cfg(not(feature = "atomic"))]
+ fn aliased_non_atomic_unsend_unsync() {
+ assert_not_impl_any!(BitSlice<BitSafeU8, LocalBits>: Send, Sync);
+ assert_not_impl_any!(BitSlice<BitSafeU16, LocalBits>: Send, Sync);
+ assert_not_impl_any!(BitSlice<BitSafeU32, LocalBits>: Send, Sync);
+ assert_not_impl_any!(BitSlice<BitSafeUsize, LocalBits>: Send, Sync);
+
+ #[cfg(target_pointer_width = "64")]
+ assert_not_impl_any!(BitSlice<BitSafeU64, LocalBits>: Send, Sync);
+ }
+
+ #[test]
+ #[cfg(feature = "atomic")]
+ fn aliased_atomic_send_sync() {
+ assert_impl_all!(BitSlice<AtomicU8, LocalBits>: Send, Sync);
+ assert_impl_all!(BitSlice<AtomicU16, LocalBits>: Send, Sync);
+ assert_impl_all!(BitSlice<AtomicU32, LocalBits>: Send, Sync);
+ assert_impl_all!(BitSlice<AtomicUsize, LocalBits>: Send, Sync);
+
+ #[cfg(target_pointer_width = "64")]
+ assert_impl_all!(BitSlice<AtomicU64, LocalBits>: Send, Sync);
+ }
+}
diff --git a/src/vec.rs b/src/vec.rs
new file mode 100644
index 0000000..ead0b43
--- /dev/null
+++ b/src/vec.rs
@@ -0,0 +1,666 @@
+#![doc = include_str!("../doc/vec.md")]
+#![cfg(feature = "alloc")]
+
+#[cfg(not(feature = "std"))]
+use alloc::vec;
+use alloc::vec::Vec;
+use core::{
+ mem::{
+ self,
+ ManuallyDrop,
+ },
+ ptr,
+ slice,
+};
+
+use tap::Pipe;
+use wyz::comu::{
+ Const,
+ Mut,
+};
+
+pub use self::iter::{
+ Drain,
+ Splice,
+};
+pub use crate::boxed::IntoIter;
+use crate::{
+ boxed::BitBox,
+ index::BitIdx,
+ mem::bits_of,
+ order::{
+ BitOrder,
+ Lsb0,
+ },
+ ptr::{
+ AddressExt,
+ BitPtr,
+ BitSpan,
+ BitSpanError,
+ },
+ slice::BitSlice,
+ store::BitStore,
+ view::BitView,
+};
+
+mod api;
+mod iter;
+mod ops;
+mod tests;
+mod traits;
+
+#[repr(C)]
+#[doc = include_str!("../doc/vec/BitVec.md")]
+pub struct BitVec<T = usize, O = Lsb0>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Span description of the live bits in the allocation.
+ bitspan: BitSpan<Mut, T, O>,
+ /// Allocation capacity, measured in `T` elements.
+ capacity: usize,
+}
+
+/// Constructors.
+impl<T, O> BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// An empty bit-vector with no backing allocation.
+ pub const EMPTY: Self = Self {
+ bitspan: BitSpan::EMPTY,
+ capacity: 0,
+ };
+
+ /// Creates a new bit-vector by repeating a bit for the desired length.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let zeros = BitVec::<u8, Msb0>::repeat(false, 50);
+ /// let ones = BitVec::<u16, Lsb0>::repeat(true, 50);
+ /// ```
+ #[inline]
+ pub fn repeat(bit: bool, len: usize) -> Self {
+ let mut out = Self::with_capacity(len);
+ unsafe {
+ out.set_len(len);
+ out.as_raw_mut_slice().fill_with(|| {
+ BitStore::new(if bit { !<T::Mem>::ZERO } else { <T::Mem>::ZERO })
+ });
+ }
+ out
+ }
+
+ /// Copies the contents of a bit-slice into a new heap allocation.
+ ///
+ /// This copies the raw underlying elements into a new allocation, and sets
+ /// the produced bit-vector to use the same memory layout as the originating
+ /// bit-slice. This means that it may begin at any bit in the first element,
+ /// not just the zeroth bit. If you require this property, call
+ /// [`.force_align()`].
+ ///
+ /// Dead bits in the copied memory elements are guaranteed to be zeroed.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bits = bits![0, 1, 0, 0, 1];
+ /// let bv = BitVec::from_bitslice(bits);
+ /// assert_eq!(bv, bits);
+ /// ```
+ ///
+ /// [`.force_align()`]: Self::force_align
+ #[inline]
+ pub fn from_bitslice(slice: &BitSlice<T, O>) -> Self {
+ let bitspan = slice.as_bitspan();
+
+ let mut vec = bitspan
+ .elements()
+ .pipe(Vec::with_capacity)
+ .pipe(ManuallyDrop::new);
+ vec.extend(slice.domain());
+
+ let bitspan = unsafe {
+ BitSpan::new_unchecked(
+ vec.as_mut_ptr().cast::<T>().into_address(),
+ bitspan.head(),
+ bitspan.len(),
+ )
+ };
+ let capacity = vec.capacity();
+ Self { bitspan, capacity }
+ }
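
As the documentation notes, the copy preserves the source layout rather than re-aligning it, so a bit-vector built from the middle of an element still starts partway into its first element. A brief sketch:

```rust
use bitvec::prelude::*;

let data = 0b1011_1101u8;
let bits = data.view_bits::<Msb0>();

// Copy the middle run of four set bits.
let bv = BitVec::from_bitslice(&bits[2 .. 6]);
assert_eq!(bv.as_bitslice(), bits![1; 4]);
// The copy still occupies a single `u8`, starting at bit 2.
assert_eq!(bv.as_raw_slice().len(), 1);
```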
+
+ /// Constructs a new bit-vector from a single element.
+ ///
+ /// This copies `elem` into a new heap allocation, and sets the bit-vector
+ /// to cover it entirely.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = BitVec::<_, Msb0>::from_element(1u8);
+ /// assert!(bv[7]);
+ /// ```
+ #[inline]
+ pub fn from_element(elem: T) -> Self {
+ Self::from_vec(vec![elem])
+ }
+
+ /// Constructs a new bit-vector from a slice of memory elements.
+ ///
+ /// This copies `slice` into a new heap allocation, and sets the bit-vector
+ /// to cover it entirely.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `slice` exceeds bit-vector capacity.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let slice = &[0u8, 1, 2, 3];
+ /// let bv = BitVec::<_, Lsb0>::from_slice(slice);
+ /// assert_eq!(bv.len(), 32);
+ /// ```
+ #[inline]
+ pub fn from_slice(slice: &[T]) -> Self {
+ Self::try_from_slice(slice).unwrap()
+ }
+
+ /// Fallibly constructs a new bit-vector from a slice of memory elements.
+ ///
+	/// This fails early if `slice` exceeds bit-vector capacity. If it does not,
+ /// then `slice` is copied into a new heap allocation and fully spanned by
+ /// the returned bit-vector.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let slice = &[0u8, 1, 2, 3];
+ /// let bv = BitVec::<_, Lsb0>::try_from_slice(slice).unwrap();
+ /// assert_eq!(bv.len(), 32);
+ /// ```
+ #[inline]
+ pub fn try_from_slice(slice: &[T]) -> Result<Self, BitSpanError<T>> {
+ BitSlice::<T, O>::try_from_slice(slice).map(Self::from_bitslice)
+ }
+
+ /// Converts a regular vector in-place into a bit-vector.
+ ///
+ /// The produced bit-vector spans every bit in the original vector. No
+ /// reällocation occurs; this is purely a transform of the handle.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the source vector is too long to view as a bit-slice.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let v = vec![0u8, 1, 2, 3];
+ /// let bv = BitVec::<_, Msb0>::from_vec(v);
+ /// assert_eq!(bv.len(), 32);
+ /// ```
+ #[inline]
+ pub fn from_vec(vec: Vec<T>) -> Self {
+ Self::try_from_vec(vec)
+ .expect("vector was too long to be converted into a `BitVec`")
+ }
+
+ /// Attempts to convert a regular vector in-place into a bit-vector.
+ ///
+ /// This fails if the source vector is too long to view as a bit-slice. On
+ /// success, the produced bit-vector spans every bit in the original vector.
+ /// No reällocation occurs; this is purely a transform of the handle.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let v = vec![0u8; 20];
+ /// assert_eq!(BitVec::<_, Msb0>::try_from_vec(v).unwrap().len(), 160);
+ /// ```
+ ///
+ /// It is not practical to allocate a vector that will fail this conversion.
+ #[inline]
+ pub fn try_from_vec(vec: Vec<T>) -> Result<Self, Vec<T>> {
+ let mut vec = ManuallyDrop::new(vec);
+ let capacity = vec.capacity();
+
+ BitPtr::from_mut_slice(vec.as_mut_slice())
+ .span(vec.len() * bits_of::<T::Mem>())
+ .map(|bitspan| Self { bitspan, capacity })
+ .map_err(|_| ManuallyDrop::into_inner(vec))
+ }
+
+ /// Appends the contents of a bit-slice to a bit-vector.
+ ///
+ /// This can extend from a bit-slice of any type parameters; it is not
+ /// restricted to using the same parameters as `self`. However, when the
+ /// type parameters *do* match, it is possible for this to use a batch-copy
+ /// optimization to go faster than the individual-bit crawl that is
+ /// necessary when they differ.
+ ///
+ /// Until Rust provides extensive support for specialization in trait
+ /// implementations, you should use this method whenever you are extending
+ /// from a `BitSlice` proper, and only use the general [`.extend()`]
+ /// implementation if you are required to use a generic `bool` source.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::extend_from_slice`](alloc::vec::Vec::extend_from_slice)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1];
+ /// bv.extend_from_bitslice(bits![0, 1, 0, 0, 1]);
+ /// assert_eq!(bv, bits![0, 1, 0, 1, 0, 0, 1]);
+ /// ```
+ ///
+ /// [`.extend()`]: https://docs.rs/bitvec/latest/bitvec/vec/struct.Vec.html#impl-Extend
+ #[inline]
+ pub fn extend_from_bitslice<T2, O2>(&mut self, other: &BitSlice<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ let len = self.len();
+ let olen = other.len();
+ self.resize(len + olen, false);
+ unsafe { self.get_unchecked_mut(len ..) }.clone_from_bitslice(other);
+ }
+
+ /// Appends a slice of `T` elements to a bit-vector.
+ ///
+ /// The slice is viewed as a `BitSlice<T, O>`, then appended directly to the
+ /// bit-vector.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::extend_from_slice`](alloc::vec::Vec::extend_from_slice)
+ #[inline]
+ pub fn extend_from_raw_slice(&mut self, slice: &[T]) {
+ self.extend_from_bitslice(slice.view_bits::<O>());
+ }
+}
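
`extend_from_raw_slice` is the element-level counterpart of `extend_from_bitslice`: every bit of the supplied elements is appended, interpreted in the vector's own ordering. A short sketch:

```rust
use bitvec::prelude::*;

let mut bv = BitVec::<u8, Msb0>::new();
bv.extend_from_raw_slice(&[0x0Fu8]);
assert_eq!(bv.len(), 8);
assert_eq!(bv.as_bitslice(), bits![0, 0, 0, 0, 1, 1, 1, 1]);
```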
+
+/// Converters.
+impl<T, O> BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Explicitly views the bit-vector as a bit-slice.
+ #[inline]
+ pub fn as_bitslice(&self) -> &BitSlice<T, O> {
+ unsafe { self.bitspan.into_bitslice_ref() }
+ }
+
+ /// Explicitly views the bit-vector as a mutable bit-slice.
+ #[inline]
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<T, O> {
+ unsafe { self.bitspan.into_bitslice_mut() }
+ }
+
+ /// Views the bit-vector as a slice of its underlying memory elements.
+ #[inline]
+ pub fn as_raw_slice(&self) -> &[T] {
+ let (data, len) =
+ (self.bitspan.address().to_const(), self.bitspan.elements());
+ unsafe { slice::from_raw_parts(data, len) }
+ }
+
+ /// Views the bit-vector as a mutable slice of its underlying memory
+ /// elements.
+ #[inline]
+ pub fn as_raw_mut_slice(&mut self) -> &mut [T] {
+ let (data, len) =
+ (self.bitspan.address().to_mut(), self.bitspan.elements());
+ unsafe { slice::from_raw_parts_mut(data, len) }
+ }
+
+ /// Creates an unsafe shared bit-pointer to the start of the buffer.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::as_ptr`](alloc::vec::Vec::as_ptr)
+ ///
+ /// ## Safety
+ ///
+ /// You must initialize the contents of the underlying buffer before
+ /// accessing memory through this pointer. See the `BitPtr` documentation
+ /// for more details.
+ #[inline]
+ pub fn as_bitptr(&self) -> BitPtr<Const, T, O> {
+ self.bitspan.to_bitptr().to_const()
+ }
+
+ /// Creates an unsafe writable bit-pointer to the start of the buffer.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::as_mut_ptr`](alloc::vec::Vec::as_mut_ptr)
+ ///
+ /// ## Safety
+ ///
+ /// You must initialize the contents of the underlying buffer before
+ /// accessing memory through this pointer. See the `BitPtr` documentation
+ /// for more details.
+ #[inline]
+ pub fn as_mut_bitptr(&mut self) -> BitPtr<Mut, T, O> {
+ self.bitspan.to_bitptr()
+ }
+
+ /// Converts a bit-vector into a boxed bit-slice.
+ ///
+ /// This may cause a reällocation to drop any excess capacity.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::into_boxed_slice`](alloc::vec::Vec::into_boxed_slice)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = bitvec![0, 1, 0, 0, 1];
+ /// let bb = bv.into_boxed_bitslice();
+ /// ```
+ #[inline]
+ pub fn into_boxed_bitslice(self) -> BitBox<T, O> {
+ let mut bitspan = self.bitspan;
+ let mut boxed =
+ self.into_vec().into_boxed_slice().pipe(ManuallyDrop::new);
+ unsafe {
+ bitspan.set_address(boxed.as_mut_ptr().into_address());
+ BitBox::from_raw(bitspan.into_bitslice_ptr_mut())
+ }
+ }
+
+ /// Converts a bit-vector into a `Vec` of its underlying storage.
+ ///
+ /// The produced vector contains all elements that contained live bits. Dead
+ /// bits have an unspecified value; you should call [`.set_uninitialized()`]
+ /// before converting into a vector.
+ ///
+ /// This does not affect the allocated memory; it is purely a conversion of
+ /// the handle.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = bitvec![u8, Msb0; 0, 1, 0, 0, 1];
+ /// let v = bv.into_vec();
+ /// assert_eq!(v[0] & 0xF8, 0b01001_000);
+ /// ```
+ ///
+ /// [`.set_uninitialized()`]: Self::set_uninitialized
+ #[inline]
+ pub fn into_vec(self) -> Vec<T> {
+ let (bitspan, capacity) = (self.bitspan, self.capacity);
+ mem::forget(self);
+ unsafe {
+ Vec::from_raw_parts(
+ bitspan.address().to_mut(),
+ bitspan.elements(),
+ capacity,
+ )
+ }
+ }
+}
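
The converters above give both bit-level and element-level views over the same buffer, and writes through one view are visible through the other. A minimal sketch:

```rust
use bitvec::prelude::*;

let mut bv = bitvec![u8, Msb0; 0, 0, 0, 0, 1, 1, 1, 1];
assert_eq!(bv.as_raw_slice(), &[0x0Fu8]);

// Setting the Msb0 index 0 flips the most significant bit of the element.
bv.as_mut_bitslice().set(0, true);
assert_eq!(bv.as_raw_slice(), &[0x8Fu8]);
```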
+
+/// Utilities.
+impl<T, O> BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Overwrites each element (visible in [`.as_raw_mut_slice()`]) with a new
+ /// bit-pattern.
+ ///
+ /// This unconditionally writes `element` into each element in the backing
+ /// slice, without altering the bit-vector’s length or capacity.
+ ///
+ /// This guarantees that dead bits visible in [`.as_raw_slice()`] but not
+ /// [`.as_bitslice()`] are initialized according to the bit-pattern of
+	/// `element`. The elements not visible in the raw slice, but present in the
+ /// allocation, do *not* specify a value. You may not rely on them being
+ /// zeroed *or* being set to the `element` bit-pattern.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `element`: The bit-pattern with which each live element in the backing
+ /// store is initialized.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![u8, Msb0; 0; 20];
+ /// assert_eq!(bv.as_raw_slice(), [0; 3]);
+ /// bv.set_elements(0xA5);
+ /// assert_eq!(bv.as_raw_slice(), [0xA5; 3]);
+ /// ```
+ ///
+ /// [`.as_bitslice()`]: Self::as_bitslice
+ /// [`.as_raw_mut_slice()`]: Self::as_raw_mut_slice
+ /// [`.as_raw_slice()`]: Self::as_raw_slice
+ #[inline]
+ pub fn set_elements(&mut self, element: T::Mem) {
+ self.as_raw_mut_slice()
+ .iter_mut()
+ .for_each(|elt| elt.store_value(element));
+ }
+
+ /// Sets the uninitialized bits of a bit-vector to a known value.
+ ///
+ /// This method modifies all bits that are observable in [`.as_raw_slice()`]
+ /// but *not* observable in [`.as_bitslice()`] to a known value.
+ /// Memory beyond the raw-slice view, but still within the allocation, is
+ /// considered fully dead and will never be seen.
+ ///
+ /// This can be used to zero the unused memory so that when viewed as a raw
+ /// slice, unused bits have a consistent and predictable value.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = 0b1101_1100u8.view_bits::<Lsb0>().to_bitvec();
+ /// assert_eq!(bv.as_raw_slice()[0], 0b1101_1100u8);
+ ///
+ /// bv.truncate(4);
+ /// assert_eq!(bv.count_ones(), 2);
+ /// assert_eq!(bv.as_raw_slice()[0], 0b1101_1100u8);
+ ///
+ /// bv.set_uninitialized(false);
+ /// assert_eq!(bv.as_raw_slice()[0], 0b0000_1100u8);
+ ///
+ /// bv.set_uninitialized(true);
+ /// assert_eq!(bv.as_raw_slice()[0], 0b1111_1100u8);
+ /// ```
+ ///
+ /// [`.as_bitslice()`]: Self::as_bitslice
+ /// [`.as_raw_slice()`]: Self::as_raw_slice
+ #[inline]
+ pub fn set_uninitialized(&mut self, value: bool) {
+ let head = self.bitspan.head().into_inner() as usize;
+ let last = head + self.len();
+ let all = self.as_raw_mut_slice().view_bits_mut::<O>();
+ unsafe {
+ all.get_unchecked_mut(.. head).fill(value);
+ all.get_unchecked_mut(last ..).fill(value);
+ }
+ }
+
+	/// Ensures that the live region of the bit-vector’s contents begins at the
+ /// front edge of the buffer.
+ ///
+ /// `BitVec` has performance optimizations where it moves its view of its
+ /// buffer contents in order to avoid needless moves of its data within the
+ /// buffer. This can lead to unexpected contents of the raw memory values,
+ /// so this method ensures that the semantic contents of the bit-vector
+ /// match its in-memory storage.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let data = 0b00_1111_00u8;
+ /// let bits = data.view_bits::<Msb0>();
+ ///
+ /// let mut bv = bits[2 .. 6].to_bitvec();
+ /// assert_eq!(bv, bits![1; 4]);
+ /// assert_eq!(bv.as_raw_slice()[0], data);
+ ///
+ /// bv.force_align();
+ /// assert_eq!(bv, bits![1; 4]);
+ /// // BitVec does not specify the value of dead bits in its buffer.
+ /// assert_eq!(bv.as_raw_slice()[0] & 0xF0, 0xF0);
+ /// ```
+ #[inline]
+ pub fn force_align(&mut self) {
+ let mut bitspan = self.bitspan;
+ let len = bitspan.len();
+ let head = self.bitspan.head();
+ if head == BitIdx::MIN {
+ return;
+ }
+ let head = head.into_inner() as usize;
+ let last = head + len;
+ unsafe {
+ bitspan.set_head(BitIdx::MIN);
+ bitspan.set_len(last);
+ bitspan
+ .into_bitslice_mut()
+ .copy_within_unchecked(head .., 0);
+ bitspan.set_len(len);
+ }
+ self.bitspan = bitspan;
+ }
+
+ /// Sets the starting-bit index of the span descriptor.
+ ///
+ /// ## Safety
+ ///
+ /// The new `head` value must not cause the final bits of the bit-vector to
+ /// depart allocated memory.
+ pub(crate) unsafe fn set_head(&mut self, new_head: BitIdx<T::Mem>) {
+ self.bitspan.set_head(new_head);
+ }
+
+ /// Sets a bit-vector’s length without checking that it fits in the
+ /// allocated capacity.
+ ///
+ /// ## Safety
+ ///
+ /// `new_len` must not exceed `self.capacity()`.
+ pub(crate) unsafe fn set_len_unchecked(&mut self, new_len: usize) {
+ self.bitspan.set_len(new_len);
+ }
+
+ /// Asserts that a length can be encoded into the bit-vector handle.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `len` is too large to encode into a `BitSpan`.
+ #[inline]
+ fn assert_len_encodable(len: usize) {
+ assert!(
+ BitSpan::<Const, T, O>::len_encodable(len),
+ "bit-vector capacity exceeded: {} > {}",
+ len,
+ BitSlice::<T, O>::MAX_BITS,
+ );
+ }
+
+ /// Reserves some memory through the underlying vector.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `additional`: The amount of additional space required after
+ /// `self.len()` in the allocation.
+ /// - `func`: A function that manipulates the memory reservation of the
+ /// underlying vector.
+ ///
+ /// ## Behavior
+ ///
+ /// `func` should perform the appropriate action to allocate space for at
+ /// least `additional` more bits. After it returns, the underlying vector is
+ /// extended with zero-initialized elements until `self.len() + additional`
+ /// bits have been given initialized memory.
+ #[inline]
+ fn do_reservation(
+ &mut self,
+ additional: usize,
+ func: impl FnOnce(&mut Vec<T>, usize),
+ ) {
+ let len = self.len();
+ let new_len = len.saturating_add(additional);
+ Self::assert_len_encodable(new_len);
+
+ let (head, elts) = (self.bitspan.head(), self.bitspan.elements());
+ let new_elts =
+ crate::mem::elts::<T>(head.into_inner() as usize + new_len);
+
+ let extra_elts = new_elts - elts;
+ self.with_vec(|vec| {
+ func(&mut **vec, extra_elts);
+ // Ensure that any new elements are initialized.
+ vec.resize_with(new_elts, || <T as BitStore>::ZERO);
+ });
+ }
+
+ /// Briefly constructs an ordinary `Vec` controlling the buffer, allowing
+ /// operations to be applied to the memory allocation.
+ ///
+ /// ## Parameters
+ ///
+ /// - `&mut self`
+ /// - `func`: A function which may interact with the memory allocation.
+ ///
+ /// After `func` runs, `self` is updated with the temporary `Vec`’s address
+ /// and capacity.
+ #[inline]
+ fn with_vec<F, R>(&mut self, func: F) -> R
+ where F: FnOnce(&mut ManuallyDrop<Vec<T>>) -> R {
+ let mut vec = unsafe { ptr::read(self) }
+ .into_vec()
+ .pipe(ManuallyDrop::new);
+ let out = func(&mut vec);
+
+ unsafe {
+ self.bitspan.set_address(vec.as_mut_ptr().into_address());
+ }
+ self.capacity = vec.capacity();
+ out
+ }
+}
diff --git a/src/vec/api.rs b/src/vec/api.rs
new file mode 100644
index 0000000..09d2d65
--- /dev/null
+++ b/src/vec/api.rs
@@ -0,0 +1,1031 @@
+//! Port of the `Vec<bool>` inherent API.
+
+use alloc::vec::Vec;
+use core::{
+ mem::ManuallyDrop,
+ ops::RangeBounds,
+};
+
+use tap::Pipe;
+use wyz::{
+ comu::{
+ Const,
+ Mut,
+ },
+ range::RangeExt,
+};
+
+use super::{
+ BitVec,
+ Drain,
+ Splice,
+};
+use crate::{
+ boxed::BitBox,
+ index::BitEnd,
+ mem,
+ order::BitOrder,
+ ptr::{
+ AddressExt,
+ BitPtr,
+ BitSpan,
+ },
+ slice::BitSlice,
+ store::BitStore,
+};
+
+/// Port of the `Vec<T>` inherent API.
+impl<T, O> BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ /// Constructs a new, empty, bit-vector.
+ ///
+ /// This does not allocate until bits are [`.push()`]ed into it, or space is
+ /// explicitly [`.reserve()`]d.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::new`](alloc::vec::Vec::new)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = BitVec::<u8, Msb0>::new();
+ /// assert!(bv.is_empty());
+ /// ```
+ ///
+ /// [`.push()`]: Self::push
+ /// [`.reserve()`]: Self::reserve
+ #[inline]
+ pub fn new() -> Self {
+ Self::EMPTY
+ }
+
+ /// Allocates a new, empty, bit-vector with space for at least `capacity`
+ /// bits before reallocating.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::with_capacity`](alloc::vec::Vec::with_capacity)
+ ///
+ /// ## Panics
+ ///
+	/// This panics if the requested capacity is greater than what the bit-vector
+ /// can represent. See [`BitSlice::MAX_BITS`].
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv: BitVec = BitVec::with_capacity(128);
+ ///
+ /// assert!(bv.is_empty());
+ /// assert!(bv.capacity() >= 128);
+ ///
+ /// for i in 0 .. 128 {
+ /// bv.push(i & 0xC0 == i);
+ /// }
+ /// assert_eq!(bv.len(), 128);
+ /// assert!(bv.capacity() >= 128);
+ ///
+ /// bv.push(false);
+ /// assert_eq!(bv.len(), 129);
+ /// assert!(bv.capacity() >= 129);
+ /// ```
+ ///
+ /// [`BitSlice::MAX_BITS`]: crate::slice::BitSlice::MAX_BITS
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::assert_len_encodable(capacity);
+ let mut vec = capacity
+ .pipe(crate::mem::elts::<T>)
+ .pipe(Vec::<T>::with_capacity)
+ .pipe(ManuallyDrop::new);
+ let (addr, capacity) = (vec.as_mut_ptr(), vec.capacity());
+ let bitspan = BitSpan::uninhabited(unsafe { addr.into_address() });
+ Self { bitspan, capacity }
+ }
+
+ /// Constructs a bit-vector handle from its constituent fields.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::from_raw_parts`](alloc::vec::Vec::from_raw_parts)
+ ///
+ /// ## Safety
+ ///
+ /// The **only** acceptable argument values for this function are those that
+ /// were previously produced by calling [`.into_raw_parts()`]. Furthermore,
+ /// you may only call this **at most once** on any set of arguments. Using
+ /// the same arguments in more than one call to this function will result in
+	/// a double-free or use-after-free error.
+ ///
+ /// Attempting to conjure your own values and pass them into this function
+ /// will break the allocator state.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = bitvec![0, 1, 0, 0, 1];
+ /// let (bitptr, len, capa) = bv.into_raw_parts();
+ /// let bv2 = unsafe {
+ /// BitVec::from_raw_parts(bitptr, len, capa)
+ /// };
+ /// assert_eq!(bv2, bits![0, 1, 0, 0, 1]);
+ /// ```
+ ///
+ /// [`.into_raw_parts()`]: Self::into_raw_parts
+ #[inline]
+ pub unsafe fn from_raw_parts(
+ bitptr: BitPtr<Mut, T, O>,
+ length: usize,
+ capacity: usize,
+ ) -> Self {
+ let bitspan = bitptr.span_unchecked(length);
+ Self {
+ bitspan,
+ capacity: mem::elts::<T>(
+ capacity.saturating_add(bitspan.head().into_inner() as usize),
+ ),
+ }
+ }
+
+ /// Decomposes a bit-vector into its constituent member fields.
+ ///
+ /// This disarms the destructor. In order to prevent a memory leak, you must
+ /// pass **these exact values** back into [`::from_raw_parts()`].
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::into_raw_parts`](alloc::vec::Vec::into_raw_parts)
+ ///
+ /// ## API Differences
+ ///
+ /// This method is still unstable as of 1.54. It is provided here as a
+ /// convenience, under the expectation that the standard-library method will
+ /// stabilize as-is.
+ ///
+ /// [`::from_raw_parts()`]: Self::from_raw_parts
+ #[inline]
+ pub fn into_raw_parts(self) -> (BitPtr<Mut, T, O>, usize, usize) {
+ let this = ManuallyDrop::new(self);
+ (
+ this.bitspan.to_bitptr(),
+ this.bitspan.len(),
+ this.capacity(),
+ )
+ }
+
+ /// Gets the allocation capacity, measured in bits.
+ ///
+ /// This counts how many total bits the bit-vector can store before it must
+ /// perform a reällocation to acquire more memory.
+ ///
+ /// If the capacity is not a multiple of 8, you should call
+ /// [`.force_align()`].
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::capacity`](alloc::vec::Vec::capacity)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = bitvec![0, 1, 0, 0, 1];
+ /// ```
+ ///
+ /// [`.force_align()`]: Self::force_align
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.capacity
+ .checked_mul(mem::bits_of::<T>())
+ .expect("bit-vector capacity exceeded")
+ .saturating_sub(self.bitspan.head().into_inner() as usize)
+ }
+
+ /// Ensures that the bit-vector has allocation capacity for *at least*
+ /// `additional` more bits to be appended to it.
+ ///
+ /// For convenience, this method *guarantees* that the underlying memory for
+ /// `self[.. self.len() + additional]` is initialized, and may be safely
+ /// accessed directly without requiring use of `.push()` or `.extend()` to
+ /// initialize it.
+ ///
+ /// Newly-allocated memory is always initialized to zero. It is still *dead*
+ /// until the bit-vector is grown (by `.push()`, `.extend()`, or
+ /// `.set_len()`), but direct access will not trigger UB.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::reserve`](alloc::vec::Vec::reserve)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the new capacity exceeds the bit-vector’s maximum.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv: BitVec = BitVec::with_capacity(80);
+ /// assert!(bv.capacity() >= 80);
+ /// bv.reserve(800);
+ /// assert!(bv.capacity() >= 800);
+ /// ```
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ Self::assert_len_encodable(self.len() + additional);
+ self.do_reservation(additional, Vec::<T>::reserve);
+ }
+
+ /// Ensures that the bit-vector has allocation capacity for *at least*
+ /// `additional` more bits to be appended to it.
+ ///
+ /// This differs from [`.reserve()`] by requesting that the allocator
+ /// provide the minimum capacity necessary, rather than a potentially larger
+ /// amount that the allocator may find more convenient.
+ ///
+ /// Remember that this is a *request*: the allocator provides what it
+ /// provides, and you cannot rely on the new capacity to be exactly minimal.
+ /// You should still prefer `.reserve()`, especially if you expect to append
+ /// to the bit-vector in the future.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::reserve_exact`](alloc::vec::Vec::reserve_exact)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the new capacity exceeds the bit-vector’s maximum.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv: BitVec = BitVec::with_capacity(80);
+ /// assert!(bv.capacity() >= 80);
+ /// bv.reserve_exact(800);
+ /// assert!(bv.capacity() >= 800);
+ /// ```
+ ///
+ /// [`.reserve()`]: Self::reserve
+ #[inline]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.do_reservation(additional, Vec::<T>::reserve_exact);
+ }
+
+ /// Releases excess capacity back to the allocator.
+ ///
+ /// Like [`.reserve_exact()`], this is a *request* to the allocator, not a
+ /// command. The allocator may reclaim excess memory or may not.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::shrink_to_fit`](alloc::vec::Vec::shrink_to_fit)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv: BitVec = BitVec::with_capacity(1000);
+ /// bv.push(true);
+ /// bv.shrink_to_fit();
+ /// ```
+ ///
+ /// [`.reserve_exact()`]: Self::reserve_exact
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.with_vec(|vec| vec.shrink_to_fit());
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "prefer `.into_boxed_bitslice() instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn into_boxed_slice(self) -> BitBox<T, O> {
+ self.into_boxed_bitslice()
+ }
+
+ /// Shortens the bit-vector, keeping the first `new_len` bits and discarding
+ /// the rest.
+ ///
+ /// If `new_len` is greater than the bit-vector’s current length, this has
+ /// no effect.
+ ///
+ /// The [`.drain()`] method can emulate `.truncate()`, except that it yields
+ /// the excess bits rather than discarding them.
+ ///
+ /// Note that this has no effect on the allocated capacity of the
+ /// bit-vector, **nor does it erase truncated memory**. Bits in the
+ /// allocated memory that are outside of the [`.as_bitslice()`] view are
+ /// always considered to have *initialized*, but **unspecified**, values,
+ /// and you cannot rely on them to be zero.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::truncate`](alloc::vec::Vec::truncate)
+ ///
+ /// ## Examples
+ ///
+ /// Truncating a five-bit vector to two bits:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// bv.truncate(2);
+ /// assert_eq!(bv.len(), 2);
+ /// assert!(bv.as_raw_slice()[0].count_ones() >= 2);
+ /// ```
+ ///
+ /// No truncation occurs when `new_len` is greater than the bit-vector’s
+ /// current length:
+ ///
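+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0; 3];
+ /// bv.truncate(8);
+ /// assert_eq!(bv.len(), 3);
+ /// ```
+ ///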
+ /// [`.as_bitslice()`]: Self::as_bitslice
+ /// [`.drain()`]: Self::drain
+ #[inline]
+ pub fn truncate(&mut self, new_len: usize) {
+ if new_len < self.len() {
+ unsafe {
+ self.set_len_unchecked(new_len);
+ }
+ }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_slice(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_mut_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_mut_slice(&mut self) -> &mut BitSlice<T, O> {
+ self.as_mut_bitslice()
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitptr()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_ptr(&self) -> BitPtr<Const, T, O> {
+ self.as_bitptr()
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_mut_bitptr()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_mut_ptr(&mut self) -> BitPtr<Mut, T, O> {
+ self.as_mut_bitptr()
+ }
+
+ /// Resizes a bit-vector to a new length.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::set_len`](alloc::vec::Vec::set_len)
+ ///
+ /// ## Safety
+ ///
+ /// **NOT ALL MEMORY IN THE ALLOCATION IS INITIALIZED!**
+ ///
+ /// Memory in a bit-vector’s allocation is only initialized when the
+ /// bit-vector grows into it normally (through [`.push()`] or one of the
+ /// various `.extend*()` methods). Setting the length to a value beyond what
+ /// was previously initialized, but still within the allocation, is
+ /// undefined behavior.
+ ///
+ /// The caller is responsible for ensuring that all memory up to (but not
+ /// including) the new length has already been initialized.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `new_len` exceeds the capacity as reported by
+ /// [`.capacity()`].
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// unsafe {
+ /// // The default storage type, `usize`, is at least 32 bits.
+ /// bv.set_len(32);
+ /// }
+ /// assert_eq!(bv, bits![
+ /// 0, 1, 0, 0, 1, 0, 0, 0,
+ /// 0, 0, 0, 0, 0, 0, 0, 0,
+ /// 0, 0, 0, 0, 0, 0, 0, 0,
+ /// 0, 0, 0, 0, 0, 0, 0, 0,
+ /// ]);
+ /// // `BitVec` guarantees that newly-initialized memory is zeroed.
+ /// ```
+ ///
+ /// [`.push()`]: Self::push
+ /// [`.capacity()`]: Self::capacity
+ #[inline]
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ let capa = self.capacity();
+ assert!(
+ new_len <= capa,
+ "bit-vector capacity exceeded: {} > {}",
+ new_len,
+ capa,
+ );
+ self.set_len_unchecked(new_len);
+ }
+
+ /// Takes a bit out of the bit-vector.
+ ///
+ /// The empty slot is filled with the last bit in the bit-vector, rather
+ /// than shunting `index + 1 .. self.len()` down by one.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::swap_remove`](alloc::vec::Vec::swap_remove)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is out of bounds (`self.len()` or greater).
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// assert!(!bv.swap_remove(2));
+ /// assert_eq!(bv, bits![0, 1, 1, 0]);
+ /// ```
+ #[inline]
+ pub fn swap_remove(&mut self, index: usize) -> bool {
+ self.assert_in_bounds(index, 0 .. self.len());
+ let last = self.len() - 1;
+ unsafe {
+ self.swap_unchecked(index, last);
+ self.set_len(last);
+ *self.get_unchecked(last)
+ }
+ }
+
+ /// Inserts a bit at a given position, shifting all bits after it one spot
+ /// to the right.
+ ///
+ /// `index` may be any value up to *and including* `self.len()`. If it is
+ /// `self.len()`, it behaves equivalently to `.push()`.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::insert`](alloc::vec::Vec::insert)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is out of bounds (greater than `self.len()`).
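+ ///
+ /// ## Examples
+ ///
+ /// A short illustration of mid-vector and end-of-vector insertion:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 1];
+ /// bv.insert(1, false);
+ /// assert_eq!(bv, bits![0, 0, 1, 1]);
+ ///
+ /// // Inserting at `self.len()` is equivalent to `.push()`.
+ /// bv.insert(bv.len(), true);
+ /// assert_eq!(bv, bits![0, 0, 1, 1, 1]);
+ /// ```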
+ #[inline]
+ pub fn insert(&mut self, index: usize, value: bool) {
+ self.assert_in_bounds(index, 0 ..= self.len());
+ self.push(value);
+ unsafe { self.get_unchecked_mut(index ..) }.rotate_right(1);
+ }
+
+ /// Removes a bit at a given position, shifting all bits after it one spot
+ /// to the left.
+ ///
+ /// `index` may be any value up to, but **not** including, `self.len()`.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::remove`](alloc::vec::Vec::remove)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `index` is out of bounds (`self.len()` or greater).
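+ ///
+ /// ## Examples
+ ///
+ /// A short illustration:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0];
+ /// assert!(bv.remove(1));
+ /// assert_eq!(bv, bits![0, 0]);
+ /// ```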
+ #[inline]
+ pub fn remove(&mut self, index: usize) -> bool {
+ self.assert_in_bounds(index, 0 .. self.len());
+ let last = self.len() - 1;
+ unsafe {
+ self.get_unchecked_mut(index ..).rotate_left(1);
+ let out = *self.get_unchecked(last);
+ self.set_len(last);
+ out
+ }
+ }
+
+ /// Retains only the bits that the predicate allows.
+ ///
+ /// Bits are deleted from the vector when the predicate function returns
+ /// false. This function is linear in `self.len()`.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::retain`](alloc::vec::Vec::retain)
+ ///
+ /// ## API Differences
+ ///
+ /// The predicate receives both the index of the bit as well as its value,
+ /// in order to allow the predicate to have more than one bit of
+ /// keep/discard information.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// bv.retain(|idx, _| idx % 2 == 0);
+ /// assert_eq!(bv, bits![0, 0, 1]);
+ /// ```
+ #[inline]
+ pub fn retain<F>(&mut self, mut func: F)
+ where F: FnMut(usize, &bool) -> bool {
+ let mut len = self.len();
+ let mut hole_ptr = self.as_mut_bitptr();
+ let mut reader = self.as_bitptr_range().enumerate();
+
+ // Advance until the *first* hole is created. This avoids writing into
+ // the bit-slice when no change takes place.
+ for (idx, bitptr) in reader.by_ref() {
+ let bit = unsafe { bitptr.read() };
+ if func(idx, &bit) {
+ hole_ptr = unsafe { hole_ptr.add(1) };
+ }
+ else {
+ len -= 1;
+ break;
+ }
+ }
+ // Now that a hole exists, switch to a loop that always writes into the
+ // hole pointer.
+ for (idx, bitptr) in reader {
+ let bit = unsafe { bitptr.read() };
+ if func(idx, &bit) {
+ hole_ptr = unsafe {
+ hole_ptr.write(bit);
+ hole_ptr.add(1)
+ };
+ }
+ else {
+ len -= 1;
+ }
+ }
+ // Discard the bits that did not survive the predicate.
+ unsafe {
+ self.set_len_unchecked(len);
+ }
+ }
+
+ /// Appends a single bit to the vector.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::push`](alloc::vec::Vec::push)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if the push would cause the bit-vector to exceed its maximum
+ /// capacity.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 0];
+ /// bv.push(true);
+ /// assert_eq!(bv.as_bitslice(), bits![0, 0, 1]);
+ /// ```
+ #[inline]
+ pub fn push(&mut self, value: bool) {
+ let len = self.len();
+ let new_len = len + 1;
+ Self::assert_len_encodable(new_len);
+ // Push a new `T` into the underlying buffer if needed.
+ if len == 0 || self.bitspan.tail() == BitEnd::MAX {
+ self.with_vec(|vec| vec.push(T::ZERO));
+ }
+ // Write `value` into the now-safely-allocated `len` slot.
+ unsafe {
+ self.set_len_unchecked(new_len);
+ self.set_unchecked(len, value);
+ }
+ }
+
+ /// Attempts to remove the trailing bit from the bit-vector.
+ ///
+ /// This returns `None` if the bit-vector is empty.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::pop`](alloc::vec::Vec::pop)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1];
+ /// assert!(bv.pop().unwrap());
+ /// assert!(!bv.pop().unwrap());
+ /// assert!(bv.pop().is_none());
+ /// ```
+ #[inline]
+ pub fn pop(&mut self) -> Option<bool> {
+ match self.len() {
+ 0 => None,
+ n => unsafe {
+ let new_len = n - 1;
+ let out = Some(*self.get_unchecked(new_len));
+ self.set_len_unchecked(new_len);
+ out
+ },
+ }
+ }
+
+ /// Moves all the bits out of `other` into the back of `self`.
+ ///
+ /// The `other` bit-vector is emptied after this occurs.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::append`](alloc::vec::Vec::append)
+ ///
+ /// ## API Differences
+ ///
+ /// This permits `other` to have different type parameters than `self`, and
+ /// does not require that it be literally `Self`.
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `self.len() + other.len()` exceeds the maximum capacity
+ /// of a bit-vector.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv1 = bitvec![u16, Msb0; 0; 10];
+ /// let mut bv2 = bitvec![u32, Lsb0; 1; 10];
+ ///
+ /// bv1.append(&mut bv2);
+ ///
+ /// assert_eq!(bv1.count_ones(), 10);
+ /// assert_eq!(bv1.count_zeros(), 10);
+ /// assert!(bv2.is_empty());
+ /// ```
+ #[inline]
+ pub fn append<T2, O2>(&mut self, other: &mut BitVec<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.extend_from_bitslice(other);
+ other.clear();
+ }
+
+ /// Iterates over a portion of the bit-vector, *removing* all yielded bits
+ /// from it.
+ ///
+ /// When the iterator drops, *all* bits in its coverage are removed from
+ /// `self`, even if the iterator did not yield them. If the iterator is
+ /// leaked or otherwise forgotten, and its destructor never runs, then the
+ /// number of un-yielded bits removed from the bit-vector is not specified.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::drain`](alloc::vec::Vec::drain)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `range` departs `0 .. self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// let bv2 = bv.drain(1 ..= 3).collect::<BitVec>();
+ /// assert_eq!(bv, bits![0, 1]);
+ /// assert_eq!(bv2, bits![1, 0, 0]);
+ ///
+ /// // A full range clears the bit-vector.
+ /// bv.drain(..);
+ /// assert!(bv.is_empty());
+ /// ```
+ #[inline]
+ pub fn drain<R>(&mut self, range: R) -> Drain<T, O>
+ where R: RangeBounds<usize> {
+ Drain::new(self, range)
+ }
+
+ /// Empties the bit-vector.
+ ///
+ /// This does not affect the allocated capacity.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::clear`](alloc::vec::Vec::clear)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// bv.clear();
+ /// assert!(bv.is_empty());
+ /// ```
+ #[inline]
+ pub fn clear(&mut self) {
+ self.truncate(0);
+ }
+
+ /// Gets the length of the bit-vector.
+ ///
+ /// This is equivalent to `BitSlice::len`; it is provided as an inherent
+ /// method here rather than relying on `Deref` forwarding so that you can
+ /// write `BitVec::len` as a named function item.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::len`](alloc::vec::Vec::len)
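+ ///
+ /// ## Examples
+ ///
+ /// A brief sketch of using `BitVec::len` as a named function item:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bvs = [bitvec![0; 4], bitvec![1; 6]];
+ /// let lengths: Vec<usize> = bvs.iter().map(BitVec::len).collect();
+ /// assert_eq!(lengths, [4, 6]);
+ /// ```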
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn len(&self) -> usize {
+ self.bitspan.len()
+ }
+
+ /// Tests if the bit-vector is empty.
+ ///
+ /// This is equivalent to `BitSlice::is_empty`; it is provided as an
+ /// inherent method here rather than relying on `Deref` forwarding so that
+ /// you can write `BitVec::is_empty` as a named function item.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::is_empty`](alloc::vec::Vec::is_empty)
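+ ///
+ /// ## Examples
+ ///
+ /// A brief sketch:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv: BitVec = BitVec::new();
+ /// assert!(bv.is_empty());
+ /// bv.push(false);
+ /// assert!(!bv.is_empty());
+ /// ```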
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn is_empty(&self) -> bool {
+ self.bitspan.len() == 0
+ }
+
+ /// Splits the bit-vector in half at an index, moving `self[at ..]` out into
+ /// a new bit-vector.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::split_off`](alloc::vec::Vec::split_off)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// let bv2 = bv.split_off(2);
+ /// assert_eq!((&*bv, &*bv2), (bits![0, 1], bits![0, 0, 1]));
+ /// ```
+ #[inline]
+ pub fn split_off(&mut self, at: usize) -> Self {
+ let len = self.len();
+ self.assert_in_bounds(at, 0 ..= len);
+ let (this, that) = unsafe {
+ self.bitspan
+ .into_bitslice_mut()
+ .split_at_unchecked_mut_noalias(at)
+ };
+ self.bitspan = this.as_mut_bitspan();
+ Self::from_bitslice(that)
+ }
+
+ /// Resizes the bit-vector to a new length, using a function to produce each
+ /// inserted bit.
+ ///
+ /// If `new_len` is less than `self.len()`, this is a truncate operation; if
+ /// it is greater, then `self` is extended by repeatedly pushing `func()`.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::resize_with`](alloc::vec::Vec::resize_with)
+ ///
+ /// ## API Differences
+ ///
+ /// The generator function receives the index into which its bit will be
+ /// placed.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![1; 2];
+ /// bv.resize_with(5, |idx| idx % 2 == 1);
+ /// assert_eq!(bv, bits![1, 1, 0, 1, 0]);
+ /// ```
+ #[inline]
+ pub fn resize_with<F>(&mut self, new_len: usize, mut func: F)
+ where F: FnMut(usize) -> bool {
+ let old_len = self.len();
+ self.resize(new_len, false);
+ if new_len > old_len {
+ for (bitptr, idx) in unsafe { self.get_unchecked_mut(old_len ..) }
+ .as_mut_bitptr_range()
+ .zip(old_len ..)
+ {
+ unsafe {
+ bitptr.write(func(idx));
+ }
+ }
+ }
+ }
+
+ /// Destroys the `BitVec` handle without destroying the bit-vector
+ /// allocation. The allocation is returned as an `&mut BitSlice` that lasts
+ /// for the remaining program lifetime.
+ ///
+ /// You *may* call [`BitBox::from_raw`] on this slice handle exactly once in
+ /// order to reap the allocation before program exit. That function takes a
+ /// mutable pointer, not a mutable reference, so you must ensure that the
+ /// returned reference is never used again after restoring the allocation
+ /// handle.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::leak`](alloc::vec::Vec::leak)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let bv = bitvec![0, 0, 1];
+ /// let static_bits: &'static mut BitSlice = bv.leak();
+ /// static_bits.set(0, true);
+ /// assert_eq!(static_bits, bits![1, 0, 1]);
+ ///
+ /// let bb = unsafe { BitBox::from_raw(static_bits) };
+ /// // static_bits may no longer be used.
+ /// drop(bb); // explicitly reap memory before program exit
+ /// ```
+ ///
+ /// [`BitBox::from_raw`]: crate::boxed::BitBox::from_raw
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn leak<'a>(self) -> &'a mut BitSlice<T, O> {
+ self.into_boxed_bitslice().pipe(BitBox::leak)
+ }
+
+ /// Resizes the bit-vector to a new length. New bits are initialized to
+ /// `value`.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::resize`](alloc::vec::Vec::resize)
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0; 2];
+ /// bv.resize(5, true);
+ /// assert_eq!(bv, bits![0, 0, 1, 1, 1]);
+ /// ```
+ #[inline]
+ pub fn resize(&mut self, new_len: usize, value: bool) {
+ let len = self.len();
+ if new_len > len {
+ self.reserve(new_len - len);
+ unsafe {
+ self.set_len(new_len);
+ self.get_unchecked_mut(len .. new_len).fill(value);
+ }
+ }
+ else {
+ self.truncate(new_len);
+ }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ #[deprecated = "use `.extend_from_bitslice()` or `.extend_from_raw_slice()` \
+ instead"]
+ pub fn extend_from_slice<T2, O2>(&mut self, other: &BitSlice<T2, O2>)
+ where
+ T2: BitStore,
+ O2: BitOrder,
+ {
+ self.extend_from_bitslice(other);
+ }
+
+ /// Extends `self` by copying an internal range of its bit-slice as the
+ /// region to append.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::extend_from_within`](alloc::vec::Vec::extend_from_within)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `src` is not within `0 .. self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 0, 1];
+ /// bv.extend_from_within(1 .. 4);
+ /// assert_eq!(bv, bits![0, 1, 0, 0, 1, 1, 0, 0]);
+ /// ```
+ #[inline]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where R: RangeExt<usize> {
+ let old_len = self.len();
+ let src = src.normalize(0, old_len);
+ self.assert_in_bounds(src.end, 0 .. old_len);
+ self.resize(old_len + src.len(), false);
+ unsafe {
+ self.copy_within_unchecked(src, old_len);
+ }
+ }
+
+ /// Modifies [`self.drain()`] so that the removed bit-slice is instead
+ /// replaced with the contents of another bit-stream.
+ ///
+ /// As with `.drain()`, the specified range is always removed from the
+ /// bit-vector even if the splicer is not fully consumed, and the splicer
+ /// does not specify how many bits are removed if it leaks.
+ ///
+ /// The replacement source is only consumed when the splicer drops; however,
+ /// it may be pulled before then. The replacement source cannot assume that
+ /// there will be a delay between creation of the splicer and when it must
+ /// begin producing bits.
+ ///
+ /// This copies the `Vec::splice` implementation; see its documentation for
+ /// more details about how the replacement should act.
+ ///
+ /// ## Original
+ ///
+ /// [`Vec::splice`](alloc::vec::Vec::splice)
+ ///
+ /// ## Panics
+ ///
+ /// This panics if `range` departs `0 .. self.len()`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 1];
+ /// // a b c
+ /// let mut yank = bv.splice(
+ /// .. 2,
+ /// bits![static 1, 1, 0].iter().by_vals(),
+ /// // d e f
+ /// );
+ ///
+ /// assert!(!yank.next().unwrap()); // a
+ /// assert!(yank.next().unwrap()); // b
+ /// drop(yank);
+ /// assert_eq!(bv, bits![1, 1, 0, 1]);
+ /// // d e f c
+ /// ```
+ ///
+ /// [`self.drain()`]: Self::drain
+ #[inline]
+ pub fn splice<R, I>(
+ &mut self,
+ range: R,
+ replace_with: I,
+ ) -> Splice<T, O, I::IntoIter>
+ where
+ R: RangeBounds<usize>,
+ I: IntoIterator<Item = bool>,
+ {
+ Splice::new(self.drain(range), replace_with)
+ }
+}
diff --git a/src/vec/iter.rs b/src/vec/iter.rs
new file mode 100644
index 0000000..afa3848
--- /dev/null
+++ b/src/vec/iter.rs
@@ -0,0 +1,689 @@
+#![doc = include_str!("../../doc/vec/iter.md")]
+
+use alloc::vec::Vec;
+use core::{
+ fmt::{
+ self,
+ Debug,
+ Formatter,
+ },
+ iter::{
+ FromIterator,
+ FusedIterator,
+ },
+ mem,
+ ops::Range,
+};
+
+use tap::{
+ Pipe,
+ Tap,
+ TapOptional,
+};
+use wyz::{
+ comu::{
+ Mut,
+ Mutability,
+ },
+ range::RangeExt,
+};
+
+use super::BitVec;
+use crate::{
+ boxed::BitBox,
+ mem::bits_of,
+ order::BitOrder,
+ ptr::{
+ BitPtrRange,
+ BitRef,
+ },
+ slice::BitSlice,
+ store::BitStore,
+ view::BitView,
+};
+
+#[doc = include_str!("../../doc/vec/iter/Extend_bool.md")]
+impl<T, O> Extend<bool> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn extend<I>(&mut self, iter: I)
+ where I: IntoIterator<Item = bool> {
+ let mut iter = iter.into_iter();
+ #[allow(irrefutable_let_patterns)] // Removing the `if` is unstable.
+ if let (_, Some(n)) | (n, None) = iter.size_hint() {
+ self.reserve(n);
+ let len = self.len();
+ // If the reservation did not panic, then this will not overflow.
+ let new_len = len.wrapping_add(n);
+ let new = unsafe { self.get_unchecked_mut(len .. new_len) };
+
+ let pulled = new
+ .as_mut_bitptr_range()
+ .zip(iter.by_ref())
+ .map(|(ptr, bit)| unsafe {
+ ptr.write(bit);
+ })
+ .count();
+ unsafe {
+ self.set_len(len + pulled);
+ }
+ }
+
+ // If the iterator is well-behaved and finite, this should never
+ // enter; if the iterator is infinite, then this will eventually crash.
+ iter.for_each(|bit| self.push(bit));
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> Extend<&'a bool> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn extend<I>(&mut self, iter: I)
+ where I: IntoIterator<Item = &'a bool> {
+ self.extend(iter.into_iter().copied());
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../../doc/vec/iter/Extend_BitRef.md")]
+impl<'a, M, T1, T2, O1, O2> Extend<BitRef<'a, M, T2, O2>> for BitVec<T1, O1>
+where
+ M: Mutability,
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn extend<I>(&mut self, iter: I)
+ where I: IntoIterator<Item = BitRef<'a, M, T2, O2>> {
+ self.extend(iter.into_iter().map(|bit| *bit));
+ }
+}
+
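+/// Extending a bit-vector with whole storage elements appends the bits of each
+/// element, as viewed in `O` ordering. A minimal sketch:
+///
+/// ```rust
+/// use bitvec::prelude::*;
+///
+/// let mut bv: BitVec<u8, Lsb0> = BitVec::new();
+/// bv.extend([3u8]);
+/// assert_eq!(bv.len(), 8);
+/// assert_eq!(bv.count_ones(), 2);
+/// ```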
+impl<T, O> Extend<T> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn extend<I>(&mut self, iter: I)
+ where I: IntoIterator<Item = T> {
+ let iter = iter.into_iter();
+ #[allow(irrefutable_let_patterns)]
+ if let (_, Some(n)) | (n, None) = iter.size_hint() {
+ self.reserve(n.checked_mul(bits_of::<T::Mem>()).unwrap());
+ }
+ iter.for_each(|elem| self.extend_from_bitslice(elem.view_bits::<O>()));
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> Extend<&'a T> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn extend<I>(&mut self, iter: I)
+ where I: IntoIterator<Item = &'a T> {
+ self.extend(
+ iter.into_iter()
+ .map(BitStore::load_value)
+ .map(<T as BitStore>::new),
+ );
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../../doc/vec/iter/FromIterator_bool.md")]
+impl<T, O> FromIterator<bool> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from_iter<I>(iter: I) -> Self
+ where I: IntoIterator<Item = bool> {
+ Self::new().tap_mut(|bv| bv.extend(iter))
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> FromIterator<&'a bool> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from_iter<I>(iter: I) -> Self
+ where I: IntoIterator<Item = &'a bool> {
+ iter.into_iter().copied().collect::<Self>()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+#[doc = include_str!("../../doc/vec/iter/FromIterator_BitRef.md")]
+impl<'a, M, T1, T2, O1, O2> FromIterator<BitRef<'a, M, T2, O2>>
+ for BitVec<T1, O1>
+where
+ M: Mutability,
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn from_iter<I>(iter: I) -> Self
+ where I: IntoIterator<Item = BitRef<'a, M, T2, O2>> {
+ iter.into_iter().map(|br| *br).pipe(Self::from_iter)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> FromIterator<T> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from_iter<I>(iter: I) -> Self
+ where I: IntoIterator<Item = T> {
+ iter.into_iter().collect::<Vec<T>>().pipe(Self::from_vec)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> FromIterator<&'a T> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from_iter<I>(iter: I) -> Self
+ where I: IntoIterator<Item = &'a T> {
+ iter.into_iter()
+ .map(<T as BitStore>::load_value)
+ .map(<T as BitStore>::new)
+ .collect::<Self>()
+ }
+}
+
+#[doc = include_str!("../../doc/vec/iter/IntoIterator.md")]
+impl<T, O> IntoIterator for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type IntoIter = <BitBox<T, O> as IntoIterator>::IntoIter;
+ type Item = <BitBox<T, O> as IntoIterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.into_boxed_bitslice().into_iter()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-IntoIterator-1)
+impl<'a, T, O> IntoIterator for &'a BitVec<T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ type IntoIter = <&'a BitSlice<T, O> as IntoIterator>::IntoIter;
+ type Item = <&'a BitSlice<T, O> as IntoIterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_bitslice().iter()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-IntoIterator-2)
+impl<'a, T, O> IntoIterator for &'a mut BitVec<T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ type IntoIter = <&'a mut BitSlice<T, O> as IntoIterator>::IntoIter;
+ type Item = <&'a mut BitSlice<T, O> as IntoIterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_mut_bitslice().iter_mut()
+ }
+}
+
+#[doc = include_str!("../../doc/vec/iter/Drain.md")]
+pub struct Drain<'a, T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ /// Exclusive reference to the handle that created the drain.
+ source: &'a mut BitVec<T, O>,
+ /// The range of the source bit-vector’s buffer that is being drained.
+ drain: BitPtrRange<Mut, T, O>,
+ /// The range of the source bit-vector’s preserved back section. This runs
+ /// from the first bit after the `.drain` to the first bit after the
+ /// original bit-vector ends.
+ tail: Range<usize>,
+}
+
+impl<'a, T, O> Drain<'a, T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ /// Produces a new drain over a region of a bit-vector.
+ pub(super) fn new<R>(source: &'a mut BitVec<T, O>, range: R) -> Self
+ where R: RangeExt<usize> {
+ let len = source.len();
+ let region = range.normalize(None, len);
+ assert!(
+ region.end <= len,
+ "drains cannot extend past the length of their source bit-vector",
+ );
+
+ // The `.tail` region is everything in the bit-vector after the drain.
+ let tail = region.end .. len;
+ let drain = unsafe {
+ // Artificially truncate the source bit-vector to before the drain
+ // region. This is restored in the destructor.
+ source.set_len_unchecked(region.start);
+ let base = source.as_mut_bitptr();
+ BitPtrRange {
+ start: base.add(region.start),
+ end: base.add(region.end),
+ }
+ };
+
+ Self {
+ source,
+ drain,
+ tail,
+ }
+ }
+
+ /// Views the unyielded bits remaining in the drain.
+ ///
+ /// ## Original
+ ///
+ /// [`Drain::as_slice`](alloc::vec::Drain::as_slice)
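+ ///
+ /// ## Examples
+ ///
+ /// A short illustration:
+ ///
+ /// ```rust
+ /// use bitvec::prelude::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 1, 0];
+ /// let drain = bv.drain(1 .. 3);
+ /// assert_eq!(drain.as_bitslice(), bits![1, 1]);
+ /// ```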
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ pub fn as_bitslice(&self) -> &'a BitSlice<T, O> {
+ unsafe { self.drain.clone().into_bitspan().into_bitslice_ref() }
+ }
+
+ #[inline]
+ #[cfg(not(tarpaulin_include))]
+ #[deprecated = "use `.as_bitslice()` instead"]
+ #[allow(missing_docs, clippy::missing_docs_in_private_items)]
+ pub fn as_slice(&self) -> &'a BitSlice<T, O> {
+ self.as_bitslice()
+ }
+
+ /// Attempts to fill the `drain` region with the contents of another
+ /// iterator.
+ ///
+ /// The source bit-vector is extended to include each bit that the
+ /// replacement iterator provides, but is *not yet* extended to include the
+ /// `tail` region, even if the replacement iterator completely fills the
+ /// `drain` region. That work occurs in the destructor.
+ ///
+ /// This is only used by [`Splice`].
+ ///
+ /// [`Splice`]: crate::vec::Splice
+ #[inline]
+ fn fill(&mut self, iter: &mut impl Iterator<Item = bool>) -> FillStatus {
+ let bv = &mut *self.source;
+ let mut len = bv.len();
+ let span =
+ unsafe { bv.as_mut_bitptr().add(len).range(self.tail.start - len) };
+
+ let mut out = FillStatus::FullSpan;
+ for ptr in span {
+ if let Some(bit) = iter.next() {
+ unsafe {
+ ptr.write(bit);
+ }
+ len += 1;
+ }
+ else {
+ out = FillStatus::EmptyInput;
+ break;
+ }
+ }
+ unsafe {
+ bv.set_len_unchecked(len);
+ }
+ out
+ }
+
+ /// Reserves space for `additional` more bits at the end of the `drain`
+ /// region by moving the `tail` region upwards in memory.
+ ///
+ /// This has the same effects as [`BitVec::resize`], except that the bits
+ /// are inserted between `drain` and `tail` rather than at the end.
+ ///
+ /// This does not modify the drain iteration cursor, including its endpoint.
+ /// The newly inserted bits are not available for iteration.
+ ///
+ /// This is only used by [`Splice`], which may insert more bits than the
+ /// drain removed.
+ ///
+ /// [`BitVec::resize`]: crate::vec::BitVec::resize
+ /// [`Splice`]: crate::vec::Splice
+ unsafe fn move_tail(&mut self, additional: usize) {
+ if additional == 0 {
+ return;
+ }
+
+ let bv = &mut *self.source;
+ let tail_len = self.tail.len();
+
+ let full_len = additional + tail_len;
+ bv.reserve(full_len);
+ let new_tail_start = additional + self.tail.start;
+ let orig_tail = mem::replace(
+ &mut self.tail,
+ new_tail_start .. new_tail_start + tail_len,
+ );
+ let len = bv.len();
+ bv.set_len_unchecked(full_len);
+ bv.copy_within_unchecked(orig_tail, new_tail_start);
+ bv.set_len_unchecked(len);
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-AsRef%3C%5BT%5D%3E)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitSlice<T, O>> for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Debug for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ fmt.debug_tuple("Drain").field(&self.as_bitslice()).finish()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Iterator)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Iterator for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Item = bool;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.drain.next().map(|bp| unsafe { bp.read() })
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.drain.nth(n).map(|bp| unsafe { bp.read() })
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-DoubleEndedIterator)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> DoubleEndedIterator for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.drain.next_back().map(|bp| unsafe { bp.read() })
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.drain.nth_back(n).map(|bp| unsafe { bp.read() })
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-ExactSizeIterator)
+#[cfg(not(tarpaulin_include))]
+impl<T, O> ExactSizeIterator for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.drain.len()
+ }
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-FusedIterator)
+impl<T, O> FusedIterator for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Send)
+// #[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T, O> Send for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ for<'a> &'a mut BitSlice<T, O>: Send,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Sync)
+unsafe impl<T, O> Sync for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: Sync,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Drop)
+impl<T, O> Drop for Drain<'_, T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn drop(&mut self) {
+ let tail = mem::take(&mut self.tail);
+ let tail_len = tail.len();
+ if tail_len == 0 {
+ return;
+ }
+
+ let bv = &mut *self.source;
+ let old_len = bv.len();
+ unsafe {
+ bv.set_len_unchecked(tail.end);
+ bv.copy_within_unchecked(tail, old_len);
+ bv.set_len_unchecked(old_len + tail_len);
+ }
+ }
+}
+
+#[repr(u8)]
+#[doc = include_str!("../../doc/vec/iter/FillStatus.md")]
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+enum FillStatus {
+ /// The drain span is completely filled.
+ FullSpan = 0,
+ /// The replacement source is completely exhausted.
+ EmptyInput = 1,
+}
+
+#[derive(Debug)]
+#[doc = include_str!("../../doc/vec/iter/Splice.md")]
+pub struct Splice<'a, T, O, I>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+ I: Iterator<Item = bool>,
+{
+ /// The region of the bit-vector being drained.
+ drain: Drain<'a, T, O>,
+ /// The bitstream that replaces drained bits.
+ splice: I,
+}
+
+impl<'a, T, O, I> Splice<'a, T, O, I>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+ I: Iterator<Item = bool>,
+{
+ /// Constructs a splice out of a drain and a replacement source.
+ pub(super) fn new(
+ drain: Drain<'a, T, O>,
+ splice: impl IntoIterator<IntoIter = I, Item = bool>,
+ ) -> Self {
+ let splice = splice.into_iter();
+ Self { drain, splice }
+ }
+}
+
+impl<T, O, I> Iterator for Splice<'_, T, O, I>
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Iterator<Item = bool>,
+{
+ type Item = bool;
+
+ easy_iter!();
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.drain.next().tap_some(|_| unsafe {
+ if let Some(bit) = self.splice.next() {
+ let bv = &mut *self.drain.source;
+ let len = bv.len();
+ bv.set_len_unchecked(len + 1);
+ bv.set_unchecked(len, bit);
+ }
+ })
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, I> DoubleEndedIterator for Splice<'_, T, O, I>
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Iterator<Item = bool>,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.drain.next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.drain.nth_back(n)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, I> ExactSizeIterator for Splice<'_, T, O, I>
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Iterator<Item = bool>,
+{
+ #[inline]
+ fn len(&self) -> usize {
+ self.drain.len()
+ }
+}
+
+impl<T, O, I> FusedIterator for Splice<'_, T, O, I>
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Iterator<Item = bool>,
+{
+}
+
+/// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Drop)
+impl<T, O, I> Drop for Splice<'_, T, O, I>
+where
+ T: BitStore,
+ O: BitOrder,
+ I: Iterator<Item = bool>,
+{
+ #[inline]
+ fn drop(&mut self) {
+ let tail = self.drain.tail.clone();
+ let tail_len = tail.len();
+ let bv = &mut *self.drain.source;
+
+ if tail_len == 0 {
+ bv.extend(self.splice.by_ref());
+ return;
+ }
+
+ if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) {
+ return;
+ }
+
+ let len = match self.splice.size_hint() {
+ (n, None) | (_, Some(n)) => n,
+ };
+
+ unsafe {
+ self.drain.move_tail(len);
+ }
+ if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) {
+ return;
+ }
+
+ /* If the `.splice` *still* has bits to provide, then its
+ * `.size_hint()` is untrustworthy. Collect the `.splice` into a
+ * bit-vector, then insert the bit-vector into the spliced region.
+ */
+ let mut collected =
+ self.splice.by_ref().collect::<BitVec<T, O>>().into_iter();
+ let len = collected.len();
+ if len > 0 {
+ unsafe {
+ self.drain.move_tail(len);
+ }
+ let filled = self.drain.fill(collected.by_ref());
+ debug_assert_eq!(filled, FillStatus::EmptyInput);
+ debug_assert_eq!(collected.len(), 0);
+ }
+ }
+}
diff --git a/src/vec/ops.rs b/src/vec/ops.rs
new file mode 100644
index 0000000..71bd36e
--- /dev/null
+++ b/src/vec/ops.rs
@@ -0,0 +1,272 @@
+//! Operator trait implementations for bit-vectors.
+
+use core::{
+ mem::ManuallyDrop,
+ ops::{
+ BitAnd,
+ BitAndAssign,
+ BitOr,
+ BitOrAssign,
+ BitXor,
+ BitXorAssign,
+ Deref,
+ DerefMut,
+ Index,
+ IndexMut,
+ Not,
+ },
+};
+
+use wyz::comu::Mut;
+
+use super::BitVec;
+use crate::{
+ order::BitOrder,
+ ptr::BitSpan,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitAndAssign<BitVec<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: BitVec<T, O>) {
+ *self &= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitAndAssign<&BitVec<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: &BitVec<T, O>) {
+ *self &= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> BitAnd<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitAndAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitand(mut self, rhs: Rhs) -> Self::Output {
+ self &= rhs;
+ self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> BitAndAssign<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitAndAssign<Rhs>,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() &= rhs;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitOrAssign<BitVec<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: BitVec<T, O>) {
+ *self |= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitOrAssign<&BitVec<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: &BitVec<T, O>) {
+ *self |= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> BitOr<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitOrAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitor(mut self, rhs: Rhs) -> Self::Output {
+ self |= rhs;
+ self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> BitOrAssign<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitOrAssign<Rhs>,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() |= rhs;
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitXorAssign<BitVec<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: BitVec<T, O>) {
+ *self ^= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BitXorAssign<&BitVec<T, O>> for BitSlice<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: &BitVec<T, O>) {
+ *self ^= rhs.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> BitXor<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitXorAssign<Rhs>,
+{
+ type Output = Self;
+
+ #[inline]
+ fn bitxor(mut self, rhs: Rhs) -> Self::Output {
+ self ^= rhs;
+ self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> BitXorAssign<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: BitXorAssign<Rhs>,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Rhs) {
+ *self.as_mut_bitslice() ^= rhs;
+ }
+}
+
+impl<T, O> Deref for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Target = BitSlice<T, O>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.as_bitslice()
+ }
+}
+
+impl<T, O> DerefMut for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.as_mut_bitslice()
+ }
+}
+
+impl<T, O> Drop for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn drop(&mut self) {
+ if self.bitspan != BitSpan::<Mut, T, O>::EMPTY {
+ self.with_vec(|slot| unsafe { ManuallyDrop::drop(slot) });
+ }
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Idx> Index<Idx> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: Index<Idx>,
+{
+ type Output = <BitSlice<T, O> as Index<Idx>>::Output;
+
+ #[inline]
+ fn index(&self, index: Idx) -> &Self::Output {
+ &self.as_bitslice()[index]
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Idx> IndexMut<Idx> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ BitSlice<T, O>: IndexMut<Idx>,
+{
+ #[inline]
+ fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
+ &mut self.as_mut_bitslice()[index]
+ }
+}
+
+/** This implementation inverts all elements in the live buffer. You cannot rely
+on the value of bits in the buffer that are outside the domain of
+[`BitVec::as_mut_bitslice`].
+**/
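+///
+/// A short illustration over the live region:
+///
+/// ```rust
+/// use bitvec::prelude::*;
+///
+/// let bv = bitvec![0, 1, 0, 0, 1];
+/// assert_eq!(!bv, bits![1, 0, 1, 1, 0]);
+/// ```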
+impl<T, O> Not for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Output = Self;
+
+ #[inline]
+ fn not(mut self) -> Self::Output {
+ for elem in self.as_raw_mut_slice() {
+ elem.store_value(!elem.load_value());
+ }
+ self
+ }
+}
diff --git a/src/vec/tests.rs b/src/vec/tests.rs
new file mode 100644
index 0000000..6d9184f
--- /dev/null
+++ b/src/vec/tests.rs
@@ -0,0 +1,81 @@
+//! Unit tests for bit-vectors.
+
+#![cfg(test)]
+
+use core::mem;
+
+use rand::random;
+
+use crate::{
+ mem::bits_of,
+ prelude::*,
+};
+
+mod api;
+mod iter;
+mod traits;
+
+#[test]
+fn make_and_resize() {
+ let mut bv: BitVec = BitVec::new();
+ assert!(bv.is_empty());
+ assert_eq!(bv.capacity(), 0);
+
+ bv.reserve(20);
+ // Capacity always rounds up to the storage size, which is an
+ // at-least-32-bit `usize`.
+ assert!(bv.capacity() >= 32);
+ bv.reserve_exact(90);
+ assert!(bv.capacity() >= 96);
+
+ bv = BitVec::with_capacity(100);
+ assert!(bv.is_empty());
+ assert!(bv.capacity() >= 128);
+
+ bv.extend_from_bitslice(bits![0, 1, 0, 0, 1]);
+ assert_eq!(bv.len(), 5);
+ let (bitptr, length, capacity) = mem::take(&mut bv).into_raw_parts();
+ bv = unsafe { BitVec::from_raw_parts(bitptr, length, capacity) };
+ assert_eq!(bv, bits![0, 1, 0, 0, 1]);
+
+ let capacity = bv.capacity();
+ bv.shrink_to_fit();
+ assert!(bv.capacity() <= capacity);
+
+ bv.truncate(2);
+ assert_eq!(bv.len(), 2);
+ assert_eq!(bv, bits![0, 1]);
+ bv.truncate(20);
+ assert_eq!(bv.len(), 2);
+
+ let capacity = bv.capacity();
+ unsafe {
+ bv.set_len(capacity);
+ bv.set_elements((&false) as *const bool as usize);
+ }
+}
+
+#[test]
+fn misc() {
+ let elem = random::<usize>();
+ let bv: BitVec = BitVec::from_element(elem);
+ assert_eq!(bv, elem.view_bits::<Lsb0>());
+
+ let array: [usize; 10] = random();
+ let mut bv: BitVec = BitVec::from_slice(&array[..]);
+ assert_eq!(bv, array.view_bits::<Lsb0>());
+
+ bv.extend_from_raw_slice(&[elem]);
+ assert_eq!(bv[10 * bits_of::<usize>() ..], elem.view_bits::<Lsb0>());
+
+ let elem = random::<u32>();
+ let bits = &elem.view_bits::<Lsb0>()[4 .. 28];
+ let mut bv = bits.to_bitvec();
+
+ bv.set_uninitialized(false);
+ bv.force_align();
+ bv.set_uninitialized(true);
+ bv.force_align();
+
+ assert_eq!(!bitvec![0, 1], bits![1, 0]);
+}
diff --git a/src/vec/tests/api.rs b/src/vec/tests/api.rs
new file mode 100644
index 0000000..ffd58f5
--- /dev/null
+++ b/src/vec/tests/api.rs
@@ -0,0 +1,70 @@
+use crate::prelude::*;
+
+#[test]
+fn ins_del() {
+ let mut bv = bitvec![0, 1, 0, 0, 1];
+
+ assert!(!bv.swap_remove(2));
+ assert_eq!(bv, bits![0, 1, 1, 0]);
+
+ bv.insert(2, false);
+ assert_eq!(bv, bits![0, 1, 0, 1, 0]);
+
+ assert!(bv.remove(3));
+ assert_eq!(bv, bits![0, 1, 0, 0]);
+}
+
+#[test]
+fn walk() {
+ let mut bv = bitvec![
+ 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0
+ ];
+ assert_eq!(bv.pop(), Some(false));
+ assert_eq!(bv.count_ones(), 8);
+
+ bv.retain(|idx, &bit| bit && idx % 2 == 1);
+ assert_eq!(bv, bits![1; 7]);
+
+ let mut bv2 = bitvec![1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1];
+ bv.append(&mut bv2);
+ assert_eq!(bv.count_ones(), 14);
+ assert!(bv2.is_empty());
+
+ let mut splice = bv.splice(2 .. 10, Some(false));
+ assert!(splice.all(|bit| bit));
+ drop(splice);
+ assert_eq!(bv, bits![1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]);
+}
+
+#[test]
+fn misc() {
+ let mut bv = bitvec![0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1];
+ let bv2 = bv.split_off(10);
+ assert_eq!(bv2, bits![0, 1, 0, 1]);
+ bv.clear();
+
+ let mut a = 1;
+ let mut b = 1;
+ let fib = |idx| {
+ if idx == a.max(b) {
+ let c = a + b;
+ b = a;
+ a = c;
+ true
+ }
+ else {
+ false
+ }
+ };
+ bv.resize_with(22, fib);
+ assert_eq!(bv, bits![
+ 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,
+ ]);
+
+ bv.resize(14, false);
+ assert_eq!(bv, bits![0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]);
+
+ let mut bv = bitvec![0, 0, 1, 1, 0, 0];
+ bv.extend_from_within(2 .. 4);
+ assert_eq!(bv, bits![0, 0, 1, 1, 0, 0, 1, 1]);
+}
diff --git a/src/vec/tests/iter.rs b/src/vec/tests/iter.rs
new file mode 100644
index 0000000..217f533
--- /dev/null
+++ b/src/vec/tests/iter.rs
@@ -0,0 +1,41 @@
+use crate::prelude::*;
+
+#[test]
+fn extend() {
+ let mut bv = bitvec![];
+ bv.extend(Some(true));
+ bv.extend([2usize]);
+
+ let mut iter = bv.into_iter();
+ assert!(iter.next().unwrap());
+}
+
+#[test]
+fn drain() {
+ let mut bv = bitvec![0, 1, 1, 1, 0];
+ let mut drain = bv.drain(1 .. 4);
+ assert!(drain.next().unwrap());
+ assert!(drain.next_back().unwrap());
+ drop(drain);
+ assert_eq!(bv, bits![0; 2]);
+}
+
+#[test]
+fn splice() {
+ let mut bv = bitvec![0, 1, 1, 1, 0];
+ let mut splice = bv.splice(1 .. 4, [false, true, true, false]);
+
+ assert!(splice.next().unwrap());
+ assert!(splice.next_back().unwrap());
+ drop(splice);
+
+ assert_eq!(bv, bits![0, 0, 1, 1, 0, 0]);
+
+ let mut bv = bitvec![0, 1, 0, 0, 1];
+ drop(bv.splice(2 .., None));
+ assert_eq!(bv, bits![0, 1]);
+
+ let mut bv = bitvec![0, 1, 0, 0, 1];
+ drop(bv.splice(2 .. 2, Some(true)));
+ assert_eq!(bv, bits![0, 1, 1, 0, 0, 1]);
+}
diff --git a/src/vec/tests/traits.rs b/src/vec/tests/traits.rs
new file mode 100644
index 0000000..27a023f
--- /dev/null
+++ b/src/vec/tests/traits.rs
@@ -0,0 +1,102 @@
+use alloc::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ Cow,
+ },
+ vec::Vec,
+};
+use core::{
+ convert::TryFrom,
+ fmt::Debug,
+ hash::Hash,
+ iter::FromIterator,
+ ops::{
+ Deref,
+ DerefMut,
+ Index,
+ Range,
+ },
+ panic::{
+ RefUnwindSafe,
+ UnwindSafe,
+ },
+};
+#[cfg(feature = "std")]
+use std::io::Write;
+
+use static_assertions::*;
+
+use crate::prelude::*;
+
+#[test]
+fn alloc_impl() {
+ assert_impl_all!(BitVec<usize, Lsb0>:
+ AsMut<BitSlice<usize, Lsb0>>,
+ AsMut<BitVec<usize, Lsb0>>,
+ AsRef<BitSlice<usize, Lsb0>>,
+ AsRef<BitVec<usize, Lsb0>>,
+ Borrow<BitSlice<usize, Lsb0>>,
+ BorrowMut<BitSlice<usize, Lsb0>>,
+ Clone,
+ Debug,
+ Default,
+ Deref,
+ DerefMut,
+ Drop,
+ Eq,
+ Extend<&'static bool>,
+ Extend<bool>,
+ From<&'static BitSlice<usize, Lsb0>>,
+ From<&'static mut BitSlice<usize, Lsb0>>,
+ From<BitArray<[usize; 20], Lsb0>>,
+ From<BitBox<usize, Lsb0>>,
+ From<Cow<'static, BitSlice<usize, Lsb0>>>,
+ FromIterator<bool>,
+ Hash,
+ Index<usize>,
+ Index<Range<usize>>,
+ IntoIterator,
+ Ord,
+ PartialEq<&'static BitSlice<usize, Lsb0>>,
+ PartialEq<BitArray<[usize; 20], Lsb0>>,
+ RefUnwindSafe,
+ Send,
+ Sync,
+ TryFrom<Vec<usize>>,
+ Unpin,
+ UnwindSafe,
+ );
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn std_impl() {
+ assert_impl_all!(BitVec<usize, Lsb0>: Write);
+}
+
+#[test]
+fn format() {
+ #[cfg(not(feature = "std"))]
+ use alloc::format;
+
+ let bv = bitvec![0, 0, 1, 1, 0, 1, 0, 1];
+ assert_eq!(format!("{}", bv), format!("{}", bv.as_bitslice()));
+ assert_eq!(format!("{:b}", bv), format!("{:b}", bv.as_bitslice()));
+ assert_eq!(format!("{:o}", bv), format!("{:o}", bv.as_bitslice()));
+ assert_eq!(format!("{:x}", bv), format!("{:x}", bv.as_bitslice()));
+ assert_eq!(format!("{:X}", bv), format!("{:X}", bv.as_bitslice()));
+
+ let text = format!("{:?}", bitvec![u8, Msb0; 0, 1, 0, 0]);
+ assert!(
+ text.starts_with("BitVec<u8, bitvec::order::Msb0> { addr: 0x"),
+ "{}",
+ text
+ );
+ assert!(
+ text.contains(", head: 000, bits: 4, capacity: "),
+ "{}",
+ text
+ );
+ assert!(text.ends_with(" } [0, 1, 0, 0]"), "{}", text);
+}
diff --git a/src/vec/traits.rs b/src/vec/traits.rs
new file mode 100644
index 0000000..55d1d3b
--- /dev/null
+++ b/src/vec/traits.rs
@@ -0,0 +1,409 @@
+//! General trait implementations for bit-vectors.
+
+use alloc::{
+ borrow::{
+ Cow,
+ ToOwned,
+ },
+ vec::Vec,
+};
+use core::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ },
+ cmp,
+ convert::TryFrom,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ marker::Unpin,
+};
+
+use super::BitVec;
+use crate::{
+ array::BitArray,
+ boxed::BitBox,
+ order::BitOrder,
+ slice::BitSlice,
+ store::BitStore,
+ view::BitViewSized,
+};
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Borrow<BitSlice<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn borrow(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> BorrowMut<BitSlice<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut BitSlice<T, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Clone for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self::from_bitslice(self.as_bitslice())
+ }
+}
+
+impl<T, O> Eq for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Ord for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.as_bitslice().cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T1, T2, O1, O2> PartialEq<BitVec<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, other: &BitVec<T2, O2>) -> bool {
+ self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T1, T2, O1, O2> PartialEq<BitVec<T2, O2>> for &BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, other: &BitVec<T2, O2>) -> bool {
+ *self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T1, T2, O1, O2> PartialEq<BitVec<T2, O2>> for &mut BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn eq(&self, other: &BitVec<T2, O2>) -> bool {
+ **self == other.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> PartialEq<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Rhs: ?Sized + PartialEq<BitSlice<T, O>>,
+{
+ #[inline]
+ fn eq(&self, other: &Rhs) -> bool {
+ other == self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T1, T2, O1, O2> PartialOrd<BitVec<T2, O2>> for BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitVec<T2, O2>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T1, T2, O1, O2> PartialOrd<BitVec<T2, O2>> for &'a BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitVec<T2, O2>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T1, T2, O1, O2> PartialOrd<BitVec<T2, O2>> for &'a mut BitSlice<T1, O1>
+where
+ T1: BitStore,
+ T2: BitStore,
+ O1: BitOrder,
+ O2: BitOrder,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &BitVec<T2, O2>) -> Option<cmp::Ordering> {
+ self.partial_cmp(other.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O, Rhs> PartialOrd<Rhs> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+ Rhs: ?Sized + PartialOrd<BitSlice<T, O>>,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
+ other.partial_cmp(self.as_bitslice())
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitSlice<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &BitSlice<T, O> {
+ self.as_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsMut<BitSlice<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut BitSlice<T, O> {
+ self.as_mut_bitslice()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsRef<BitVec<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_ref(&self) -> &Self {
+ self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> AsMut<BitVec<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn as_mut(&mut self) -> &mut Self {
+ self
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> From<&'_ BitSlice<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(slice: &BitSlice<T, O>) -> Self {
+ Self::from_bitslice(slice)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> From<&'_ mut BitSlice<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(slice: &mut BitSlice<T, O>) -> Self {
+ Self::from_bitslice(slice)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, O> From<BitArray<A, O>> for BitVec<A::Store, O>
+where
+ O: BitOrder,
+ A: BitViewSized,
+{
+ #[inline]
+ fn from(array: BitArray<A, O>) -> Self {
+ array.as_bitslice().to_owned()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> From<BitBox<T, O>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(boxed: BitBox<T, O>) -> Self {
+ boxed.into_bitvec()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> From<BitVec<T, O>> for Vec<T>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn from(bv: BitVec<T, O>) -> Self {
+ bv.into_vec()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<'a, T, O> From<Cow<'a, BitSlice<T, O>>> for BitVec<T, O>
+where
+ O: BitOrder,
+ T: 'a + BitStore,
+{
+ #[inline]
+ fn from(cow: Cow<'a, BitSlice<T, O>>) -> Self {
+ cow.into_owned()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> TryFrom<Vec<T>> for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ type Error = Vec<T>;
+
+ #[inline]
+ fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
+ Self::try_from_vec(vec)
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Default for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T, O> Debug for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ self.as_bitspan().render(fmt, "Vec", &[(
+ "capacity",
+ &self.capacity() as &dyn Debug,
+ )])?;
+ fmt.write_str(" ")?;
+ Display::fmt(self, fmt)
+ }
+}
+
+easy_fmt! {
+ impl Binary
+ impl Display
+ impl LowerHex
+ impl Octal
+ impl Pointer
+ impl UpperHex
+ for BitVec
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<T, O> Hash for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+ #[inline]
+ fn hash<H>(&self, state: &mut H)
+ where H: Hasher {
+ self.as_bitslice().hash(state)
+ }
+}
+
+unsafe impl<T, O> Send for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+unsafe impl<T, O> Sync for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
+
+impl<T, O> Unpin for BitVec<T, O>
+where
+ T: BitStore,
+ O: BitOrder,
+{
+}
diff --git a/src/view.rs b/src/view.rs
new file mode 100644
index 0000000..e394c9a
--- /dev/null
+++ b/src/view.rs
@@ -0,0 +1,301 @@
+#![doc = include_str!("../doc/view.md")]
+
+use core::slice;
+
+use crate::{
+ array::BitArray,
+ order::BitOrder,
+ ptr::BitSpanError,
+ slice::BitSlice,
+ store::BitStore,
+};
+
+#[doc = include_str!("../doc/view/BitView.md")]
+pub trait BitView {
+ /// The underlying element type.
+ type Store: BitStore;
+
+ /// Views a memory region as an immutable bit-slice.
+ fn view_bits<O>(&self) -> &BitSlice<Self::Store, O>
+ where O: BitOrder;
+
+ /// Attempts to view a memory region as an immutable bit-slice.
+ ///
+ /// This may return an error if `self` is too long to view as a bit-slice.
+ fn try_view_bits<O>(
+ &self,
+ ) -> Result<&BitSlice<Self::Store, O>, BitSpanError<Self::Store>>
+ where O: BitOrder;
+
+ /// Views a memory region as a mutable bit-slice.
+ fn view_bits_mut<O>(&mut self) -> &mut BitSlice<Self::Store, O>
+ where O: BitOrder;
+
+ /// Attempts to view a memory region as a mutable bit-slice.
+ ///
+ /// This may return an error if `self` is too long to view as a bit-slice.
+ fn try_view_bits_mut<O>(
+ &mut self,
+ ) -> Result<&mut BitSlice<Self::Store, O>, BitSpanError<Self::Store>>
+ where O: BitOrder;
+}
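+
+// Editorial sketch, not part of the upstream file: how the `BitView` methods
+// are typically called on a plain integer, assuming the orderings re-exported
+// from `bitvec::prelude`.
+//
+//     use bitvec::prelude::*;
+//
+//     let byte = 0b1000_0001u8;
+//     let bits = byte.view_bits::<Msb0>();
+//     assert!(bits[0] && bits[7]); // Msb0: bit 0 is the high bit, bit 7 the low bit
+//     assert!(byte.try_view_bits::<Lsb0>().is_ok()); // a single element never overflows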
+
+#[cfg(not(tarpaulin_include))]
+impl<T> BitView for T
+where T: BitStore
+{
+ type Store = Self;
+
+ fn view_bits<O>(&self) -> &BitSlice<T, O>
+ where O: BitOrder {
+ BitSlice::from_element(self)
+ }
+
+ fn try_view_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ Ok(BitSlice::from_element(self))
+ }
+
+ fn view_bits_mut<O>(&mut self) -> &mut BitSlice<T, O>
+ where O: BitOrder {
+ BitSlice::from_element_mut(self)
+ }
+
+ fn try_view_bits_mut<O>(
+ &mut self,
+ ) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ Ok(BitSlice::from_element_mut(self))
+ }
+}
+
+/// Note that overly-large slices may cause the conversions to fail.
+#[cfg(not(tarpaulin_include))]
+impl<T> BitView for [T]
+where T: BitStore
+{
+ type Store = T;
+
+ #[inline]
+ fn view_bits<O>(&self) -> &BitSlice<T, O>
+ where O: BitOrder {
+ BitSlice::from_slice(self)
+ }
+
+ #[inline]
+ fn try_view_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ BitSlice::try_from_slice(self)
+ }
+
+ #[inline]
+ fn view_bits_mut<O>(&mut self) -> &mut BitSlice<T, O>
+ where O: BitOrder {
+ BitSlice::from_slice_mut(self)
+ }
+
+ #[inline]
+ fn try_view_bits_mut<O>(
+ &mut self,
+ ) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ BitSlice::try_from_slice_mut(self)
+ }
+}
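+
+// Editorial sketch, not part of the upstream file: viewing an ordinary slice
+// as bits. The fallible form only errors when the slice is so long that its
+// bit count would exceed the maximum `BitSlice` length, so typical inputs
+// succeed.
+//
+//     use bitvec::prelude::*;
+//
+//     let data = [0u16; 3];
+//     assert_eq!(data[..].view_bits::<Lsb0>().len(), 48);
+//     assert!(data[..].try_view_bits::<Lsb0>().is_ok());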
+
+/// Note that overly-large arrays may cause the conversions to fail.
+#[cfg(not(tarpaulin_include))]
+impl<T, const N: usize> BitView for [T; N]
+where T: BitStore
+{
+ type Store = T;
+
+ #[inline]
+ fn view_bits<O>(&self) -> &BitSlice<T, O>
+ where O: BitOrder {
+ BitSlice::from_slice(self)
+ }
+
+ #[inline]
+ fn try_view_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ BitSlice::try_from_slice(self)
+ }
+
+ #[inline]
+ fn view_bits_mut<O>(&mut self) -> &mut BitSlice<T, O>
+ where O: BitOrder {
+ BitSlice::from_slice_mut(self)
+ }
+
+ #[inline]
+ fn try_view_bits_mut<O>(
+ &mut self,
+ ) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ BitSlice::try_from_slice_mut(self)
+ }
+}
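+
+// Editorial sketch, not part of the upstream file: arrays get the same
+// viewing API directly, without first coercing to a slice.
+//
+//     use bitvec::prelude::*;
+//
+//     let mut raw = [0u8; 2];
+//     raw.view_bits_mut::<Lsb0>().set(9, true); // element 1, low-order bit 1
+//     assert_eq!(raw, [0, 2]);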
+
+/// Helper trait for scalars and arrays, but not slices.
+pub trait BitViewSized: BitView + Sized {
+ /// The zero constant.
+ const ZERO: Self;
+
+ /// Wraps `self` in a `BitArray`.
+ #[inline]
+ fn into_bitarray<O>(self) -> BitArray<Self, O>
+ where O: BitOrder {
+ BitArray::new(self)
+ }
+
+ /// Views the type as a slice of its elements.
+ fn as_raw_slice(&self) -> &[Self::Store];
+
+ /// Views the type as a mutable slice of its elements.
+ fn as_raw_mut_slice(&mut self) -> &mut [Self::Store];
+}
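+
+// Editorial sketch, not part of the upstream file: `BitViewSized` adds
+// by-value conversion into `BitArray` and raw-element access on top of
+// `BitView`.
+//
+//     use bitvec::prelude::*;
+//
+//     let arr = 0x0Fu8.into_bitarray::<Lsb0>();
+//     assert_eq!(arr.count_ones(), 4);
+//     assert_eq!(0u32.as_raw_slice().len(), 1);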
+
+impl<T> BitViewSized for T
+where T: BitStore
+{
+ const ZERO: Self = <T as BitStore>::ZERO;
+
+ #[inline]
+ fn as_raw_slice(&self) -> &[Self::Store] {
+ slice::from_ref(self)
+ }
+
+ #[inline]
+ fn as_raw_mut_slice(&mut self) -> &mut [Self::Store] {
+ slice::from_mut(self)
+ }
+}
+
+impl<T, const N: usize> BitViewSized for [T; N]
+where T: BitStore
+{
+ const ZERO: Self = [T::ZERO; N];
+
+ #[inline]
+ fn as_raw_slice(&self) -> &[Self::Store] {
+ &self[..]
+ }
+
+ #[inline]
+ fn as_raw_mut_slice(&mut self) -> &mut [Self::Store] {
+ &mut self[..]
+ }
+}
+
+#[doc = include_str!("../doc/view/AsBits.md")]
+pub trait AsBits<T>
+where T: BitStore
+{
+ /// Views `self` as an immutable bit-slice region with the `O` ordering.
+ fn as_bits<O>(&self) -> &BitSlice<T, O>
+ where O: BitOrder;
+
+ /// Attempts to view `self` as an immutable bit-slice region with the `O`
+ /// ordering.
+ ///
+ /// This may return an error if `self` is too long to view as a bit-slice.
+ fn try_as_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder;
+}
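+
+// Editorial sketch, not part of the upstream file: because the blanket
+// implementation below covers any `AsRef<[T]>`, container types such as
+// `Vec<u8>` pick up `as_bits` automatically.
+//
+//     use bitvec::prelude::*;
+//
+//     let bytes = vec![0u8, 0xFF];
+//     assert_eq!(bytes.as_bits::<Msb0>().count_ones(), 8);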
+
+#[doc = include_str!("../doc/view/AsMutBits.md")]
+pub trait AsMutBits<T>
+where T: BitStore
+{
+ /// Views `self` as a mutable bit-slice region with the `O` ordering.
+ fn as_mut_bits<O>(&mut self) -> &mut BitSlice<T, O>
+ where O: BitOrder;
+
+ /// Attempts to view `self` as a mutable bit-slice region with the `O`
+ /// ordering.
+ ///
+ /// This may return an error if `self` is too long to view as a bit-slice.
+ fn try_as_mut_bits<O>(
+ &mut self,
+ ) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder;
+}
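+
+// Editorial sketch, not part of the upstream file: the mutable counterpart,
+// writing through a `Vec<u8>` viewed as bits.
+//
+//     use bitvec::prelude::*;
+//
+//     let mut bytes = vec![0u8; 2];
+//     bytes.as_mut_bits::<Lsb0>().set(8, true); // element 1, low-order bit 0
+//     assert_eq!(bytes, vec![0, 1]);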
+
+#[cfg(not(tarpaulin_include))]
+impl<A, T> AsBits<T> for A
+where
+ A: AsRef<[T]>,
+ T: BitStore,
+{
+ #[inline]
+ fn as_bits<O>(&self) -> &BitSlice<T, O>
+ where O: BitOrder {
+ self.as_ref().view_bits::<O>()
+ }
+
+ #[inline]
+ fn try_as_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ self.as_ref().try_view_bits::<O>()
+ }
+}
+
+#[cfg(not(tarpaulin_include))]
+impl<A, T> AsMutBits<T> for A
+where
+ A: AsMut<[T]>,
+ T: BitStore,
+{
+ #[inline]
+ fn as_mut_bits<O>(&mut self) -> &mut BitSlice<T, O>
+ where O: BitOrder {
+ self.as_mut().view_bits_mut::<O>()
+ }
+
+ #[inline]
+ fn try_as_mut_bits<O>(
+ &mut self,
+ ) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
+ where O: BitOrder {
+ self.as_mut().try_view_bits_mut::<O>()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use static_assertions::*;
+
+ use super::*;
+ use crate::prelude::*;
+
+ #[test]
+ fn implementations() {
+ let mut byte = 0u8;
+ let mut bytes = [0u8; 2];
+ assert!(byte.view_bits::<LocalBits>().not_any());
+ assert!(byte.view_bits_mut::<LocalBits>().not_any());
+ assert!(bytes.view_bits::<LocalBits>().not_any());
+ assert!(bytes.view_bits_mut::<LocalBits>().not_any());
+ assert!(bytes[..].view_bits::<LocalBits>().not_any());
+ assert!(bytes[..].view_bits_mut::<LocalBits>().not_any());
+
+ let mut blank: [u8; 0] = [];
+ assert!(blank.view_bits::<LocalBits>().is_empty());
+ assert!(blank.view_bits_mut::<LocalBits>().is_empty());
+
+ assert_eq!([0u32; 2].as_bits::<LocalBits>().len(), 64);
+ assert_eq!([0u32; 2].as_mut_bits::<LocalBits>().len(), 64);
+
+ assert_eq!(0usize.as_raw_slice().len(), 1);
+ assert_eq!(0usize.as_raw_mut_slice().len(), 1);
+ assert_eq!(0u32.into_bitarray::<LocalBits>().len(), 32);
+
+ assert_impl_all!(
+ [usize; 10]: AsBits<usize>,
+ AsMutBits<usize>,
+ BitViewSized,
+ BitView,
+ );
+ }
+}