Files
addr2line
adler
adler32
ahash
aho_corasick
angle
approx
backtrace
bitflags
blender
bytemuck
byteorder
case
cast_trait
cfg_if
chrono
color
color_quant
const_fn
crc32fast
crossbeam
crossbeam_channel
crossbeam_deque
crossbeam_epoch
crossbeam_queue
crossbeam_skiplist
crossbeam_utils
darling
darling_core
darling_macro
dds
deflate
densevec
derive_builder
derive_builder_core
dot
downcast_rs
dual_quat
either
erased_serde
failure
failure_derive
fixedbitset
float_cmp
fnv
freeimage
freeimage_sys
freetype
freetype_gl_sys
freetype_sys
freetypegl
futures
futures_channel
futures_core
futures_executor
futures_io
futures_macro
futures_sink
futures_task
futures_util
async_await
future
io
lock
sink
stream
task
fxhash
generational_arena
generic_array
getrandom
gif
gimli
glfw
glfw_sys
glin
glin_derive
glsl
half
harfbuzz
harfbuzz_ft_sys
harfbuzz_sys
hashbrown
human_sort
ident_case
image
indexmap
instant
itertools
itoa
jpeg_decoder
lazy_static
libc
libm
lock_api
log
lut_parser
matrixmultiply
memchr
memoffset
meshopt
miniz_oxide
monotonic_clock
mopa
mutiny_derive
na
nalgebra
base
geometry
linalg
ncollide3d
bounding_volume
interpolation
partitioning
pipeline
procedural
query
algorithms
closest_points
contact
distance
nonlinear_time_of_impact
point
proximity
ray
time_of_impact
visitors
shape
transformation
utils
nom
num_complex
num_cpus
num_integer
num_iter
num_rational
num_traits
numext_constructor
numext_fixed_uint
numext_fixed_uint_core
numext_fixed_uint_hack
object
once_cell
parking_lot
parking_lot_core
pathfinding
pennereq
petgraph
pin_project_lite
pin_utils
png
polygon2
ppv_lite86
proc_macro2
proc_macro_crate
proc_macro_hack
proc_macro_nested
quote
rand
rand_chacha
rand_core
rand_distr
raw_window_handle
rawpointer
rayon
rayon_core
rect_packer
regex
regex_syntax
retain_mut
rin
rin_app
rin_blender
rin_core
rin_gl
rin_graphics
rin_gui
rin_material
rin_math
rin_postpo
rin_scene
rin_util
rin_window
rinblender
rinecs
rinecs_derive
rinecs_derive_utils
ringui_derive
rustc_demangle
rusty_pool
ryu
scopeguard
seitan
seitan_derive
semver
semver_parser
serde
serde_derive
serde_json
shaderdata_derive
simba
slab
slice_of_array
slotmap
smallvec
std140_data
streaming_iterator
strsim
syn
synstructure
thiserror
thiserror_impl
thread_local
tiff
time
toml
typenum
unchecked_unwrap
unicode_xid
vec2
vec3
weezl
x11
zlib_sys
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
use crate::bitstream::LsbWriter;
use byteorder::{LittleEndian, WriteBytesExt};
use std::io;
use std::io::Write;
use std::u16;

// Block size used by the test-only `compress_data_stored` helper below.
// Kept under u16::MAX so each chunk fits in one stored block.
#[cfg(test)]
const BLOCK_SIZE: u16 = 32000;

// First byte of a stored block: bit 0 = BFINAL flag, bits 1-2 = BTYPE (00 = stored).
const STORED_FIRST_BYTE: u8 = 0b0000_0000;
// Same header byte with the BFINAL bit set, marking the last block of the stream.
pub const STORED_FIRST_BYTE_FINAL: u8 = 0b0000_0001;
// Maximum payload this module will put in a single stored block.
// NOTE(review): the format allows up to u16::MAX (65535) bytes; the halving
// here looks like a deliberate safety/buffering margin — confirm with callers.
pub const MAX_STORED_BLOCK_LENGTH: usize = (u16::MAX as usize) / 2;

/// Write the 3-bit header of a stored (uncompressed) block and align the
/// bit writer to the next byte boundary, as the stored-block format requires.
pub fn write_stored_header(writer: &mut LsbWriter, final_block: bool) {
    // Bit 0 is the BFINAL flag; bits 1-2 are BTYPE = 00 (stored).
    let header = if final_block { STORED_FIRST_BYTE_FINAL } else { STORED_FIRST_BYTE };
    writer.write_bits(header.into(), 3);
    // The LEN/NLEN prefix and data that follow must start on a byte
    // boundary, so flush any partially-filled byte now.
    writer.flush_raw();
}

/// Write the body of one stored (uncompressed) block: the 16-bit
/// little-endian length (LEN), its one's complement (NLEN), and then the
/// raw `input` bytes. The 3-bit block header is written separately, before
/// this function is called.
///
/// Returns the number of `input` bytes written (the 4 prefix bytes are not
/// counted), or an `InvalidInput` error if `input` is longer than the
/// 65535 bytes a stored block's length field can describe. Any underlying
/// I/O error from `writer` is propagated.
pub fn compress_block_stored<W: Write>(input: &[u8], writer: &mut W) -> io::Result<usize> {
    if input.len() > u16::MAX as usize {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "Stored block too long!",
        ));
    }
    let len = input.len() as u16;
    // LEN: number of data bytes, little-endian.
    writer.write_all(&len.to_le_bytes())?;
    // NLEN: one's complement of LEN, serving as an integrity check.
    writer.write_all(&(!len).to_le_bytes())?;
    // The data itself is stored verbatim. Use write_all rather than write:
    // the previous code returned write's count directly, which could
    // silently report a short write as success.
    writer.write_all(input)?;
    Ok(input.len())
}

/// Test helper: compress `input` as a sequence of stored (uncompressed)
/// blocks, emitting the one-byte header before each block body. The last
/// chunk gets the BFINAL flag set.
#[cfg(test)]
pub fn compress_data_stored(input: &[u8]) -> Vec<u8> {
    let block_length = BLOCK_SIZE as usize;

    // Each block carries 5 bytes of overhead (1 header byte + LEN + NLEN),
    // so reserve for that instead of the previous flat `+ 2` estimate.
    let mut output = Vec::with_capacity(input.len() + 5 * (input.len() / block_length + 1));
    let mut chunks = input.chunks(block_length).peekable();
    while let Some(chunk) = chunks.next() {
        // First bit of the header marks the final chunk; the next two bits
        // are the compression type (00 = stored).
        let first_byte = if chunks.peek().is_none() {
            STORED_FIRST_BYTE_FINAL
        } else {
            STORED_FIRST_BYTE
        };
        // Push the byte directly; the old `output.write(&[first_byte])`
        // discarded the returned byte count (clippy: unused_io_amount).
        output.push(first_byte);

        compress_block_stored(chunk, &mut output).unwrap();
    }
    output
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::test_utils::decompress_to_end;

    /// Compress `data` as stored blocks and assert it decompresses back
    /// to the original bytes.
    fn roundtrip(data: &[u8]) {
        let compressed = compress_data_stored(data);
        let decompressed = decompress_to_end(&compressed);
        assert_eq!(data, &decompressed[..]);
    }

    #[test]
    fn no_compression_one_chunk() {
        roundtrip(&[1u8, 2, 3, 4, 5, 6, 7, 8]);
    }

    #[test]
    fn no_compression_multiple_chunks() {
        // 40000 bytes forces more than one BLOCK_SIZE (32000) chunk.
        roundtrip(&[32u8; 40000]);
    }

    #[test]
    fn no_compression_string() {
        let text = "This is some text, this is some more text, this is even \
             more text, lots of text here.";
        roundtrip(text.as_bytes());
    }
}