use crate::ffi;
use crate::{DecodePosition, VertexDataAdapter};

pub type Bounds = ffi::meshopt_Bounds;
pub type Meshlet = ffi::meshopt_Meshlet;

/// Splits the mesh into a set of meshlets where each meshlet has a micro index buffer
/// indexing into meshlet vertices that refer to the original vertex buffer.
///
/// The resulting data can be used to render meshes using the NVidia programmable mesh
/// shading pipeline, or in other cluster-based renderers.
///
/// For maximum efficiency, the index buffer being converted has to be optimized for vertex
/// cache first.
///
/// Note: `max_vertices` must be <= 64 and `max_triangles` must be <= 126
pub fn build_meshlets(
    indices: &[u32],
    vertex_count: usize,
    max_vertices: usize,
    max_triangles: usize,
) -> Vec<Meshlet> {
    let meshlet_count =
        unsafe { ffi::meshopt_buildMeshletsBound(indices.len(), max_vertices, max_triangles) };
    let mut meshlets: Vec<Meshlet> = vec![unsafe { ::std::mem::zeroed() }; meshlet_count];
    let count = unsafe {
        ffi::meshopt_buildMeshlets(
            meshlets.as_mut_ptr(),
            indices.as_ptr(),
            indices.len(),
            vertex_count,
            max_vertices,
            max_triangles,
        )
    };
    meshlets.resize(count, unsafe { ::std::mem::zeroed() });
    meshlets
}

/// Creates bounding volumes that can be used for frustum, backface and occlusion culling.
///
/// For backface culling with orthographic projection, use the following formula to reject backfacing clusters:
/// `dot(view, cone_axis) >= cone_cutoff`
///
/// For perspective projection, use the following formula that needs cone apex in addition to axis & cutoff:
/// `dot(normalize(cone_apex - camera_position), cone_axis) >= cone_cutoff`
///
/// Alternatively, you can use the formula that doesn't need cone apex and uses bounding sphere instead:
/// `dot(normalize(center - camera_position), cone_axis) >= cone_cutoff + radius / length(center - camera_position)`
///
/// or an equivalent formula that doesn't have a singularity at center = camera_position:
/// `dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius`
///
/// The formula that uses the apex is slightly more accurate but needs the apex; if you are already using bounding sphere
/// to do frustum/occlusion culling, the formula that doesn't use the apex may be preferable.
///
/// `index_count` should be <= 256*3 (the function assumes clusters of limited size)
pub fn compute_cluster_bounds(indices: &[u32], vertices: &VertexDataAdapter) -> Bounds {
    unsafe {
        ffi::meshopt_computeClusterBounds(
            indices.as_ptr(),
            indices.len(),
            vertices.pos_ptr(),
            vertices.vertex_count,
            vertices.vertex_stride,
        )
    }
}
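// Sketch: one way to consume the `Bounds` returned above for backface culling, using the
// apex-free bounding-sphere formula quoted in the doc comment. The field names (`center`,
// `radius`, `cone_axis`, `cone_cutoff`) assume the bindgen layout of `ffi::meshopt_Bounds`;
// this helper itself is illustrative and not part of the crate's API.
#[allow(dead_code)]
fn cluster_is_backfacing(bounds: &Bounds, camera_position: [f32; 3]) -> bool {
    // d = center - camera_position
    let d = [
        bounds.center[0] - camera_position[0],
        bounds.center[1] - camera_position[1],
        bounds.center[2] - camera_position[2],
    ];
    let dot = d[0] * bounds.cone_axis[0] + d[1] * bounds.cone_axis[1] + d[2] * bounds.cone_axis[2];
    let len = (d[0] * d[0] + d[1] * d[1] + d[2] * d[2]).sqrt();
    // dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius
    dot >= bounds.cone_cutoff * len + bounds.radius
}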
/// Creates bounding volumes that can be used for frustum, backface and occlusion culling.
///
/// For backface culling with orthographic projection, use the following formula to reject backfacing clusters:
/// `dot(view, cone_axis) >= cone_cutoff`
///
/// For perspective projection, use the following formula that needs cone apex in addition to axis & cutoff:
/// `dot(normalize(cone_apex - camera_position), cone_axis) >= cone_cutoff`
///
/// Alternatively, you can use the formula that doesn't need cone apex and uses bounding sphere instead:
/// `dot(normalize(center - camera_position), cone_axis) >= cone_cutoff + radius / length(center - camera_position)`
///
/// or an equivalent formula that doesn't have a singularity at center = camera_position:
/// `dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius`
///
/// The formula that uses the apex is slightly more accurate but needs the apex; if you are already using bounding sphere
/// to do frustum/occlusion culling, the formula that doesn't use the apex may be preferable.
///
/// `index_count` should be <= 256*3 (the function assumes clusters of limited size)
pub fn compute_cluster_bounds_decoder<T: DecodePosition>(
    indices: &[u32],
    vertices: &[T],
) -> Bounds {
    let vertices = vertices
        .iter()
        .map(|vertex| vertex.decode_position())
        .collect::<Vec<[f32; 3]>>();
    let positions = vertices.as_ptr() as *const f32;
    unsafe {
        ffi::meshopt_computeClusterBounds(
            indices.as_ptr(),
            indices.len(),
            positions,
            vertices.len() * 3,
            ::std::mem::size_of::<f32>() * 3,
        )
    }
}

/// Creates bounding volumes for a single meshlet produced by `build_meshlets`; the resulting
/// `Bounds` can be used for the same frustum, backface and occlusion culling tests as the
/// ones returned by `compute_cluster_bounds`.
pub fn compute_meshlet_bounds(meshlet: &Meshlet, vertices: &VertexDataAdapter) -> Bounds {
    let vertex_data = vertices.reader.get_ref();
    let vertex_data = vertex_data.as_ptr() as *const u8;
    let positions = unsafe { vertex_data.add(vertices.position_offset) };
    unsafe {
        ffi::meshopt_computeMeshletBounds(
            meshlet,
            positions as *const f32,
            vertices.vertex_count,
            vertices.vertex_stride,
        )
    }
}

/// Variant of `compute_meshlet_bounds` that reads positions through the `DecodePosition`
/// trait instead of a `VertexDataAdapter`.
pub fn compute_meshlet_bounds_decoder<T: DecodePosition>(
    meshlet: &Meshlet,
    vertices: &[T],
) -> Bounds {
    let vertices = vertices
        .iter()
        .map(|vertex| vertex.decode_position())
        .collect::<Vec<[f32; 3]>>();
    let positions = vertices.as_ptr() as *const f32;
    unsafe {
        ffi::meshopt_computeMeshletBounds(
            meshlet,
            positions,
            vertices.len() * 3,
            ::std::mem::size_of::<f32>() * 3,
        )
    }
}
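// Usage sketch: clusterize a cache-optimized index buffer and compute per-meshlet culling
// data. This is illustrative, not part of the crate's API; it assumes `VertexDataAdapter::new`
// takes `(bytes, vertex_stride, position_offset)` and returns a `Result`, and uses a plain
// position-only vertex layout.
#[allow(dead_code)]
fn build_meshlets_with_bounds(indices: &[u32], positions: &[[f32; 3]]) -> Vec<(Meshlet, Bounds)> {
    let vertex_stride = ::std::mem::size_of::<[f32; 3]>();
    let vertex_bytes = unsafe {
        ::std::slice::from_raw_parts(
            positions.as_ptr() as *const u8,
            positions.len() * vertex_stride,
        )
    };
    let vertices = VertexDataAdapter::new(vertex_bytes, vertex_stride, 0)
        .expect("vertex buffer is consistent with the given stride");
    // 64 vertices / 126 triangles are the documented per-meshlet limits for `build_meshlets`.
    build_meshlets(indices, positions.len(), 64, 126)
        .into_iter()
        .map(|meshlet| {
            let bounds = compute_meshlet_bounds(&meshlet, &vertices);
            (meshlet, bounds)
        })
        .collect()
}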