use gl::types::*;
use std::mem;
use std::ptr;
use std::marker::PhantomData;
use std::cell::RefCell;
use ::Result;
use state::StateRef;
use super::traits::*;
use super::range::Range;
use super::backend::*;
use super::shared::SharedBuffer;
#[cfg(not(feature="webgl"))]
use std::slice;
#[cfg(not(feature="webgl"))]
use gl;
#[cfg(not(feature="webgl"))]
use super::map::*;
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
use super::storage::BufferStorage;
#[cfg(not(feature="webgl"))]
use ::Error;
/// A typed handle to a GPU buffer object.
///
/// `T` is the element type the buffer logically stores. `len` counts the
/// elements currently in use, while `reserved` counts the elements the
/// underlying GL storage can hold (capacity); `reserved >= len` after a
/// `reserve` call.
#[derive(Debug)]
pub struct Buffer<T>{
    // Type-erased GL backend (DSA or bind-to-target, see super::backend).
    pub(super) backend: Box<dyn Backend>,
    // Elements currently in use.
    pub(super) len: usize,
    // Elements allocated on the GPU (capacity).
    pub(super) reserved: usize,
    // Marks logical ownership of `T` without storing any host-side data.
    pub(super) marker: PhantomData<T>,
}
/// Two buffers compare equal when they wrap the same GL buffer object,
/// regardless of their host-side bookkeeping.
impl<T> PartialEq for Buffer<T> {
    fn eq(&self, other: &Self) -> bool {
        let lhs = self.backend.id();
        let rhs = other.backend.id();
        lhs == rhs
    }
}

impl<T> Eq for Buffer<T> {}
/// Factory for [`Buffer`]s; borrows the GL state it allocates from.
pub struct Builder<'a>(pub(crate) &'a StateRef);
impl<'a> Builder<'a>{
    /// Picks the mutable-storage backend: DSA when the context supports
    /// direct state access, otherwise the classic bind-to-target path.
    #[cfg(all(not(feature = "gles"), not(feature="webgl")))]
    fn create_backend<T>(&self, len: usize, usage: GLenum, target: GLenum) -> Result<Box<dyn Backend>>{
        let size = (len * mem::size_of::<T>()) as GLsizeiptr;
        if self.0.capabilities().supports_dsa() {
            Ok(Box::new(DsaBackend::new(self.0, size, usage)?))
        }else{
            Ok(Box::new(BindBackend::new(self.0, size, usage, target)?))
        }
    }

    /// GLES / WebGL contexts only offer the bind-to-target path.
    #[cfg(any(feature = "gles", feature="webgl"))]
    fn create_backend<T>(&self, len: usize, usage: GLenum, target: GLenum) -> Result<Box<dyn Backend>>{
        let size = (len * mem::size_of::<T>()) as GLsizeiptr;
        Ok(Box::new(BindBackend::new(self.0, size, usage, target)?))
    }

    /// Creates a backend with no storage allocated yet.
    #[cfg(all(not(feature = "gles"), not(feature="webgl")))]
    fn empty_backend(&self, target: GLenum) -> Result<Box<dyn Backend>>{
        if self.0.capabilities().supports_dsa() {
            Ok(Box::new(DsaBackend::empty(self.0)?))
        }else{
            Ok(Box::new(BindBackend::empty(self.0, target)?))
        }
    }

    /// Creates a backend with no storage allocated yet (GLES / WebGL path).
    #[cfg(any(feature = "gles", feature="webgl"))]
    fn empty_backend(&self, target: GLenum) -> Result<Box<dyn Backend>>{
        Ok(Box::new(BindBackend::empty(self.0, target)?))
    }

    /// A `BufferStorage` can only be mapped later if it was created with a
    /// read and/or write mapping bit; the token records that permission.
    /// (Extracted: this logic was duplicated in every immutable constructor.)
    #[cfg(all(not(feature = "gles"), not(feature="webgl")))]
    fn persistent_map_token(flags: GLbitfield) -> Option<()>{
        if (flags & gl::MAP_READ_BIT != 0) || (flags & gl::MAP_WRITE_BIT != 0) {
            Some(())
        }else{
            None
        }
    }

    /// Allocates uninitialized storage for `len` elements of `T`, bound
    /// through `GL_ARRAY_BUFFER` on non-DSA paths.
    #[cfg(not(feature="webgl"))]
    pub fn create<T>(&self, len: usize, usage: GLenum) -> Result<Buffer<T>>{
        // Identical to `create_target` with the default target; delegate
        // instead of duplicating the construction.
        self.create_target(len, usage, gl::ARRAY_BUFFER)
    }

    /// Allocates uninitialized storage for `len` elements of `T`, bound
    /// through `target` on non-DSA paths.
    pub fn create_target<T>(&self, len: usize, usage: GLenum, target: GLenum) -> Result<Buffer<T>>{
        let backend = self.create_backend::<T>(len, usage, target)?;
        Ok(Buffer{
            backend,
            len,
            reserved: len,
            marker: PhantomData,
        })
    }

    /// Creates a buffer with no GPU storage; call `load`/`reserve` later.
    #[cfg(not(feature="webgl"))]
    pub fn empty<T>(&self) -> Result<Buffer<T>>{
        self.empty_target(gl::ARRAY_BUFFER)
    }

    /// Like `empty` but remembers `target` for the bind-to-target path.
    pub fn empty_target<T>(&self, target: GLenum) -> Result<Buffer<T>>{
        let backend = self.empty_backend(target)?;
        Ok(Buffer{
            backend,
            len: 0,
            reserved: 0,
            marker: PhantomData,
        })
    }

    /// Creates a buffer and uploads `data` in one call.
    #[cfg(not(feature="webgl"))]
    pub fn from_data<T: 'static>(&self, data: &[T], usage: GLenum) -> Result<Buffer<T>>{
        self.from_data_target(data, usage, gl::ARRAY_BUFFER)
    }

    /// Like `from_data` but binds through `target` on non-DSA paths.
    pub fn from_data_target<T: 'static>(&self, data: &[T], usage: GLenum, target: GLenum) -> Result<Buffer<T>>{
        let mut buffer = self.empty_target(target)?;
        buffer.load(data, usage);
        Ok(buffer)
    }

    /// Allocates immutable storage (`glBufferStorage`) for `len` elements,
    /// bound through `GL_ARRAY_BUFFER` on non-DSA paths.
    #[cfg(all(not(feature = "gles"), not(feature="webgl")))]
    pub fn create_immutable<T>(&self, len: usize, flags: GLbitfield) -> Result<BufferStorage<T>>{
        // Identical to the `_target` variant with the default target.
        self.create_immutable_target(len, flags, gl::ARRAY_BUFFER)
    }

    /// Allocates immutable storage for `len` elements, bound through
    /// `target` on non-DSA paths.
    #[cfg(all(not(feature = "gles"), not(feature="webgl")))]
    pub fn create_immutable_target<T>(&self, len: usize, flags: GLbitfield, target: GLenum) -> Result<BufferStorage<T>>{
        let size = (len * mem::size_of::<T>()) as GLsizeiptr;
        let backend = if self.0.capabilities().supports_dsa() {
            Box::new(DsaBackend::new_immutable(self.0, size, flags)?) as Box<dyn Backend>
        }else{
            Box::new(BindBackend::new_immutable(self.0, size, flags, target)?) as Box<dyn Backend>
        };
        Ok(BufferStorage{
            backend,
            len,
            reserved: len,
            marker: PhantomData,
            creation_flags: flags,
            persistent_map_token: RefCell::new(Self::persistent_map_token(flags)),
        })
    }

    /// Allocates immutable storage initialized with `data`.
    #[cfg(all(not(feature = "gles"), not(feature="webgl")))]
    pub fn immutable_from_data<T>(&self, data: &[T], flags: GLbitfield) -> Result<BufferStorage<T>>{
        let size = (data.len() * mem::size_of::<T>()) as GLsizeiptr;
        let backend = if self.0.capabilities().supports_dsa() {
            Box::new(DsaBackend::immutable_from_data(self.0, data.as_ptr() as *const _, size, flags)?) as Box<dyn Backend>
        }else{
            Box::new(BindBackend::immutable_from_data(self.0, data.as_ptr() as *const _, size, flags, gl::ARRAY_BUFFER)?) as Box<dyn Backend>
        };
        Ok(BufferStorage{
            backend,
            len: data.len(),
            reserved: data.len(),
            creation_flags: flags,
            marker: PhantomData,
            persistent_map_token: RefCell::new(Self::persistent_map_token(flags)),
        })
    }
}
impl<T: 'static> Buffer<T>{
    /// Uploads `data`, (re)allocating the GPU storage with the given usage
    /// hint; both `len()` and `capacity()` become `data.len()`.
    pub fn load(&mut self, data: &[T], usage: GLenum){
        let size = data.len() * mem::size_of::<T>();
        unsafe{
            self.backend.load(data.as_ptr() as *const _, size, usage);
        }
        self.len = data.len();
        self.reserved = data.len();
    }

    /// Like `load` but binds through `target` on non-DSA backends.
    pub fn load_target(&mut self, data: &[T], usage: GLenum, target: GLenum){
        let size = data.len() * mem::size_of::<T>();
        unsafe{
            self.backend.load_target(data.as_ptr() as *const _, size, usage, target);
        }
        self.len = data.len();
        self.reserved = data.len();
    }

    /// Allocates capacity for `len` elements without uploading data; the
    /// used length (`len()`) is intentionally left unchanged.
    pub fn reserve(&mut self, len: usize, usage: GLenum){
        let size = len * mem::size_of::<T>();
        unsafe{
            self.backend.load(ptr::null() as *const _, size, usage);
        }
        self.reserved = len;
    }

    /// Like `reserve` but binds through `target` on non-DSA backends.
    pub fn reserve_target(&mut self, len: usize, usage: GLenum, target: GLenum){
        let size = len * mem::size_of::<T>();
        unsafe{
            self.backend.load_target(ptr::null() as *const _, size, usage, target);
        }
        self.reserved = len;
    }

    /// Overwrites the front of the existing storage with `data`.
    ///
    /// # Panics
    /// If `data.len()` exceeds the reserved capacity.
    pub fn update(&mut self, data: &[T]){
        assert!(data.len() <= self.reserved);
        let size = data.len() * mem::size_of::<T>();
        unsafe{
            self.backend.update(data.as_ptr() as *const _, size, 0);
        }
        self.len = data.len();
    }

    /// Maps the used region read-only; the returned guard unmaps on drop.
    ///
    /// # Errors
    /// `MapError` when the driver returns a null mapping.
    #[cfg(not(feature="webgl"))]
    pub fn map_read(&mut self, flags: MapReadFlags) -> Result<MapRead<T,Self>>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = unsafe{
            self.backend.map_range(start as isize, len as isize, gl::MAP_READ_BIT | flags.bits())
        };
        if data.is_null() {
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            unsafe{
                // Fix: the slice must span exactly the mapped bytes, i.e.
                // `len()` elements; it previously spanned `reserved`
                // elements, extending past the mapped range whenever
                // capacity > len.
                let slice = slice::from_raw_parts(data as *const T, self.len());
                Ok(MapRead{
                    map: slice,
                    buffer: self,
                })
            }
        }
    }

    /// Maps the used region write-only; the returned guard unmaps on drop.
    ///
    /// # Errors
    /// `MapError` when the driver returns a null mapping.
    #[cfg(not(feature="webgl"))]
    pub fn map_write(&mut self, flags: MapWriteFlags) -> Result<MapWrite<T,Self>>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = unsafe{ self.backend.map_range(start as isize, len as isize, gl::MAP_WRITE_BIT | flags.bits()) };
        if data.is_null(){
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            unsafe{
                // Fix: slice sized to the mapped range (`len()` elements),
                // not the full capacity (`reserved`).
                let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
                Ok(MapWrite{
                    map: slice,
                    dropper: MapDropper{buffer: self},
                })
            }
        }
    }

    /// Maps the used region for reading and writing; guard unmaps on drop.
    ///
    /// # Errors
    /// `MapError` when the driver returns a null mapping.
    #[cfg(not(feature="webgl"))]
    pub fn map_read_write(&mut self, flags: MapReadWriteFlags) -> Result<MapReadWrite<T,Self>>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = unsafe{ self.backend.map_range(start as isize, len as isize, gl::MAP_WRITE_BIT | gl::MAP_READ_BIT | flags.bits()) };
        if data.is_null(){
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            unsafe{
                // Fix: slice sized to the mapped range (`len()` elements),
                // not the full capacity (`reserved`).
                let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
                Ok(MapReadWrite{
                    map: slice,
                    dropper: MapDropper{buffer: self}
                })
            }
        }
    }

    /// Raw read-only mapping without a guard.
    ///
    /// # Safety
    /// The caller must call `unmap` before GL uses the buffer again, and
    /// must not touch the returned slice after unmapping.
    #[cfg(not(feature="webgl"))]
    pub unsafe fn map_read_slice(&self, flags: MapReadFlags) -> Result<&[T]>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = self.backend.map_range(
            start as isize,
            len as isize,
            gl::MAP_READ_BIT | flags.bits());
        if data.is_null() {
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            // Fix: slice sized to the mapped range (`len()`), not capacity.
            let slice = slice::from_raw_parts(data as *const T, self.len());
            Ok(slice)
        }
    }

    /// Raw write-only mapping without a guard.
    ///
    /// # Safety
    /// Same contract as `map_read_slice`.
    #[cfg(not(feature="webgl"))]
    pub unsafe fn map_write_slice(&mut self, flags: MapWriteFlags) -> Result<&mut [T]>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = self.backend.map_range(
            start as isize,
            len as isize,
            gl::MAP_WRITE_BIT | flags.bits());
        if data.is_null(){
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            // Fix: slice sized to the mapped range (`len()`), not capacity.
            let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
            Ok(slice)
        }
    }

    /// Raw read/write mapping without a guard.
    ///
    /// # Safety
    /// Same contract as `map_read_slice`.
    #[cfg(not(feature="webgl"))]
    pub unsafe fn map_read_write_slice(&mut self, flags: MapReadWriteFlags) -> Result<&mut [T]>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = self.backend.map_range(
            start as isize,
            len as isize,
            gl::MAP_WRITE_BIT | gl::MAP_READ_BIT | flags.bits());
        if data.is_null(){
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            // Fix: slice sized to the mapped range (`len()`), not capacity.
            let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
            Ok(slice)
        }
    }

    /// Releases a mapping created by one of the `map_*_slice` functions.
    ///
    /// # Safety
    /// Any slice from a previous map must no longer be in use.
    #[cfg(not(feature="webgl"))]
    pub unsafe fn unmap(&mut self){
        self.backend.unmap();
    }

    /// Copies this buffer into `dst`, starting at `dst`'s range offset.
    pub fn copy_to<U, B:BufferRange<U> + WithBackendMut>(&self, dst: &mut B){
        // Destination byte offset: the range's element start times the
        // destination element stride.
        let offset = dst.start() * (dst as &B).stride();
        // NOTE(review): this copies `capacity_bytes()` (reserved size),
        // not `bytes()` (used size) — confirm copying the whole capacity
        // is intended when `len < capacity`.
        dst.with_backend_mut(|dst_backend|
            self.backend.copy_to(dst_backend, 0, offset, self.capacity_bytes())
        );
    }

    /// Number of elements currently in use.
    pub fn len(&self) -> usize{
        self.len
    }

    /// `true` when the buffer holds no elements.
    /// Fix: the comparison was inverted (`self.len != 0`), so non-empty
    /// buffers reported empty and vice versa.
    pub fn is_empty(&self) -> bool{
        self.len == 0
    }

    /// Number of elements the allocated storage can hold.
    pub fn capacity(&self) -> usize{
        self.reserved
    }

    /// Used size in bytes.
    pub fn bytes(&self) -> usize{
        self.len * mem::size_of::<T>()
    }

    /// Allocated size in bytes.
    pub fn capacity_bytes(&self) -> usize{
        self.reserved * mem::size_of::<T>()
    }

    /// GL object name of the underlying buffer.
    pub fn id(&self) -> GLuint{
        self.backend.id()
    }

    /// Size in bytes of one element.
    pub fn stride(&self) -> usize{
        mem::size_of::<T>()
    }

    /// Converts this buffer into a `SharedBuffer` handle.
    pub fn into_shared(self) -> SharedBuffer<T>{
        SharedBuffer::from(self)
    }

    /// Borrowed view over `range` (in elements) of this buffer.
    pub fn range<R: InputRange>(&self, range: R) -> Range<T, Buffer<T>, &Buffer<T>>{
        Range{
            buffer: self,
            range: range.to_range(self),
            marker_type: PhantomData,
            marker_buffer: PhantomData,
        }
    }

    /// Mutably borrowed view over `range` (in elements) of this buffer.
    pub fn range_mut<R: InputRange>(&mut self, range: R) -> Range<T, Buffer<T>, &mut Buffer<T>>{
        Range{
            range: range.to_range(self),
            buffer: self,
            marker_type: PhantomData,
            marker_buffer: PhantomData,
        }
    }

    /// Consumes the buffer into an owning range view.
    pub fn into_range<R: InputRange>(self, range: R) -> Range<T, Buffer<T>, Buffer<T>>{
        Range{
            range: range.to_range(&self),
            buffer: self,
            marker_type: PhantomData,
            marker_buffer: PhantomData,
        }
    }
}
impl Buffer<u8>{
    /// Reinterprets a byte buffer as a buffer of `T`, recomputing the
    /// element counts from `T`'s size. The underlying GL object is reused
    /// unchanged.
    pub fn cast<T>(self) -> Buffer<T>{
        let elem_size = mem::size_of::<T>();
        let Buffer{ backend, len, reserved, .. } = self;
        Buffer{
            backend,
            len: len / elem_size,
            reserved: reserved / elem_size,
            marker: PhantomData,
        }
    }
}
/// Same reinterpretation as the inherent `cast`, exposed through the
/// `Cast` trait so generic code can convert byte buffers.
impl<T> Cast<T> for Buffer<u8>{
    type CastTo = Buffer<T>;

    fn cast(self) -> Buffer<T>{
        let elem_size = mem::size_of::<T>();
        Buffer {
            len: self.len / elem_size,
            reserved: self.reserved / elem_size,
            backend: self.backend,
            marker: PhantomData,
        }
    }
}
// Unused impl lifetime `'a` removed.
impl<T: 'static> TypedBuffer<T> for Buffer<T> {
    fn id(&self) -> GLuint{
        self.id()
    }

    fn len(&self) -> usize{
        self.len()
    }

    fn capacity(&self) -> usize{
        self.capacity()
    }

    /// Maps the used region for reading, hands it to `f`, then unmaps.
    ///
    /// # Errors
    /// `MapError` when the driver returns a null mapping.
    #[cfg(not(feature="webgl"))]
    fn with_map_read<F: FnMut(&[T])>(&self, flags: MapReadFlags, mut f: F) -> Result<()>{
        let start = self.start() * mem::size_of::<T>();
        let len = self.len() * mem::size_of::<T>();
        let data = unsafe{
            self.backend.map_range(start as isize, len as isize, gl::MAP_READ_BIT | flags.bits())
        };
        if data.is_null() {
            Err(Error::new(::ErrorKind::MapError, None))
        }else{
            unsafe{
                // Fix: slice sized to the mapped range (`len()` elements),
                // not the full capacity (`reserved`).
                let slice = slice::from_raw_parts(data as *const T, self.len());
                f(slice);
                // Fix: the buffer was left mapped after `f` returned. Every
                // other scoped mapping in this module unmaps when done (via
                // MapDropper), and GL forbids using a buffer while mapped.
                self.backend.unmap();
                Ok(())
            }
        }
    }

    fn copy_to<U, BB:BufferRange<U> + WithBackendMut>(&self, dst: &mut BB) where Self: Sized{
        self.copy_to(dst)
    }

    /// # Safety
    /// Any slice obtained from a raw mapping must no longer be in use.
    #[cfg(not(feature="webgl"))]
    unsafe fn unmap(&self){
        self.backend.unmap()
    }
}
impl<T: 'static> TypedBufferMut<T> for Buffer<T> {
    /// Maps the buffer writable, runs `f` on the mapped slice; the guard
    /// unmaps when it goes out of scope.
    #[cfg(not(feature="webgl"))]
    unsafe fn with_map_write<F: Fn(&mut [T])>(&mut self, flags: MapWriteFlags, f: F) -> Result<()>{
        let mut mapped = self.map_write(flags)?;
        f(mapped.data_mut());
        Ok(())
    }

    /// Maps the buffer read/write, runs `f` on the mapped slice; the guard
    /// unmaps when it goes out of scope.
    #[cfg(not(feature="webgl"))]
    fn with_map_read_write<F: Fn(&mut [T])>(&mut self, flags: MapReadWriteFlags, f: F) -> Result<()>{
        let mut mapped = self.map_read_write(flags)?;
        f(mapped.data_mut());
        Ok(())
    }
}
impl<'a, T: 'static> BufferRange<T> for Buffer<T> {
fn start(&self) -> usize{
0
}
fn end(&self) -> usize{
self.len()
}
fn into_range<R: InputRange>(self, range: R) -> super::Range<T, Self, Self> where Self: Sized{
self.into_range(range)
}
}
impl<'a, T: 'static> BufferRangeMut<T> for Buffer<T> {
fn update(&mut self, data: &[T]){
self.update(data);
}
}
impl<'a, T: 'static> TypedBuffer<T> for &Buffer<T>{
fn id(&self) -> GLuint{
(*self).id()
}
fn len(&self) -> usize{
(*self).len()
}
fn capacity(&self) -> usize{
(*self).capacity()
}
#[cfg(not(feature="webgl"))]
fn with_map_read<F: FnMut(&[T])>(&self, flags: MapReadFlags, f: F) -> Result<()>{
(*self).with_map_read(flags, f)
}
fn copy_to<U, BB:BufferRange<U> + WithBackendMut>(&self, dst: &mut BB) where Self: Sized{
(*self).copy_to(dst)
}
#[cfg(not(feature="webgl"))]
unsafe fn unmap(&self){
self.backend.unmap()
}
}
impl<'a, T: 'static> BufferRange<T> for &Buffer<T>{
fn start(&self) -> usize{
0
}
fn end(&self) -> usize{
(*self).len()
}
fn into_range<R: InputRange>(self, range: R) -> super::Range<T, Self, Self> where Self: Sized{
Range{
range: range.to_range(&self),
buffer: self,
marker_type: PhantomData,
marker_buffer: PhantomData,
}
}
}
/// Read-only access to the type-erased backend.
impl<T> WithBackend for Buffer<T>{
    fn with_backend<F:FnMut(&dyn Backend)->R, R>(&self, mut f:F) -> R{
        f(self.backend.as_ref())
    }
}
/// Mutable access to the type-erased backend.
impl<T> WithBackendMut for Buffer<T>{
    fn with_backend_mut<F:FnMut(&mut dyn Backend)->R, R>(&mut self, mut f:F) -> R{
        f(self.backend.as_mut())
    }
}
#[cfg(not(feature="webgl"))]
impl<T: 'static> WithMapRange<T> for Buffer<T>{
    /// Maps `length` elements starting at `offset` read-only, hands them to
    /// `f`, then unmaps.
    ///
    /// # Errors
    /// `OutOfBounds` when the requested range exceeds the allocated
    /// capacity; `MapError` when the driver returns a null mapping.
    fn with_map_range_read<F: FnMut(&[T])>(&self, offset: usize, length: usize, flags: MapReadFlags, mut f: F) -> Result<()>{
        if offset + length > self.reserved{
            return Err(Error::new(::ErrorKind::OutOfBounds,None));
        }
        let bytes_offset = offset * mem::size_of::<T>();
        let bytes_len = length * mem::size_of::<T>();
        let data = unsafe{ self.backend.map_range(bytes_offset as GLintptr, bytes_len as GLsizeiptr, gl::MAP_READ_BIT | flags.bits()) };
        if data.is_null() {
            Err(Error::new(::ErrorKind::MapError,None))
        }else{
            unsafe{
                let slice = slice::from_raw_parts(data as *const T, length);
                f(slice);
                // Fix: the mapping was never released, leaving the buffer in
                // a mapped state after the call returned; the write-side
                // counterparts unmap through MapDropper, and GL forbids
                // using a buffer while it is mapped.
                self.backend.unmap();
                Ok(())
            }
        }
    }
}
#[cfg(not(feature = "webgl"))]
impl<T: 'static> WithMapRangeMut<T> for Buffer<T>{
    /// Maps `length` elements at `offset` writable and runs `f`; the guard
    /// returned by `map_range_write` unmaps when it drops.
    unsafe fn with_map_range_write<F: FnMut(&mut [T])>(&mut self, offset: usize, length: usize, flags: MapWriteFlags, mut f: F) -> Result<()>{
        let mut mapped = self.map_range_write(offset, length, flags)?;
        f(mapped.data_mut());
        Ok(())
    }

    /// Maps `length` elements at `offset` for read/write and runs `f`; the
    /// guard returned by `map_range_read_write` unmaps when it drops.
    fn with_map_range_read_write<F: FnMut(&mut [T])>(&mut self, offset: usize, length: usize, flags: MapReadWriteFlags, mut f: F) -> Result<()>{
        let mut mapped = self.map_range_read_write(offset, length, flags)?;
        f(mapped.data_mut());
        Ok(())
    }
}
#[cfg(not(feature="webgl"))]
impl<'a, T> MapRange<T> for Buffer<T>{
fn map_range_read(&mut self, offset: usize, length: usize, flags: MapReadFlags) -> Result<MapRead<T, Self>>{
if offset + length > self.reserved{
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(bytes_offset as GLintptr, length_offset as GLsizeiptr, gl::MAP_READ_BIT | flags.bits()) };
if data.is_null() {
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts(data as *const T, length);
Ok(MapRead{
map: slice,
buffer: self
})
}
}
}
}
#[cfg(not(feature = "webgl"))]
impl<'a, T> MapRangeMut<T> for Buffer<T>{
fn map_range_write(&mut self, offset: usize, length: usize, flags: MapWriteFlags) -> Result<MapWrite<T, Self>>{
if offset + length > self.reserved {
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(bytes_offset as GLintptr, length_offset as GLsizeiptr, gl::MAP_WRITE_BIT | flags.bits()) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts_mut(data as *mut T, length);
Ok(MapWrite{
map: slice,
dropper: MapDropper{ buffer: self }
})
}
}
}
fn map_range_read_write(&mut self, offset: usize, length: usize, flags: MapReadWriteFlags) -> Result<MapReadWrite<T, Self>>{
if offset + length > self.reserved{
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(bytes_offset as GLintptr, length_offset as GLsizeiptr, gl::MAP_READ_BIT | gl::MAP_WRITE_BIT | flags.bits()) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts_mut(data as *mut T, length);
Ok(MapReadWrite{
map: slice,
dropper: MapDropper{buffer: self}
})
}
}
}
}