use gl::types::*;
use gl;
use std::mem;
#[cfg(not(feature="webgl"))]
use std::slice;
use std::marker::PhantomData;
use ::Result;
use ::Error;
use super::traits::*;
use super::map::*;
use super::shared_storage::SharedBufferStorage;
use super::range::Range;
use std::ops::Range as StdRange;
use std::os::raw::c_void;
use std::cell::RefCell;
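/// Buffer with immutable storage in the `glBufferStorage` sense: the capacity
/// and the allowed usages (`creation_flags`) are fixed at creation time.
/// `len` is the number of valid elements, `reserved` the allocated capacity in
/// elements, and `persistent_map_token` ensures that at most one persistent
/// map is outstanding at a time.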
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
#[derive(Debug)]
pub struct BufferStorage<T>{
pub(super) backend: Box<dyn Backend>,
pub(super) len: usize,
pub(super) reserved: usize,
pub(super) marker: PhantomData<T>,
pub(super) creation_flags: GLbitfield,
pub(super) persistent_map_token: RefCell<Option<()>>,
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T> PartialEq for BufferStorage<T> {
fn eq(&self, other: &Self) -> bool {
self.backend.id() == other.backend.id()
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T> Eq for BufferStorage<T> {}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> BufferStorage<T>{
pub fn update(&mut self, data: &[T]){
assert!(data.len() <= self.reserved);
let size = data.len() * mem::size_of::<T>();
unsafe{
self.backend.update( data.as_ptr() as *const c_void, size, 0);
}
self.len = data.len();
}
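/// Maps the valid portion of the buffer for reading. Fails if the storage was
/// created without `gl::MAP_READ_BIT`, if a persistent map is currently
/// outstanding, or if the underlying map call returns a null pointer.
/// `map_write` and `map_read_write` below follow the same pattern for write
/// and read/write access.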
pub fn map_read(&mut self, flags: MapReadFlags) -> Result<MapRead<T, Self>>{
if self.is_read_map() {
if self.is_persistant() && self.persistent_map_token.borrow().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already persistently mapped"))
}
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(start as isize, len as isize, gl::MAP_READ_BIT | flags.bits()) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
unsafe{
// expose only the mapped region (`len` elements), not the full reserved capacity
let slice = slice::from_raw_parts(data as *const T, self.len());
Ok(MapRead{
map: slice,
buffer: self,
})
}
}
}else{
Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"))
}
}
pub fn map_write(&mut self, flags: MapWriteFlags) -> Result<MapWrite<T,Self>>{
if self.is_write_map(){
if self.is_persistant() && self.persistent_map_token.borrow().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already persistently mapped"))
}
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(start as isize, len as isize, gl::MAP_WRITE_BIT | flags.bits()) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
unsafe{
let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
Ok(MapWrite{
map: slice,
dropper: MapDropper{buffer: self},
})
}
}
}else{
Err(Error::new(::ErrorKind::MapError, "Can't map for write since storage was created without gl::MAP_WRITE_BIT"))
}
}
pub fn map_read_write(&mut self, flags: MapReadWriteFlags) -> Result<MapReadWrite<T,Self>>{
if !self.is_read_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"));
}
if !self.is_write_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for write since storage was created without gl::MAP_WRITE_BIT"));
}
if self.is_persistant() && self.persistent_map_token.borrow().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already persistently mapped"))
}
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(start as isize, len as isize, gl::MAP_WRITE_BIT | gl::MAP_READ_BIT | flags.bits()) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
unsafe{
let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
Ok(MapReadWrite{
map: slice,
dropper: MapDropper{buffer: self},
})
}
}
}
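/// Like `map_read` but returns the mapped slice directly instead of a guard
/// object; the caller is responsible for calling `unmap` when done, which is
/// why the `*_slice` variants are `unsafe`.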
pub unsafe fn map_read_slice(&mut self, flags: MapReadFlags) -> Result<&[T]>{
if self.is_read_map() {
if self.is_persistant() && self.persistent_map_token.borrow().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already persistently mapped"))
}
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = self.backend.map_range(start as isize, len as isize, gl::MAP_READ_BIT | flags.bits());
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
let slice = slice::from_raw_parts(data as *const T, self.len());
Ok(slice)
}
}else{
Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"))
}
}
pub unsafe fn map_write_slice(&mut self, flags: MapWriteFlags) -> Result<&mut [T]>{
if self.is_write_map(){
if self.is_persistant() && self.persistent_map_token.borrow().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already persistently mapped"))
}
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = self.backend.map_range(start as isize, len as isize, gl::MAP_WRITE_BIT | flags.bits());
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
Ok(slice)
}
}else{
Err(Error::new(::ErrorKind::MapError, "Can't map for write since storage was created without gl::MAP_WRITE_BIT"))
}
}
pub unsafe fn map_read_write_slice(&mut self, flags: MapReadWriteFlags) -> Result<&mut [T]>{
if !self.is_read_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"));
}
if !self.is_write_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for write since storage was created without gl::MAP_WRITE_BIT"));
}
if self.is_persistant() && self.persistent_map_token.borrow().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already persistently mapped"))
}
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = self.backend.map_range(start as isize, len as isize, gl::MAP_WRITE_BIT | gl::MAP_READ_BIT | flags.bits());
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
let slice = slice::from_raw_parts_mut(data as *mut T, self.len());
Ok(slice)
}
}
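/// Unmaps the buffer. For persistently mappable storage this also puts the
/// `persistent_map_token` back, so the buffer can be persistently mapped again.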
pub unsafe fn unmap(&mut self){
if self.is_persistant() && self.persistent_map_token.borrow().is_none() {
*self.persistent_map_token.borrow_mut() = Some(())
}
self.backend.unmap();
}
pub fn is_persistant(&self) -> bool {
self.creation_flags & gl::MAP_PERSISTENT_BIT != 0
}
pub fn is_read_map(&self) -> bool {
self.creation_flags & gl::MAP_READ_BIT != 0
}
pub fn is_write_map(&self) -> bool {
self.creation_flags & gl::MAP_WRITE_BIT != 0
}
pub fn is_dynamic_storage(&self) -> bool {
self.creation_flags & gl::DYNAMIC_STORAGE_BIT != 0
}
pub fn is_coherent(&self) -> bool {
self.creation_flags & gl::MAP_COHERENT_BIT != 0
}
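/// Persistently maps the whole buffer for reading (the range is mapped with
/// `gl::MAP_PERSISTENT_BIT`), so the map can be kept alive across draw calls.
/// The `into_*` variants take ownership of the buffer instead of borrowing it;
/// only one persistent map can be outstanding at a time.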
pub fn map_persistent_read(&self, flags: MapReadFlags) -> Result<MapPersistentRead<T, &Self>>{
unsafe{
self.unsafe_map_range_persistent_read(None, flags)
.map(|map| MapPersistentRead::new(
map,
self
))
}
}
pub fn into_map_persistent_read(self, flags: MapReadFlags) -> Result<MapPersistentRead<T, Self>>{
unsafe{
self.unsafe_map_range_persistent_read(None, flags)
.map(|map| MapPersistentRead::new(
map,
self
))
}
}
pub fn map_persistent_write(&self, flags: MapWriteFlags) -> Result<MapPersistentWrite<T, &Self>>{
unsafe{
self.unsafe_map_range_persistent_write(None, flags)
.map(|map| MapPersistentWrite::new(map, self))
}
}
pub fn into_map_persistent_write(self, flags: MapWriteFlags) -> Result<MapPersistentWrite<T, Self>>{
unsafe{
self.unsafe_map_range_persistent_write(None, flags)
.map(|map| MapPersistentWrite::new(map, self))
}
}
pub fn map_persistent_read_write(&self, flags: MapReadWriteFlags) -> Result<MapPersistentReadWrite<T, &Self>>{
unsafe{
self.unsafe_map_range_persistent_read_write(None, flags)
.map(|map| MapPersistentReadWrite::new(
map,
self
))
}
}
pub fn into_map_persistent_read_write(self, flags: MapReadWriteFlags) -> Result<MapPersistentReadWrite<T, Self>>{
unsafe{
self.unsafe_map_range_persistent_read_write(None, flags)
.map(|map| MapPersistentReadWrite::new(
map,
self
))
}
}
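/// Copies this buffer's whole reserved storage into `dst`, starting at `dst`'s
/// own start offset (in bytes).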
pub fn copy_to<U, B:BufferRange<U> + WithBackendMut>(&self, dst: &mut B){
let offset = dst.start() * (dst as &B).stride();
dst.with_backend_mut(|dst_backend|
self.backend.copy_to(dst_backend, 0, offset, self.capacity_bytes())
);
}
pub fn len(&self) -> usize{
self.len
}
pub fn is_empty(&self) -> bool{
self.len == 0
}
pub fn capacity(&self) -> usize{
self.reserved
}
pub fn bytes(&self) -> usize{
self.len * mem::size_of::<T>()
}
pub fn capacity_bytes(&self) -> usize{
self.reserved * mem::size_of::<T>()
}
pub fn id(&self) -> GLuint{
self.backend.id()
}
pub fn stride(&self) -> usize{
mem::size_of::<T>()
}
pub fn into_shared(self) -> SharedBufferStorage<T>{
SharedBufferStorage::from(self)
}
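/// Returns a read-only view over a sub-range of this buffer; `range_mut` and
/// `into_range` are the mutable and owning counterparts.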
pub fn range<R: InputRange>(&self, range: R) -> Range<T, BufferStorage<T>, &BufferStorage<T>>{
Range{
buffer: self,
range: range.to_range(self),
marker_type: PhantomData,
marker_buffer: PhantomData,
}
}
pub fn range_mut<R: InputRange>(&mut self, range: R) -> Range<T, BufferStorage<T>, &mut BufferStorage<T>>{
Range{
range: range.to_range(self),
buffer: self,
marker_type: PhantomData,
marker_buffer: PhantomData,
}
}
pub fn into_range<R: InputRange>(self, range: R) -> Range<T, BufferStorage<T>, BufferStorage<T>>{
Range{
range: range.to_range(&self),
buffer: self,
marker_type: PhantomData,
marker_buffer: PhantomData,
}
}
}
impl BufferStorage<u8>{
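/// Reinterprets a byte buffer as a buffer of `T`, recomputing length and
/// capacity in units of `T`.
///
/// A minimal sketch, assuming `bytes` is a `BufferStorage<u8>` obtained
/// elsewhere:
///
/// ```ignore
/// let floats: BufferStorage<f32> = bytes.cast();
/// assert_eq!(floats.stride(), 4);
/// ```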
pub fn cast<T>(self) -> BufferStorage<T> {
BufferStorage {
backend: self.backend,
len: self.len / mem::size_of::<T>(),
reserved: self.reserved / mem::size_of::<T>(),
marker: PhantomData,
creation_flags: self.creation_flags,
persistent_map_token: self.persistent_map_token,
}
}
}
impl<T> Cast<T> for BufferStorage<u8>{
type CastTo = BufferStorage<T>;
fn cast(self) -> BufferStorage<T>{
BufferStorage {
backend: self.backend,
len: self.len / mem::size_of::<T>(),
reserved: self.reserved / mem::size_of::<T>(),
marker: PhantomData,
creation_flags: self.creation_flags,
persistent_map_token: self.persistent_map_token,
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> TypedBuffer<T> for BufferStorage<T>{
fn id(&self) -> GLuint{
(*self).id()
}
fn len(&self) -> usize{
(*self).len()
}
fn capacity(&self) -> usize{
(*self).capacity()
}
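/// Maps the buffer for reading and passes the mapped slice to `f`.
///
/// A minimal sketch, assuming `buffer: BufferStorage<f32>` and some
/// `flags: MapReadFlags` are available:
///
/// ```ignore
/// buffer.with_map_read(flags, |data| {
///     println!("first element: {}", data[0]);
/// })?;
/// ```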
fn with_map_read<F: FnMut(&[T])>(&self, flags: MapReadFlags, mut f: F) -> Result<()>{
if self.is_read_map() {
let start = self.start() * mem::size_of::<T>();
let len = self.len() * mem::size_of::<T>();
let data = unsafe{ self.backend.map_range(start as isize, len as isize, gl::MAP_READ_BIT | flags.bits()) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError, None))
}else{
unsafe{
let slice = slice::from_raw_parts(data as *const T, self.len());
f(slice);
Ok(())
}
}
}else{
Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"))
}
}
fn copy_to<U,B:BufferRange<U> + WithBackendMut>(&self, dst: &mut B) where Self: Sized{
(*self).copy_to(dst)
}
#[cfg(not(feature="webgl"))]
unsafe fn unmap(&self){
self.backend.unmap()
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> TypedBufferMut<T> for BufferStorage<T> {
unsafe fn with_map_write<F: FnMut(&mut [T])>(&mut self, flags: MapWriteFlags, mut f: F) -> Result<()>{
(*self).map_write(flags).map(|mut m| f(m.data_mut()))
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
fn with_map_read_write<F: FnMut(&mut [T])>(&mut self, flags: MapReadWriteFlags, mut f: F) -> Result<()>{
(*self).map_read_write(flags).map(|mut m| f(m.data_mut()))
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> BufferRange<T> for BufferStorage<T>{
fn start(&self) -> usize{
0
}
fn end(&self) -> usize{
self.len()
}
fn into_range<R: InputRange>(self, range: R) -> super::Range<T, Self, Self> where Self: Sized{
Range{
range: range.to_range(&self),
buffer: self,
marker_type: PhantomData,
marker_buffer: PhantomData,
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T:'static> BufferRangeMut<T> for BufferStorage<T>{
fn update(&mut self, data: &[T]){
self.update(data);
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T> WithBackend for BufferStorage<T>{
fn with_backend<F:FnMut(&dyn Backend)->R, R>(&self, mut f:F) -> R{
f(&*self.backend)
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T> WithBackendMut for BufferStorage<T>{
fn with_backend_mut<F:FnMut(&mut dyn Backend)->R, R>(&mut self, mut f:F) -> R{
f(&mut *self.backend)
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> MapRange<T> for BufferStorage<T>{
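/// Maps `length` elements starting at `offset` (both in elements, not bytes)
/// for reading, returning an `OutOfBounds` error when the requested range
/// exceeds the reserved capacity.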
fn map_range_read(&mut self, offset: usize, length: usize, flags: MapReadFlags) -> Result<MapRead<T,Self>>{
if offset + length > self.reserved{
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let flags = gl::MAP_READ_BIT | flags.bits();
let data = unsafe{ self.backend.map_range(bytes_offset as GLintptr, length_offset as GLsizeiptr, flags) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts(data as *const T, length);
Ok(MapRead{
map: slice,
buffer: self,
})
}
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> MapRangeMut<T> for BufferStorage<T>{
fn map_range_write(&mut self, offset: usize, length: usize, flags: MapWriteFlags) -> Result<MapWrite<T,Self>>{
if offset + length > self.reserved{
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let flags = gl::MAP_WRITE_BIT | flags.bits();
let data = unsafe{ self.backend.map_range(
bytes_offset as GLintptr,
length_offset as GLsizeiptr,
flags) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts_mut(data as *mut T, length);
Ok(MapWrite{
map: slice,
dropper: MapDropper{buffer: self}
})
}
}
}
fn map_range_read_write(&mut self, offset: usize, length: usize, flags: MapReadWriteFlags) -> Result<MapReadWrite<T,Self>>{
if offset + length > self.reserved{
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let flags = gl::MAP_READ_BIT | gl::MAP_WRITE_BIT | flags.bits();
let data = unsafe{ self.backend.map_range(
bytes_offset as GLintptr,
length_offset as GLsizeiptr,
flags) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts_mut(data as *mut T, length);
Ok(MapReadWrite{
map: slice,
dropper: MapDropper{buffer: self}
})
}
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> WithMapRange<T> for BufferStorage<T>{
fn with_map_range_read<F: FnMut(&[T])>(&self, offset: usize, length: usize, flags: MapReadFlags, mut f: F) -> Result<()>{
if offset + length > self.reserved{
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let bytes_offset = offset * mem::size_of::<T>();
let length_offset = length * mem::size_of::<T>();
let flags = gl::MAP_READ_BIT | flags.bits();
let data = unsafe{ self.backend.map_range(bytes_offset as GLintptr, length_offset as GLsizeiptr, flags) };
if data.is_null(){
Err(Error::new(::ErrorKind::MapError,None))
}else{
unsafe{
let slice = slice::from_raw_parts(data as *const T, length);
f(slice);
Ok(())
}
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> WithMapRangeMut<T> for BufferStorage<T>{
unsafe fn with_map_range_write<F: FnMut(&mut [T])>(&mut self, offset: usize, length: usize, flags: MapWriteFlags, mut f: F) -> Result<()>{
self.map_range_write(offset, length, flags)
.map(|mut m| f(m.data_mut()))
}
fn with_map_range_read_write<F: FnMut(&mut [T])>(&mut self, offset: usize, length: usize, flags: MapReadWriteFlags, mut f: F) -> Result<()>{
self.map_range_read_write(offset, length, flags)
.map(|mut m| f(m.data_mut()))
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> MapPersistentRange<T> for BufferStorage<T>{
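/// Persistently maps a range for reading. The `persistent_map_token` is taken
/// while the map is alive, so a second persistent map attempt fails with
/// "Buffer already mapped"; the token is restored on failure and when the
/// buffer is unmapped.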
unsafe fn unsafe_map_range_persistent_read(&self, range: Option<StdRange<usize>>, flags: MapReadFlags) -> Result<&'static [T]>{
if !self.is_read_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"));
}
if !self.is_persistant() {
return Err(Error::new(::ErrorKind::MapError, "Can't map persistently since storage was created without gl::MAP_PERSISTENT_BIT"));
}
if self.persistent_map_token.borrow_mut().take().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already mapped"))
}
let start = range.clone().map(|range| range.start).unwrap_or(0);
let len = range.map(|range| range.len()).unwrap_or(self.len());
if start + len > self.reserved{
*self.persistent_map_token.borrow_mut() = Some(());
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let start_bytes = start * mem::size_of::<T>();
let len_bytes = len * mem::size_of::<T>();
let flags = gl::MAP_READ_BIT | gl::MAP_PERSISTENT_BIT | flags.bits();
let data = self.backend.map_range(start_bytes as isize, len_bytes as isize, flags);
if data.is_null(){
*self.persistent_map_token.borrow_mut() = Some(());
Err(Error::new(::ErrorKind::MapError, None))
}else{
Ok(slice::from_raw_parts(data as *const T, len))
}
}
fn map_range_persistent_read(&self, offset: usize, length: usize, flags: MapReadFlags) -> Result<MapPersistentRead<T,&Self>>{
unsafe{
self.unsafe_map_range_persistent_read(Some(offset .. offset + length), flags)
.map(|map| MapPersistentRead::new(
map,
self
))
}
}
fn into_map_range_persistent_read(self, offset: usize, length: usize, flags: MapReadFlags) -> Result<MapPersistentRead<T,Self>>
where Self: Sized
{
unsafe{
self.unsafe_map_range_persistent_read(Some(offset .. offset + length), flags)
.map(|map| MapPersistentRead::new(
map,
self
))
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> MapPersistentRangeMut<T> for BufferStorage<T>{
unsafe fn unsafe_map_range_persistent_write(&self, range: Option<StdRange<usize>>, flags: MapWriteFlags) -> Result<&'static mut [T]>{
if !self.is_write_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for write since storage was created without gl::MAP_WRITE_BIT"));
}
if !self.is_persistant() {
return Err(Error::new(::ErrorKind::MapError, "Can't map persistently since storage was created without gl::MAP_PERSISTENT_BIT"));
}
if self.persistent_map_token.borrow_mut().take().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already mapped"))
}
let start = range.clone().map(|range| range.start).unwrap_or(0);
let len = range.map(|range| range.len()).unwrap_or(self.capacity());
if start + len > self.reserved{
*self.persistent_map_token.borrow_mut() = Some(());
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let start_bytes = start * mem::size_of::<T>();
let len_bytes = len * mem::size_of::<T>();
let flags = gl::MAP_WRITE_BIT | gl::MAP_PERSISTENT_BIT | flags.bits();
let data = self.backend.map_range(start_bytes as isize, len_bytes as isize, flags);
if data.is_null(){
*self.persistent_map_token.borrow_mut() = Some(());
Err(Error::new(::ErrorKind::MapError, None))
}else{
Ok(slice::from_raw_parts_mut(data as *mut T, len))
}
}
unsafe fn unsafe_map_range_persistent_read_write(&self, range: Option<StdRange<usize>>, flags: MapReadWriteFlags) -> Result<&'static mut [T]>{
if !self.is_read_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for read since storage was created without gl::MAP_READ_BIT"));
}
if !self.is_write_map(){
return Err(Error::new(::ErrorKind::MapError, "Can't map for write since storage was created without gl::MAP_WRITE_BIT"));
}
if !self.is_persistant() {
return Err(Error::new(::ErrorKind::MapError, "Can't map persistently since storage was created without gl::MAP_PERSISTENT_BIT"));
}
if self.persistent_map_token.borrow_mut().take().is_none(){
return Err(Error::new(::ErrorKind::MapError, "Buffer already mapped"))
}
let start = range.clone().map(|range| range.start).unwrap_or(0);
let len = range.map(|range| range.len()).unwrap_or(self.capacity());
if start + len > self.reserved{
*self.persistent_map_token.borrow_mut() = Some(());
return Err(Error::new(::ErrorKind::OutOfBounds,None));
}
let start_bytes = start * mem::size_of::<T>();
let len_bytes = len * mem::size_of::<T>();
let flags = gl::MAP_READ_BIT | gl::MAP_WRITE_BIT | gl::MAP_PERSISTENT_BIT | flags.bits();
let data = self.backend.map_range(start_bytes as isize, len_bytes as isize, flags);
if data.is_null(){
*self.persistent_map_token.borrow_mut() = Some(());
Err(Error::new(::ErrorKind::MapError, None))
}else{
Ok(slice::from_raw_parts_mut(data as *mut T, len))
}
}
fn map_range_persistent_write(&self, offset: usize, length: usize, flags: MapWriteFlags) -> Result<MapPersistentWrite<T,&Self>>{
unsafe{
self.unsafe_map_range_persistent_write(Some(offset .. offset + length), flags)
.map(move |map| MapPersistentWrite::new(map, self as &BufferStorage<T>))
}
}
fn map_range_persistent_read_write(&self, offset: usize, length: usize, flags: MapReadWriteFlags) -> Result<MapPersistentReadWrite<T,&Self>>{
unsafe{
self.unsafe_map_range_persistent_read_write(Some(offset .. offset + length), flags)
.map(move |map| MapPersistentReadWrite::new(
map,
self as &BufferStorage<T>
))
}
}
fn into_map_range_persistent_write(self, offset: usize, length: usize, flags: MapWriteFlags) -> Result<MapPersistentWrite<T,Self>>{
unsafe{
self.unsafe_map_range_persistent_write(Some(offset .. offset + length), flags)
.map(|map| MapPersistentWrite::new(map, self))
}
}
fn into_map_range_persistent_read_write(self, offset: usize, length: usize, flags: MapReadWriteFlags) -> Result<MapPersistentReadWrite<T,Self>>{
unsafe{
self.unsafe_map_range_persistent_read_write(Some(offset .. offset + length), flags)
.map(|map| MapPersistentReadWrite::new(
map,
self
))
}
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> MapPersistent<T> for BufferStorage<T>{
fn map_persistent_read(&self, flags: MapReadFlags) -> Result<MapPersistentRead<T,&Self>>{
(*self).map_persistent_read(flags)
}
fn into_map_persistent_read(self, flags: MapReadFlags) -> Result<MapPersistentRead<T,Self>>
where Self: Sized
{
self.into_map_persistent_read(flags)
}
}
#[cfg(all(not(feature = "gles"), not(feature="webgl")))]
impl<T: 'static> MapPersistentMut<T> for BufferStorage<T>{
fn map_persistent_write(&self, flags: MapWriteFlags) -> Result<MapPersistentWrite<T,&Self>>{
(*self).map_persistent_write(flags)
}
fn map_persistent_read_write(&self, flags: MapReadWriteFlags) -> Result<MapPersistentReadWrite<T,&Self>>{
(*self).map_persistent_read_write(flags)
}
fn into_map_persistent_write(self, flags: MapWriteFlags) -> Result<MapPersistentWrite<T,Self>>{
self.into_map_persistent_write(flags)
}
fn into_map_persistent_read_write(self, flags: MapReadWriteFlags) -> Result<MapPersistentReadWrite<T,Self>>{
self.into_map_persistent_read_write(flags)
}
}
impl<'a, T: 'static> TypedBuffer<T> for &'a BufferStorage<T>{
fn id(&self) -> GLuint{
(*self).id()
}
fn len(&self) -> usize{
(*self).len()
}
fn capacity(&self) -> usize{
(*self).capacity()
}
#[cfg(not(feature="webgl"))]
fn with_map_read<F: FnMut(&[T])>(&self, flags: MapReadFlags, f: F) -> Result<()>{
(*self).with_map_read(flags, f)
}
fn copy_to<U, BB:BufferRange<U> + WithBackendMut>(&self, dst: &mut BB) where Self: Sized{
(*self).copy_to(dst)
}
#[cfg(not(feature="webgl"))]
unsafe fn unmap(&self){
(*self).unmap()
}
}
impl<'a, T: 'static> BufferRange<T> for &'a BufferStorage<T>{
fn start(&self) -> usize{
0
}
fn end(&self) -> usize{
(*self).len()
}
fn into_range<R: InputRange>(self, range: R) -> super::Range<T, Self, Self> where Self: Sized{
Range{
range: range.to_range(&self),
buffer: self,
marker_type: PhantomData,
marker_buffer: PhantomData,
}
}
}