// feoxdb/utils/allocator.rs
#[cfg(unix)]
2use nix::sys::mman::{mmap_anonymous, munmap, MapFlags, ProtFlags};
3use std::alloc::{alloc, dealloc, Layout};
4#[cfg(unix)]
5use std::os::raw::c_void;
6use std::ptr::NonNull;
7use std::sync::atomic::{AtomicUsize, Ordering};
8
9use crate::constants::*;
10use crate::error::{FeoxError, Result};
11
12static TOTAL_ALLOCATED: AtomicUsize = AtomicUsize::new(0);
13
14pub struct FeoxAllocator;
15
16impl FeoxAllocator {
17 pub fn allocate(size: usize) -> Result<NonNull<u8>> {
18 let ptr = if size <= KMALLOC_LIMIT {
19 Self::allocate_small(size)?
20 } else {
21 Self::allocate_large(size)?
22 };
23
24 TOTAL_ALLOCATED.fetch_add(size, Ordering::AcqRel);
25 Ok(ptr)
26 }
27
28 pub fn allocate_aligned(size: usize, alignment: usize) -> Result<NonNull<u8>> {
30 #[cfg(unix)]
31 unsafe {
32 let mut ptr: *mut libc::c_void = std::ptr::null_mut();
33 let result = libc::posix_memalign(&mut ptr, alignment, size);
34 if result != 0 {
35 return Err(FeoxError::AllocationFailed);
36 }
37 TOTAL_ALLOCATED.fetch_add(size, Ordering::AcqRel);
38 NonNull::new(ptr as *mut u8).ok_or(FeoxError::AllocationFailed)
39 }
40
41 #[cfg(not(unix))]
42 {
43 let layout = Layout::from_size_align(size, alignment)
45 .map_err(|_| FeoxError::AllocationFailed)?;
46 unsafe {
47 let ptr = alloc(layout);
48 TOTAL_ALLOCATED.fetch_add(size, Ordering::AcqRel);
49 NonNull::new(ptr).ok_or(FeoxError::AllocationFailed)
50 }
51 }
52 }
53
54 pub fn deallocate(ptr: NonNull<u8>, size: usize) {
55 if size <= KMALLOC_LIMIT {
56 Self::deallocate_small(ptr, size);
57 } else {
58 Self::deallocate_large(ptr, size);
59 }
60
61 TOTAL_ALLOCATED.fetch_sub(size, Ordering::AcqRel);
62 }
63
64 pub fn deallocate_aligned(ptr: NonNull<u8>, size: usize, alignment: usize) {
66 #[cfg(unix)]
67 {
68 let _ = alignment; unsafe {
70 libc::free(ptr.as_ptr() as *mut libc::c_void);
71 }
72 }
73
74 #[cfg(not(unix))]
75 {
76 let layout = Layout::from_size_align(size, alignment).unwrap();
78 unsafe {
79 dealloc(ptr.as_ptr(), layout);
80 }
81 }
82
83 TOTAL_ALLOCATED.fetch_sub(size, Ordering::AcqRel);
84 }
85
86 fn allocate_small(size: usize) -> Result<NonNull<u8>> {
87 let layout = Layout::from_size_align(size, 8).map_err(|_| FeoxError::AllocationFailed)?;
88
89 unsafe {
90 let ptr = alloc(layout);
91 NonNull::new(ptr).ok_or(FeoxError::AllocationFailed)
92 }
93 }
94
95 fn allocate_large(size: usize) -> Result<NonNull<u8>> {
96 let aligned_size = (size + PAGE_MASK) & !PAGE_MASK;
97
98 #[cfg(unix)]
99 unsafe {
100 let non_zero_size = std::num::NonZeroUsize::new_unchecked(aligned_size);
102 let ptr = mmap_anonymous(
103 None,
104 non_zero_size,
105 ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
106 MapFlags::MAP_PRIVATE,
107 )
108 .map_err(|_| FeoxError::AllocationFailed)?;
109
110 Ok(ptr.cast())
111 }
112
113 #[cfg(not(unix))]
114 {
115 let layout = Layout::from_size_align(aligned_size, PAGE_SIZE)
117 .map_err(|_| FeoxError::AllocationFailed)?;
118
119 unsafe {
120 let ptr = alloc(layout);
121 NonNull::new(ptr).ok_or(FeoxError::AllocationFailed)
122 }
123 }
124 }
125
126 fn deallocate_small(ptr: NonNull<u8>, size: usize) {
127 let layout = Layout::from_size_align(size, 8).unwrap();
128 unsafe {
129 dealloc(ptr.as_ptr(), layout);
130 }
131 }
132
133 fn deallocate_large(ptr: NonNull<u8>, size: usize) {
134 let aligned_size = (size + PAGE_MASK) & !PAGE_MASK;
135
136 #[cfg(unix)]
137 unsafe {
138 let ptr_void = ptr.cast::<c_void>();
139 let _ = munmap(ptr_void, aligned_size);
140 }
141
142 #[cfg(not(unix))]
143 {
144 let layout = Layout::from_size_align(aligned_size, PAGE_SIZE).unwrap();
145 unsafe {
146 dealloc(ptr.as_ptr(), layout);
147 }
148 }
149 }
150
151 pub fn get_allocated() -> usize {
152 TOTAL_ALLOCATED.load(Ordering::Acquire)
153 }
154}
155
/// Owned byte buffer whose backing storage is allocated with a specific
/// alignment (`FEOX_BLOCK_SIZE` when built via `new`).
pub struct AlignedBuffer {
    // Start of the allocation; never null.
    ptr: NonNull<u8>,
    // Logical length in bytes (how much of the buffer holds data).
    size: usize,
    // Total allocated bytes; a multiple of `alignment` when built via `new`.
    capacity: usize,
    // True when the storage came from `allocate_aligned` (always the case
    // for `new`); `Drop` uses it to pick the matching deallocator.
    is_aligned: bool,
    // Alignment the storage was allocated with.
    alignment: usize,
}
163
164impl AlignedBuffer {
165 pub fn new(capacity: usize) -> Result<Self> {
166 let alignment = FEOX_BLOCK_SIZE;
169 let aligned_capacity = capacity.div_ceil(alignment) * alignment;
170
171 let ptr = FeoxAllocator::allocate_aligned(aligned_capacity, alignment)?;
173
174 Ok(Self {
175 ptr,
176 size: 0,
177 capacity: aligned_capacity,
178 is_aligned: true,
179 alignment,
180 })
181 }
182
183 pub fn as_ptr(&self) -> *const u8 {
184 self.ptr.as_ptr()
185 }
186
187 pub fn as_mut_ptr(&mut self) -> *mut u8 {
188 self.ptr.as_ptr()
189 }
190
191 pub fn as_slice(&self) -> &[u8] {
192 unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.size) }
193 }
194
195 pub fn as_mut_slice(&mut self) -> &mut [u8] {
196 unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.size) }
197 }
198
199 pub fn len(&self) -> usize {
200 self.size
201 }
202
203 pub fn is_empty(&self) -> bool {
204 self.size == 0
205 }
206
207 pub fn capacity(&self) -> usize {
208 self.capacity
209 }
210
211 pub fn set_len(&mut self, new_len: usize) {
212 assert!(new_len <= self.capacity);
213 self.size = new_len;
214 }
215
216 pub fn clear(&mut self) {
217 self.size = 0;
218 }
219}
220
221impl Drop for AlignedBuffer {
222 fn drop(&mut self) {
223 if self.is_aligned {
224 FeoxAllocator::deallocate_aligned(self.ptr, self.capacity, self.alignment);
225 } else {
226 FeoxAllocator::deallocate(self.ptr, self.capacity);
227 }
228 }
229}