/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Native allocator.
 *
 * Copyright: Eugene Wissner 2016-2020.
 * License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
 *                  Mozilla Public License, v. 2.0).
 * Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
 * Source: $(LINK2 https://github.com/caraus-ecms/tanya/blob/master/middle/tanya/memory/mmappool.d,
 *                 tanya/memory/mmappool.d)
 */
module tanya.memory.mmappool;

import core.sys.linux.sys.mman;
import tanya.memory.allocator;
import tanya.memory.op;
import tanya.os.error;

extern(C) pragma(mangle, "mmap")
private void* mapMemory(void* addr,
                        size_t length,
                        int prot,
                        int flags,
                        int fd,
                        off_t offset)
@nogc nothrow pure @system;

extern(C) pragma(mangle, "munmap")
private int unmapMemory(shared void* addr, size_t length)
@nogc nothrow pure @system;

/*
 * This allocator allocates memory in regions (multiples of 64 KB, for
 * example). Each region is then split into blocks, so memory isn't requested
 * from the operating system on each call, but only when none of the free
 * blocks in the available regions is large enough.
 * Deallocation works the same way: it doesn't immediately give the memory
 * back to the operating system, but marks the appropriate block as free;
 * only when all blocks in a region are free is the whole region unmapped.
 *
 * <pre>
 * +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 * |      |     |        |     |                  ||      |     |        |
 * |      |prev <--------------|                  ||      |     |        |
 * |  R   |  B  |        |  B  |                  ||  R   |  B  |        |
 * |  E   |  L  |        |  L  |              next    E   |  L  |        |
 * |  G   |  O  |  DATA  |  O  |  FREE   ---------->  G   |  O  |  DATA  |
 * |  I   |  C  |        |  C  |         <----------  I   |  C  |        |
 * |  O   |  K  |        |  K  |              prev    O   |  K  |        |
 * |  N   |--------------> next|                  ||  N   |     |        |
 * |      |     |        |     |                  ||      |     |        |
 * +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 * </pre>
 */
final class MmapPool : Allocator
{
    version (none)
    {
        @nogc nothrow pure @system invariant
        {
            for (auto r = &head; *r !is null; r = &((*r).next))
            {
                auto block = cast(Block) (cast(void*) *r + RegionEntry.sizeof);
                do
                {
                    assert(block.prev is null || block.prev.next is block);
                    assert(block.next is null || block.next.prev is block);
                    assert(block.region is *r);
                }
                while ((block = block.next) !is null);
            }
        }
    }

    /*
     * Allocates $(D_PARAM size) bytes of memory.
     *
     * Params:
     *  size = Amount of memory to allocate.
     *
     * Returns: Pointer to the newly allocated memory.
     */
    void[] allocate(size_t size) @nogc nothrow pure shared @system
    {
        if (size == 0)
        {
            return null;
        }
        const dataSize = addAlignment(size);
        if (dataSize < size)
        {
            return null;
        }

        void* data = findBlock(dataSize);
        if (data is null)
        {
            data = initializeRegion(dataSize);
        }

        return data is null ? null : data[0 .. size];
    }

    /*
     * Searches for a free block large enough to hold $(D_PARAM size) bytes
     * and splits it into two blocks if it is too large.
     *
     * Params:
     *  size = Minimum size the block should have (aligned).
     *
     * Returns: Data the block points to or $(D_KEYWORD null).
     */
    private void* findBlock(const ref size_t size)
    @nogc nothrow pure shared @system
    {
        Block block1;
        RegionLoop: for (auto r = head; r !is null; r = r.next)
        {
            block1 = cast(Block) (cast(void*) r + RegionEntry.sizeof);
            do
            {
                if (block1.free && block1.size >= size)
                {
                    break RegionLoop;
                }
            }
            while ((block1 = block1.next) !is null);
        }
        if (block1 is null)
        {
            return null;
        }
        else if (block1.size >= size + alignment_ + BlockEntry.sizeof)
        { // Split the block if needed
            Block block2 = cast(Block) (cast(void*) block1 + BlockEntry.sizeof + size);
            block2.prev = block1;
            block2.next = block1.next;
            block2.free = true;
            block2.size = block1.size - BlockEntry.sizeof - size;
            block2.region = block1.region;

            if (block1.next !is null)
            {
                block1.next.prev = block2;
            }
            block1.next = block2;
            block1.size = size;
        }
        block1.free = false;
        block1.region.blocks = block1.region.blocks + 1;

        return cast(void*) block1 + BlockEntry.sizeof;
    }

    // Merges the block with the next one.
    private void mergeNext(Block block) const @nogc nothrow pure @safe shared
    {
        block.size = block.size + BlockEntry.sizeof + block.next.size;
        if (block.next.next !is null)
        {
            block.next.next.prev = block;
        }
        block.next = block.next.next;
    }

    /*
     * Deallocates a memory block.
     *
     * Params:
     *  p = A pointer to the memory block to be freed.
     *
     * Returns: Whether the deallocation was successful.
     */
    bool deallocate(void[] p) @nogc nothrow pure shared @system
    {
        if (p.ptr is null)
        {
            return true;
        }

        Block block = cast(Block) (p.ptr - BlockEntry.sizeof);
        if (block.region.blocks <= 1)
        {
            if (block.region.prev !is null)
            {
                block.region.prev.next = block.region.next;
            }
            else // Replace the list head. It is being deallocated
            {
                head = block.region.next;
            }
            if (block.region.next !is null)
            {
                block.region.next.prev = block.region.prev;
            }
            return unmapMemory(block.region, block.region.size) == 0;
        }
        // Merge blocks if the neighbours are free.
        if (block.next !is null && block.next.free)
        {
            mergeNext(block);
        }
        if (block.prev !is null && block.prev.free)
        {
            block.prev.size = block.prev.size + BlockEntry.sizeof + block.size;
            if (block.next !is null)
            {
                block.next.prev = block.prev;
            }
            block.prev.next = block.next;
        }
        else
        {
            block.free = true;
        }
        block.region.blocks = block.region.blocks - 1;
        return true;
    }

    /*
     * Reallocates a memory block in place if possible or returns
     * $(D_KEYWORD false). This function cannot be used to allocate or
     * deallocate memory, so if $(D_PARAM p) is $(D_KEYWORD null) or
     * $(D_PARAM size) is `0`, it should return $(D_KEYWORD false).
     *
     * Params:
     *  p    = A pointer to the memory block.
     *  size = Size of the reallocated block.
     *
     * Returns: $(D_KEYWORD true) if successful, $(D_KEYWORD false) otherwise.
     */
    bool reallocateInPlace(ref void[] p, size_t size)
    @nogc nothrow pure shared @system
    {
        if (p is null || size == 0)
        {
            return false;
        }
        if (size <= p.length)
        {
            // Leave the block as is.
            p = p.ptr[0 .. size];
            return true;
        }
        Block block1 = cast(Block) (p.ptr - BlockEntry.sizeof);

        if (block1.size >= size)
        {
            // Enough space in the current block.
            p = p.ptr[0 .. size];
            return true;
        }
        const dataSize = addAlignment(size);
        const pAlignment = addAlignment(p.length);
        assert(pAlignment >= p.length, "Invalid memory chunk length");
        const delta = dataSize - pAlignment;

        if (block1.next is null
         || !block1.next.free
         || dataSize < size
         || block1.next.size + BlockEntry.sizeof < delta)
        {
            /* - It is the last block in the region
             * - The next block isn't free
             * - The next block is too small
             * - Requested size is too large
             */
            return false;
        }
        if (block1.next.size >= delta + alignment_)
        {
            // Move size from block2 to block1.
            block1.next.size = block1.next.size - delta;
            block1.size = block1.size + delta;

            auto block2 = cast(Block) (p.ptr + dataSize);
            if (block1.next.next !is null)
            {
                block1.next.next.prev = block2;
            }
            copyBackward((cast(void*) block1.next)[0 .. BlockEntry.sizeof],
                         (cast(void*) block2)[0 .. BlockEntry.sizeof]);
            block1.next = block2;
        }
        else
        {
            // The next block has enough space, but is too small for further
            // allocations. Merge it with the current block.
            mergeNext(block1);
        }

        p = p.ptr[0 .. size];
        return true;
    }

    /*
     * Increases or decreases the size of a memory block.
     *
     * Params:
     *  p    = A pointer to the memory block.
     *  size = Size of the reallocated block.
     *
     * Returns: Whether the reallocation was successful.
     */
    bool reallocate(ref void[] p, size_t size)
    @nogc nothrow pure shared @system
    {
        if (size == 0)
        {
            if (deallocate(p))
            {
                p = null;
                return true;
            }
            return false;
        }
        else if (reallocateInPlace(p, size))
        {
            return true;
        }
        // Can't reallocate in place; allocate a new block,
        // copy and delete the previous one.
        void[] reallocP = allocate(size);
        if (reallocP is null)
        {
            return false;
        }
        if (p !is null)
        {
            copy(p[0 .. p.length < size ? p.length : size], reallocP);
            deallocate(p);
        }
        p = reallocP;

        return true;
    }

    static private shared(MmapPool) instantiate() @nogc nothrow @system
    {
        if (instance_ is null)
        {
            const instanceSize = addAlignment(__traits(classInstanceSize,
                                                       MmapPool));

            Region head; // Will soon become the head of the region list
            void* data = initializeRegion(instanceSize, head);
            if (data !is null)
            {
                copy(typeid(MmapPool).initializer, data[0 .. instanceSize]);
                instance_ = cast(shared MmapPool) data;
                instance_.head = head;
            }
        }
        return instance_;
    }

    /*
     * Static allocator instance and initializer.
     *
     * Returns: Global $(D_PSYMBOL MmapPool) instance.
     */
    static @property shared(MmapPool) instance() @nogc nothrow pure @system
    {
        return (cast(GetPureInstance!MmapPool) &instantiate)();
    }

    /*
     * Initializes a region for one element.
     *
     * Params:
     *  size = Aligned size of the first data block in the region.
     *  head = Region list head.
     *
     * Returns: A pointer to the data.
     */
    private static void* initializeRegion(const size_t size, ref Region head)
    @nogc nothrow pure @system
    {
        const regionSize = calculateRegionSize(size);
        if (regionSize < size)
        {
            return null;
        }
        void* p = mapMemory(null,
                            regionSize,
                            PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS,
                            -1,
                            0);
        if (cast(ptrdiff_t) p == -1)
        {
            return null;
        }

        Region region = cast(Region) p;
        region.blocks = 1;
        region.size = regionSize;

        // Set the pointer to the head of the region list
        if (head !is null)
        {
            head.prev = region;
        }
        region.next = head;
        region.prev = null;
        head = region;

        // Initialize the data block
        void* memoryPointer = p + RegionEntry.sizeof;
        Block block1 = cast(Block) memoryPointer;
        block1.size = size;
        block1.free = false;

        // It is what we want to return
        void* data = memoryPointer + BlockEntry.sizeof;

        // Free block after data
        memoryPointer = data + size;
        Block block2 = cast(Block) memoryPointer;
        block1.prev = block2.next = null;
        block1.next = block2;
        block2.prev = block1;
        block2.size = regionSize - size
                    - RegionEntry.sizeof - BlockEntry.sizeof * 2;
        block2.free = true;
        block1.region = block2.region = region;

        return data;
    }

    private void* initializeRegion(const size_t size)
    @nogc nothrow pure shared @system
    {
        return initializeRegion(size, this.head);
    }

    /*
     * Params:
     *  x = Space to be aligned.
     *
     * Returns: Aligned size of $(D_PARAM x).
     */
    private static size_t addAlignment(const size_t x) @nogc nothrow pure @safe
    {
        return (x - 1) / alignment_ * alignment_ + alignment_;
    }

    /*
     * Params:
     *  x = Required space.
     *
     * Returns: Minimum region size (a multiple of $(D_PSYMBOL pageSize)).
     */
    private static size_t calculateRegionSize(ref const size_t x)
    @nogc nothrow pure @safe
    {
        return (x + RegionEntry.sizeof + BlockEntry.sizeof * 2)
             / pageSize * pageSize + pageSize;
    }

    /*
     * Returns: Alignment offered.
     */
    @property uint alignment() const @nogc nothrow pure @safe shared
    {
        return alignment_;
    }

    private enum uint alignment_ = 8;

    private shared static MmapPool instance_;

    // Page size.
    enum size_t pageSize = 65536;

    private shared struct RegionEntry
    {
        Region prev;
        Region next;
        uint blocks;
        size_t size;
    }
    private alias Region = shared RegionEntry*;
    private shared Region head;

    private shared struct BlockEntry
    {
        Block prev;
        Block next;
        Region region;
        size_t size;
        bool free;
    }
    private alias Block = shared BlockEntry*;
}

@nogc nothrow pure @system unittest
{
    // allocate() check.
    size_t tooMuchMemory = size_t.max
                         - MmapPool.alignment_
                         - MmapPool.BlockEntry.sizeof * 2
                         - MmapPool.RegionEntry.sizeof
                         - MmapPool.pageSize;
    assert(MmapPool.instance.allocate(tooMuchMemory) is null);

    assert(MmapPool.instance.allocate(size_t.max) is null);

    // initializeRegion() check.
    tooMuchMemory = size_t.max - MmapPool.alignment_;
    assert(MmapPool.instance.allocate(tooMuchMemory) is null);
}
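
/*
 * Minimal usage sketch: the typical allocate/reallocate/deallocate round
 * trip through the global instance.
 */
@nogc nothrow pure @system unittest
{
    void[] p = MmapPool.instance.allocate(20);
    assert(p.length == 20);

    // Growing may happen in place or fall back to allocate/copy/deallocate.
    assert(MmapPool.instance.reallocate(p, 300));
    assert(p.length == 300);

    assert(MmapPool.instance.deallocate(p));
}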