/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Native allocator.
 *
 * Copyright: Eugene Wissner 2016-2020.
 * License: $(LINK2 https://www.mozilla.org/en-US/MPL/2.0/,
 *                  Mozilla Public License, v. 2.0).
 * Authors: $(LINK2 mailto:info@caraus.de, Eugene Wissner)
 * Source: $(LINK2 https://github.com/caraus-ecms/tanya/blob/master/middle/tanya/memory/mmappool.d,
 *                 tanya/memory/mmappool.d)
 */
module tanya.memory.mmappool;

import core.sys.linux.sys.mman;
import tanya.memory.allocator;
import tanya.memory.op;
import tanya.os.error;

version (Windows)
{
    import core.sys.windows.basetsd : SIZE_T;
    import core.sys.windows.windef : BOOL, DWORD;
    import core.sys.windows.winnt : MEM_COMMIT, MEM_RELEASE, PAGE_READWRITE, PVOID;

    // Declared by hand (instead of importing the prototypes) so that the
    // functions can carry `pure`, making them callable from the pure
    // allocator methods below.
    extern (Windows)
    private PVOID VirtualAlloc(PVOID, SIZE_T, DWORD, DWORD)
    @nogc nothrow pure @system;

    extern (Windows)
    private BOOL VirtualFree(shared PVOID, SIZE_T, DWORD)
    @nogc nothrow pure @system;
}
else
{
    // pragma(mangle) binds these "pure" declarations to the real C mmap/munmap
    // symbols; the `pure` attribute is a deliberate lie that lets the pure
    // allocator methods call into the OS.
    extern(C) pragma(mangle, "mmap")
    private void* mapMemory(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
    @nogc nothrow pure @system;

    // NOTE(review): declared `bool` though C munmap returns `int`; the
    // `== 0` comparison at the call site still distinguishes success (0)
    // from failure (-1) — TODO confirm this is intentional.
    extern(C) pragma(mangle, "munmap")
    private bool unmapMemory(shared void* addr, size_t length)
    @nogc nothrow pure @system;
}

/*
 * This allocator allocates memory in regions (multiple of 64 KB for example).
 * Each region is then split in blocks. So it doesn't request the memory
 * from the operating system on each call, but only if there are no large
 * enough free blocks in the available regions.
 * Deallocation works in the same way. Deallocation doesn't immediately
 * give the memory back to the operating system, but marks the appropriate
 * block as free and only if all blocks in the region are free, the complete
 * region is deallocated.
 *
 * <pre>
 * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 * |      |     |         |     |            ||      |     |                  |
 * |      |prev <-----------    |            ||      |     |                  |
 * |  R   |  B  |         |  B  |            ||   R  |  B  |                  |
 * |  E   |  L  |         |  L  |    next    ||   E  |  L  |                  |
 * |  G   |  O  |  DATA   |  O  |  FREE --->  |  G   |  O  |       DATA       |
 * |  I   |  C  |         |  C  |       <---  |  I   |  C  |                  |
 * |  O   |  K  |         |  K  |    prev    ||   O  |  K  |                  |
 * |  N   |    -----------> next|            ||   N  |     |                  |
 * |      |     |         |     |            ||      |     |                  |
 * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 * </pre>
 */
final class MmapPool : Allocator
{
    // Consistency check for the block lists: disabled (version (none));
    // switch the version identifier to enable it during debugging.
    version (none)
    {
        @nogc nothrow pure @system invariant
        {
            // Walk every region and every block inside it, checking that the
            // doubly-linked block list is well formed and that each block
            // points back to its owning region.
            for (auto r = &head; *r !is null; r = &((*r).next))
            {
                auto block = cast(Block) (cast(void*) *r + RegionEntry.sizeof);
                do
                {
                    assert(block.prev is null || block.prev.next is block);
                    assert(block.next is null || block.next.prev is block);
                    assert(block.region is *r);
                }
                while ((block = block.next) !is null);
            }
        }
    }

    /*
     * Allocates $(D_PARAM size) bytes of memory.
     *
     * A zero-sized request returns $(D_KEYWORD null). The requested size is
     * rounded up to the allocator alignment first; if that rounding
     * overflows (dataSize < size), the request is refused.
     *
     * Params:
     *  size = Amount of memory to allocate.
     *
     * Returns: Pointer to the new allocated memory.
     */
    void[] allocate(size_t size) @nogc nothrow pure shared @system
    {
        if (size == 0)
        {
            return null;
        }
        const dataSize = addAlignment(size);
        if (dataSize < size) // Overflow in addAlignment().
        {
            return null;
        }

        // Try to reuse a free block from an existing region first; only map
        // a new region if nothing fits.
        void* data = findBlock(dataSize);
        if (data is null)
        {
            data = initializeRegion(dataSize);
        }

        // The caller gets the requested length, not the aligned one.
        return data is null ? null : data[0 .. size];
    }

    /*
     * Search for a block large enough to keep $(D_PARAM size) and split it
     * into two blocks if the block is too large.
     *
     * Params:
     *  size = Minimum size the block should have (aligned).
     *
     * Returns: Data the block points to or $(D_KEYWORD null).
     */
    private void* findBlock(const ref size_t size)
    @nogc nothrow pure shared @system
    {
        // First-fit search: walk every region, and inside each region the
        // block list that starts right after the RegionEntry header.
        Block block1;
        RegionLoop: for (auto r = head; r !is null; r = r.next)
        {
            block1 = cast(Block) (cast(void*) r + RegionEntry.sizeof);
            do
            {
                if (block1.free && block1.size >= size)
                {
                    break RegionLoop;
                }
            }
            while ((block1 = block1.next) !is null);
        }
        // If no region had a fitting block, the inner loop left block1 null.
        if (block1 is null)
        {
            return null;
        }
        else if (block1.size >= size + alignment_ + BlockEntry.sizeof)
        { // Split the block if needed
            // The remainder is large enough to hold a block header plus at
            // least one aligned chunk, so carve a new free block out of it.
            Block block2 = cast(Block) (cast(void*) block1 + BlockEntry.sizeof + size);
            block2.prev = block1;
            block2.next = block1.next;
            block2.free = true;
            block2.size = block1.size - BlockEntry.sizeof - size;
            block2.region = block1.region;

            if (block1.next !is null)
            {
                block1.next.prev = block2;
            }
            block1.next = block2;
            block1.size = size;
        }
        block1.free = false;
        // One more occupied block in this region.
        block1.region.blocks = block1.region.blocks + 1;

        // The usable data starts right after the block header.
        return cast(void*) block1 + BlockEntry.sizeof;
    }

    // Merge block with the next one.
    // Precondition (guaranteed by both call sites): block.next exists and
    // is free. The next block's header is absorbed into this block's size.
    private void mergeNext(Block block) const @nogc nothrow pure @safe shared
    {
        block.size = block.size + BlockEntry.sizeof + block.next.size;
        if (block.next.next !is null)
        {
            block.next.next.prev = block;
        }
        block.next = block.next.next;
    }

    /*
     * Deallocates a memory block.
     *
     * Params:
     *  p = A pointer to the memory block to be freed.
     *
     * Returns: Whether the deallocation was successful.
     */
    bool deallocate(void[] p) @nogc nothrow pure shared @system
    {
        // Freeing null is a successful no-op.
        if (p.ptr is null)
        {
            return true;
        }

        // The block header sits immediately before the user data.
        Block block = cast(Block) (p.ptr - BlockEntry.sizeof);
        if (block.region.blocks <= 1)
        {
            // This was the last occupied block in its region: unlink the
            // whole region from the region list and return it to the OS.
            if (block.region.prev !is null)
            {
                block.region.prev.next = block.region.next;
            }
            else // Replace the list head. It is being deallocated
            {
                head = block.region.next;
            }
            if (block.region.next !is null)
            {
                block.region.next.prev = block.region.prev;
            }
            version (Windows)
                return VirtualFree(block.region, 0, MEM_RELEASE) != 0;
            else
                return unmapMemory(block.region, block.region.size) == 0;
        }
        // Merge blocks if neighbours are free.
        if (block.next !is null && block.next.free)
        {
            mergeNext(block);
        }
        if (block.prev !is null && block.prev.free)
        {
            // Absorb this block (and its header) into the free predecessor.
            block.prev.size = block.prev.size + BlockEntry.sizeof + block.size;
            if (block.next !is null)
            {
                block.next.prev = block.prev;
            }
            block.prev.next = block.next;
        }
        else
        {
            block.free = true;
        }
        block.region.blocks = block.region.blocks - 1;
        return true;
    }

    /*
     * Reallocates a memory block in place if possible or returns
     * $(D_KEYWORD false). This function cannot be used to allocate or
     * deallocate memory, so if $(D_PARAM p) is $(D_KEYWORD null) or
     * $(D_PARAM size) is `0`, it should return $(D_KEYWORD false).
     *
     * Params:
     *  p = A pointer to the memory block.
     *  size = Size of the reallocated block.
     *
     * Returns: $(D_KEYWORD true) if successful, $(D_KEYWORD false) otherwise.
     */
    bool reallocateInPlace(ref void[] p, size_t size)
    @nogc nothrow pure shared @system
    {
        if (p is null || size == 0)
        {
            return false;
        }
        if (size <= p.length)
        {
            // Leave the block as is.
            p = p.ptr[0 .. size];
            return true;
        }
        Block block1 = cast(Block) (p.ptr - BlockEntry.sizeof);

        if (block1.size >= size)
        {
            // Enough space in the current block (its aligned size can exceed
            // the slice length the caller sees).
            p = p.ptr[0 .. size];
            return true;
        }
        // delta = how many extra aligned bytes the block needs to grow by.
        const dataSize = addAlignment(size);
        const pAlignment = addAlignment(p.length);
        assert(pAlignment >= p.length, "Invalid memory chunk length");
        const delta = dataSize - pAlignment;

        if (block1.next is null
         || !block1.next.free
         || dataSize < size
         || block1.next.size + BlockEntry.sizeof < delta)
        {
            /* - It is the last block in the region
             * - The next block isn't free
             * - The next block is too small
             * - Requested size is too large
             */
            return false;
        }
        if (block1.next.size >= delta + alignment_)
        {
            // Move size from block2 to block1: shrink the free successor and
            // slide its header forward by delta bytes.
            block1.next.size = block1.next.size - delta;
            block1.size = block1.size + delta;

            auto block2 = cast(Block) (p.ptr + dataSize);
            if (block1.next.next !is null)
            {
                block1.next.next.prev = block2;
            }
            // copyBackward because the old and the new header locations may
            // overlap (the header moves forward by less than its own size).
            copyBackward((cast(void*) block1.next)[0 .. BlockEntry.sizeof],
                         (cast(void*) block2)[0 .. BlockEntry.sizeof]);
            block1.next = block2;
        }
        else
        {
            // The next block has enough space, but is too small for further
            // allocations. Merge it with the current block.
            mergeNext(block1);
        }

        p = p.ptr[0 .. size];
        return true;
    }

    /*
     * Increases or decreases the size of a memory block.
     *
     * Params:
     *  p = A pointer to the memory block.
     *  size = Size of the reallocated block.
     *
     * Returns: Whether the reallocation was successful.
     */
    bool reallocate(ref void[] p, size_t size)
    @nogc nothrow pure shared @system
    {
        if (size == 0)
        {
            // Shrinking to zero is a deallocation.
            if (deallocate(p))
            {
                p = null;
                return true;
            }
            return false;
        }
        else if (reallocateInPlace(p, size))
        {
            return true;
        }
        // Can't reallocate in place, allocate a new block,
        // copy and delete the previous one.
        void[] reallocP = allocate(size);
        if (reallocP is null)
        {
            return false;
        }
        if (p !is null)
        {
            // Copy only as much as fits in the smaller of the two blocks.
            copy(p[0 .. p.length < size ? p.length : size], reallocP);
            deallocate(p);
        }
        p = reallocP;

        return true;
    }

    // Lazily creates the singleton: the MmapPool class instance itself lives
    // inside the first region this allocator maps, bootstrapped by copying
    // the class initializer into that memory.
    // NOTE(review): there is no synchronization here; concurrent first calls
    // could race on instance_ — confirm single-threaded initialization is
    // assumed.
    static private shared(MmapPool) instantiate() @nogc nothrow @system
    {
        if (instance_ is null)
        {
            const instanceSize = addAlignment(__traits(classInstanceSize,
                MmapPool));

            Region head; // Will become soon our region list head
            void* data = initializeRegion(instanceSize, head);
            if (data !is null)
            {
                copy(typeid(MmapPool).initializer, data[0 .. instanceSize]);
                instance_ = cast(shared MmapPool) data;
                instance_.head = head;
            }
        }
        return instance_;
    }

    /*
     * Static allocator instance and initializer.
     *
     * The cast through GetPureInstance makes the impure instantiate()
     * callable from this pure accessor.
     *
     * Returns: Global $(D_PSYMBOL MmapPool) instance.
     */
    static @property shared(MmapPool) instance() @nogc nothrow pure @system
    {
        return (cast(GetPureInstance!MmapPool) &instantiate)();
    }

    /*
     * Initializes a region for one element.
     *
     * Params:
     *  size = Aligned size of the first data block in the region.
     *  head = Region list head.
     *
     * Returns: A pointer to the data.
     */
    private static void* initializeRegion(const size_t size, ref Region head)
    @nogc nothrow pure @system
    {
        const regionSize = calculateRegionSize(size);
        if (regionSize < size) // Overflow in calculateRegionSize().
        {
            return null;
        }
        version (Windows)
        {
            void* p = VirtualAlloc(null,
                                   regionSize,
                                   MEM_COMMIT,
                                   PAGE_READWRITE);
            if (p is null)
            {
                return null;
            }
        }
        else
        {
            void* p = mapMemory(null,
                                regionSize,
                                PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS,
                                -1,
                                0);
            // mmap signals failure with MAP_FAILED ((void*) -1), not null.
            if (cast(ptrdiff_t) p == -1)
            {
                return null;
            }
        }

        // Region layout: [RegionEntry][Block1 hdr][data][Block2 hdr][free...]
        Region region = cast(Region) p;
        region.blocks = 1;
        region.size = regionSize;

        // Set the pointer to the head of the region list
        if (head !is null)
        {
            head.prev = region;
        }
        region.next = head;
        region.prev = null;
        head = region;

        // Initialize the data block
        void* memoryPointer = p + RegionEntry.sizeof;
        Block block1 = cast(Block) memoryPointer;
        block1.size = size;
        block1.free = false;

        // It is what we want to return
        void* data = memoryPointer + BlockEntry.sizeof;

        // Free block after data: the rest of the region becomes one big free
        // block, linked after the occupied one.
        memoryPointer = data + size;
        Block block2 = cast(Block) memoryPointer;
        block1.prev = block2.next = null;
        block1.next = block2;
        block2.prev = block1;
        block2.size = regionSize - size - RegionEntry.sizeof - BlockEntry.sizeof * 2;
        block2.free = true;
        block1.region = block2.region = region;

        return data;
    }

    // Shared overload: creates a region and links it into this allocator's
    // own region list.
    private void* initializeRegion(const size_t size)
    @nogc nothrow pure shared @system
    {
        return initializeRegion(size, this.head);
    }

    /*
     * Params:
     *  x = Space to be aligned.
     *
     * Returns: Aligned size of $(D_PARAM x).
     */
    private static size_t addAlignment(const size_t x) @nogc nothrow pure @safe
    {
        // Rounds x up to the next multiple of alignment_. Can wrap around
        // for x near size_t.max; callers detect that by checking
        // result < x (see allocate()).
        return (x - 1) / alignment_ * alignment_ + alignment_;
    }

    /*
     * Params:
     *  x = Required space.
     *
     * Returns: Minimum region size (a multiple of $(D_PSYMBOL pageSize))
     *          with room for the region header and two block headers.
     *          Can wrap around for huge x; callers detect that by checking
     *          result < x (see initializeRegion()).
     */
    private static size_t calculateRegionSize(ref const size_t x)
    @nogc nothrow pure @safe
    {
        return (x + RegionEntry.sizeof + BlockEntry.sizeof * 2)
             / pageSize * pageSize + pageSize;
    }

    /*
     * Returns: Alignment offered.
     */
    @property uint alignment() const @nogc nothrow pure @safe shared
    {
        return alignment_;
    }

    // Alignment of all returned data pointers, in bytes.
    private enum uint alignment_ = 8;

    // Lazily-created singleton (see instantiate()).
    private shared static MmapPool instance_;

    // Page size.
    enum size_t pageSize = 65536;

    // Header placed at the start of every mapped region; regions form a
    // doubly-linked list rooted at `head`.
    private shared struct RegionEntry
    {
        Region prev;
        Region next;
        uint blocks;  // Number of occupied (non-free) blocks in the region.
        size_t size;  // Total mapped size of the region, including headers.
    }
    private alias Region = shared RegionEntry*;
    private shared Region head;

    // Header placed before every data block inside a region; blocks form a
    // per-region doubly-linked list.
    private shared struct BlockEntry
    {
        Block prev;
        Block next;
        Region region;  // Back-pointer to the owning region.
        size_t size;    // Usable data size of the block (excluding header).
        bool free;
    }
    private alias Block = shared BlockEntry*;
}

// Failure-path checks: requests sized to overflow the internal size
// arithmetic must be rejected with null rather than wrap around.
@nogc nothrow pure @system unittest
{
    // allocate() check.
    size_t tooMuchMemory = size_t.max
                         - MmapPool.alignment_
                         - MmapPool.BlockEntry.sizeof * 2
                         - MmapPool.RegionEntry.sizeof
                         - MmapPool.pageSize;
    assert(MmapPool.instance.allocate(tooMuchMemory) is null);

    assert(MmapPool.instance.allocate(size_t.max) is null);

    // initializeRegion() check.
    tooMuchMemory = size_t.max - MmapPool.alignment_;
    assert(MmapPool.instance.allocate(tooMuchMemory) is null);
}