1 // Created on: 2005-03-15
2 // Created by: Peter KURNEV
3 // Copyright (c) 2005-2014 OPEN CASCADE SAS
5 // This file is part of Open CASCADE Technology software library.
7 // This library is free software; you can redistribute it and / or modify it
8 // under the terms of the GNU Lesser General Public version 2.1 as published
9 // by the Free Software Foundation, with special exception defined in the file
10 // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
11 // distribution for complete text of the license and disclaimer of any warranty.
13 // Alternatively, this file may be used under the terms of Open CASCADE
14 // commercial license or contractual agreement.
16 #include <Standard_MMgrOpt.hxx>
17 #include <Standard_OutOfMemory.hxx>
18 #include <Standard_Assert.hxx>
40 # ifdef HAVE_SYS_MMAN_H
41 # include <sys/mman.h> /* mmap() */
48 #include <sys/types.h>
52 #if defined (__sun) || defined(SOLARIS)
53 extern "C" int getpagesize() ;
56 //======================================================================
58 //======================================================================
60 // This implementation makes a number of assumptions regarding size of
63 // sizeof(Standard_Size) == sizeof(Standard_Address==void*)
65 // On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size)
67 //======================================================================
69 //======================================================================
71 // For clarity of implementation, the following conventions are used
72 // for naming variables:
74 // ...Size: size in bytes
76 // RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity
78 // ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each
80 // ...Storage: address of the user area of the memory block (Standard_Address)
82 // ...Block: address of the whole memory block (header) (Standard_Size*)
84 //======================================================================
86 //======================================================================
89 // MMAP_BASE_ADDRESS, MMAP_FLAGS
90 #if defined (__hpux) || defined(HPUX)
91 #define MMAP_BASE_ADDRESS 0x80000000
92 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
93 #elif defined (__osf__) || defined(DECOSF1)
94 #define MMAP_BASE_ADDRESS 0x1000000000
95 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
97 #define MMAP_BASE_ADDRESS 0x80000000
98 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
99 #elif defined(__APPLE__)
100 #define MMAP_BASE_ADDRESS 0x80000000
101 #define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE)
103 #define MMAP_BASE_ADDRESS 0x20000000
104 #define MMAP_FLAGS (MAP_PRIVATE)
106 //static HANDLE myhMap;
108 #define MMAP_BASE_ADDRESS 0x60000000
109 #define MMAP_FLAGS (MAP_PRIVATE)
112 // Round size up to the specified page size
113 #define PAGE_ALIGN(size,thePageSize) \
114 (((size) + (thePageSize) - 1) & ~((thePageSize) - 1))
116 // Round size up to 4, 8, or 16 bytes
117 // Note that 0 yields 0
118 #define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf)
119 #define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7)
120 #define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3)
121 #define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7)
123 // The following two macros define granularity of memory allocation,
124 // by rounding size to the size of the allocation cell,
125 // and obtaining cell index from rounded size.
126 // Note that granularity shall be not less than sizeof(Standard_Size)
128 // Traditional implementation: granularity 16 bytes
129 //#define ROUNDUP_CELL(size) ROUNDUP16(size)
130 //#define INDEX_CELL(rsize) ((rsize) >> 4)
132 // Reduced granularity: 8 bytes
133 #define ROUNDUP_CELL(size) ROUNDUP8(size)
134 #define ROUNDDOWN_CELL(size) ROUNDDOWN8(size)
135 #define INDEX_CELL(rsize) ((rsize) >> 3)
137 // Minimal granularity: 4 bytes (32-bit systems only)
139 //#define ROUNDUP_CELL(size) ROUNDUP4(size)
140 //#define INDEX_CELL(rsize) ((rsize) >> 2)
143 // Adaptive granularity, less for little blocks and greater for bigger ones:
146 #define ROUNDUP_CELL(size) ((size) <= 0x40 ? ROUNDUP8(size) : ROUNDUP16(size))
147 #define INDEX_CELL(rsize) ((rsize) <= 0x40 ? ((rsize) >> 3) : (4 + ((rsize) >> 4)))
149 #define ROUNDUP_CELL(size) ((size) <= 0x40 ? ROUNDUP4(size) : ROUNDUP8(size))
150 #define INDEX_CELL(rsize) ((rsize) <= 0x40 ? ((rsize) >> 2) : (8 + ((rsize) >> 3)))
155 /* In the allocated block, first bytes are used for storing of memory manager's data.
156 (size of block). The minimal size of these data is sizeof(int).
157 The memory allocated in the system is usually aligned by 16 bytes. The alignment of the
158 data area in the memory block is shifted by BLOCK_SHIFT*sizeof(Standard_Size)
160 It is OK for WNT, SUN and Linux systems, but on SGI the alignment should be 8 bytes.
161 So, BLOCK_SHIFT is defined as a macro to support other possible platforms.
164 #if defined(IRIX) || defined(SOLARIS)
165 #define BLOCK_SHIFT 2
167 #define BLOCK_SHIFT 1
170 // Get address of user area from block address, and vice-versa
171 #define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT)
172 #define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT)
174 // create static instance of out-of-memory exception to protect
175 // against possible lack of memory for its raising
176 static Handle(Standard_OutOfMemory) anOutOfMemError = new Standard_OutOfMemory;
178 //=======================================================================
179 //function : Standard_MMgr
181 //=======================================================================
183 Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear,
184 const Standard_Boolean aMMap,
185 const Standard_Size aCellSize,
186 const Standard_Integer aNbPages,
187 const Standard_Size aThreshold)
189 // check basic assumption
190 Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address));
192 // clear buffer fields
200 // initialize parameters
202 myMMap = (Standard_Integer)aMMap;
203 myCellSize = aCellSize;
204 myNbPages = aNbPages;
205 myThreshold = aThreshold;
211 //=======================================================================
212 //function : ~Standard_MMgrOpt
214 //=======================================================================
216 Standard_MMgrOpt::~Standard_MMgrOpt()
218 Purge(Standard_True);
221 // NOTE: freeing pools may be dangerous if not all memory taken by
222 // this instance of the memory manager has been freed
228 //=======================================================================
229 //function : Initialize
231 //=======================================================================
233 void Standard_MMgrOpt::Initialize()
235 // check number of pages in small blocks pools
236 if ( myNbPages < 100 )
239 // get system-dependent page size
241 myPageSize = getpagesize();
245 SYSTEM_INFO SystemInfo;
246 GetSystemInfo (&SystemInfo);
247 myPageSize = SystemInfo.dwPageSize;
250 // initialize memory mapped files
252 #if defined (__sgi) || defined(IRIX)
253 /* Probleme de conflit en la zone des malloc et la zone des mmap sur SGI */
254 /* Ce probleme a ete identifie en IRIX 5.3 jusqu'en IRIX 6.2. Le probleme */
255 /* ne semble pas apparaitre en IRIX 6.4 */
256 /* Les malloc successifs donnent des adresses croissantes (a partir de 0x0x10000000) */
257 /* ce que l'on appelle le pointeur de BREAK */
258 /* Le premier mmap est force a l'addresse MMAP_BASE_ADDRESS (soit 0x60000000 sur SGI) */
259 /* mais les mmap suivants sont decides par le systeme (flag MAP_VARIABLE). Malheureusement */
260 /* il renvoie une addresse la plus basse possible dans la zone des malloc juste au dessus */
261 /* du BREAK soit 0x18640000 ce qui donne un espace d'allocation d'environ 140 Mo pour les */
262 /* malloc. Sur des gros modeles on peut avoir des pointes a 680 Mo en Rev6 pour une maquette */
263 /* de 2 000 000 de points. En Rev7, la meme maquette n'excedera pas 286 Mo (voir vision.for) */
264 /* Pour palier ce comportement, la solution adoptee est la suivante : */
265 /* Lorsque l'on entre dans alloc_startup (ici), on n'a pas encore fait de mmap. */
266 /* On fait alors un malloc (d'environ 700Mo) que l'on libere de suite. Cela a pour */
267 /* consequence de deplacer le BREAK tres haut. Le BREAK ne redescend jamais meme lors du free */
268 /* Le mmap donnant une adresse (environ 100 Mo au dessus du BREAK) on se retrouve alors avec */
269 /* le partage des zones de memoire suivant : */
270 /* 700 Mo pour les malloc - 500 Mo (1,2Go - 700Mo ) pour les mmap. Avec un CLD_SD_SIZE */
271 /* de 2 000 000 on atteind jamais 500 Mo de mmap, meme en chargeant des applications (qui */
272 /* utilisent la zone de mmap */
273 /* Ce partage des zones memoire pourra eventuellemt etre regle par une variable d'environnement */
276 Standard_Size high_sbrk;
278 high_sbrk = 700*1024*1024;
279 if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
280 high_sbrk = atoi(var);
283 var = (char*)malloc(high_sbrk); // 700 Mb
287 perror("ERR_MEMRY_FAIL");
290 #if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(LIN) || defined(linux) || defined(__FreeBSD__)
291 if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
292 if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
297 perror("ERR_MMAP_FAIL");
303 // initialize free lists
304 myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
305 myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
306 myCellSize = ROUNDUP16(myCellSize);
309 //=======================================================================
310 //function : SetMMgrOptCallBack
311 //purpose : Sets a callback function to be called on each alloc/free
312 //=======================================================================
314 static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;
316 Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
318 MyPCallBackFunc = pFunc;
321 inline void callBack(const Standard_Boolean isAlloc,
322 const Standard_Address aStorage,
323 const Standard_Size aRoundSize,
324 const Standard_Size aSize)
327 (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize);
330 //=======================================================================
331 //function : Allocate
333 //=======================================================================
335 Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
337 Standard_Size * aStorage = NULL;
339 // round up size according to allocation granularity
340 // The keyword 'volatile' is only used here for GCC 64-bit compilations
341 // otherwise this method would crash in runtime in optimized build.
342 volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
343 const Standard_Size Index = INDEX_CELL(RoundSize);
345 // blocks of small and medium size are recyclable
346 if ( Index <= myFreeListMax ) {
347 const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);
349 // Lock access to critical data (myFreeList and other fields) by mutex.
350 // Note that we do not lock fields that do not change during the
351 // object life (such as myThreshold), and assume that calls to functions
352 // of standard library are already protected by their implementation.
353 // The unlock is called as soon as possible, for every treatment case.
354 // We also do not use Sentry, since in case if OCC signal or exception is
355 // caused by this block we will have deadlock anyway...
358 // if free block of the requested size is available, return it
359 if ( myFreeList[Index] ) {
360 // the address of the next free block is stored in the header
361 // of the memory block; use it to update list pointer
362 // to point to next free block
363 Standard_Size* aBlock = myFreeList[Index];
364 myFreeList[Index] = *(Standard_Size**)aBlock;
369 // record size of the allocated block in the block header and
370 // shift the pointer to the beginning of the user part of block
371 aBlock[0] = RoundSize;
372 aStorage = GET_USER(aBlock);
374 // clear block if requested
376 memset (aStorage, 0, RoundSize);
378 // else if block size is small allocate it in pools
379 else if ( RoundSize <= myCellSize ) {
380 // unlock the mutex for free lists
383 // and lock the specific mutex used to protect access to small blocks pools;
384 // note that this is done by sentry class so as to ensure unlocking in case of
385 // possible exception that may be thrown from AllocMemory()
386 Standard_Mutex::Sentry aSentry (myMutexPools);
388 // check for availability of requested space in the current pool
389 Standard_Size *aBlock = myNextAddr;
390 if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
391 // otherwise, allocate new memory pool with page-aligned size
392 Standard_Size Size = myPageSize * myNbPages;
393 aBlock = AllocMemory(Size); // note that size may be aligned by this call
395 if (myEndBlock > myNextAddr) {
396 // put the remaining piece to the free lists
397 const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
398 * sizeof(Standard_Size);
399 const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
400 const Standard_Size aPIndex = INDEX_CELL(aRPSize);
401 if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
403 *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
404 myFreeList[aPIndex] = myNextAddr;
409 // set end pointer to the end of the new pool
410 myEndBlock = aBlock + Size / sizeof(Standard_Size);
411 // record in the first bytes of the pool the address of the previous one
412 *(Standard_Size**)aBlock = myAllocList;
413 // and make new pool current (last)
414 // and get pointer to the first memory block in the pool
415 myAllocList = aBlock;
419 // initialize header of the new block by its size
420 // and get the pointer to the user part of block
421 aBlock[0] = RoundSize;
422 aStorage = GET_USER(aBlock);
424 // and advance pool pointer to the next free piece of pool
425 myNextAddr = &aStorage[RoundSizeN];
427 // blocks of medium size are allocated directly
429 // unlock the mutex immediately, as we do not need further to access any field
432 // we use operator ?: instead of if() since it is faster
433 Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
434 malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );
436 // if allocation failed, try to free some memory by purging free lists, and retry
438 if ( Purge (Standard_False) )
439 aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
440 // if still not succeeded, raise exception
442 anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
445 // initialize new block header by its size
446 // and get the pointer to the user part of block
447 aBlock[0] = RoundSize;
448 aStorage = GET_USER(aBlock);
451 // blocks of big size may be allocated as memory mapped files
453 // Compute size of the block to be allocated, including header,
454 // Note that we use rounded size, even if this block will not be stored in
455 // the free list, for consistency of calls to AllocMemory() / FreeMemory()
456 // and calculation of index in the free list
457 Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);
460 Standard_Size* aBlock = AllocMemory(AllocSize);
462 // initialize new block header by its size
463 // and get the pointer to the user part of block.
464 aBlock[0] = RoundSize;
465 aStorage = GET_USER(aBlock);
468 callBack(Standard_True, aStorage, RoundSize, aSize);
473 //=======================================================================
476 //=======================================================================
478 void Standard_MMgrOpt::Free(Standard_Address theStorage)
480 // safely return if attempt to free null pointer
484 // get the pointer to the memory block header
485 Standard_Size* aBlock = GET_BLOCK(theStorage);
487 // and get the allocated size of the block
488 Standard_Size RoundSize = aBlock[0];
490 callBack(Standard_False, theStorage, RoundSize, 0);
492 // check whether blocks with that size are recyclable
493 const Standard_Size Index = INDEX_CELL(RoundSize);
494 if ( Index <= myFreeListMax ) {
495 // Lock access to critical data (myFreeList and other) by mutex
496 // Note that we do not lock fields that do not change during the
497 // object life (such as myThreshold), and assume that calls to functions
498 // of standard library are already protected by their implementation.
499 // We also do not use Sentry, since in case if OCC signal or exception is
500 // caused by this block we will have deadlock anyway...
503 // in the memory block header, record address of the next free block
504 *(Standard_Size**)aBlock = myFreeList[Index];
505 // add new block to be first in the list
506 myFreeList[Index] = aBlock;
510 // otherwise, we have block of big size which shall be simply released
512 FreeMemory (aBlock, RoundSize);
515 //=======================================================================
517 //purpose : Frees all free lists except small blocks (less than CellSize)
518 //=======================================================================
520 Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
522 // Lock access to critical data by mutex
523 Standard_Mutex::Sentry aSentry (myMutex);
525 // TODO: implement support for isDeleted = True
527 // free memory blocks contained in free lists
528 // whose sizes are greater than cellsize
529 Standard_Integer nbFreed = 0;
530 Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
531 for (; i <= myFreeListMax; i++ ) {
532 Standard_Size * aFree = myFreeList[i];
534 Standard_Size * anOther = aFree;
535 aFree = * (Standard_Size **) aFree;
539 myFreeList[i] = NULL;
542 // Lock access to critical data by mutex
543 Standard_Mutex::Sentry aSentry1 (myMutexPools);
545 // release memory pools containing no busy memory;
546 // for that for each pool count the summary size of blocks
547 // got from the free lists allocated from this pool
549 const Standard_Size PoolSize = myPageSize * myNbPages;
551 const Standard_Size PoolSize =
552 PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
555 const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
556 const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);
558 // declare the table of pools;
559 // (we map free blocks onto a number of pools simultaneously)
560 static const Standard_Integer NB_POOLS_WIN = 512;
561 static Standard_Size* aPools[NB_POOLS_WIN];
562 static Standard_Size aFreeSize[NB_POOLS_WIN];
563 static Standard_Integer aFreePools[NB_POOLS_WIN];
565 Standard_Size * aNextPool = myAllocList;
566 Standard_Size * aPrevPool = NULL;
567 const Standard_Size nCells = INDEX_CELL(myCellSize);
568 Standard_Integer nPool = 0, nPoolFreed = 0;
571 // fill the table of pools
572 Standard_Integer iPool;
573 for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
574 aPools[iPool] = aNextPool;
575 aFreeSize[iPool] = 0;
576 aNextPool = * (Standard_Size **) aNextPool; // get next pool
578 const Standard_Integer iLast = iPool - 1;
581 // scan free blocks, find corresponding pools and increment
583 for (i = 0; i <= nCells; i++ ) {
584 Standard_Size * aFree = myFreeList[i];
585 Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
588 for (iPool = 0; iPool <= iLast; iPool++) {
589 if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
590 aFreeSize[iPool] += aSize;
594 aFree = * (Standard_Size **) aFree; // get next free block
598 // scan the table and make the list of free pools
599 Standard_Integer iLastFree = -1;
600 for (iPool = 0; iPool <= iLast; iPool++) {
601 aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
602 if (aFreeSize[iPool] == RPoolSize)
603 aFreePools[++iLastFree] = iPool;
605 if (iLastFree == -1) {
606 // no free pools found in this table
607 aPrevPool = aPools[iLast];
611 // scan free blocks again, and remove those of them
612 // that belong to free pools
614 for (i = 0; i <= nCells; i++ ) {
615 Standard_Size * aFree = myFreeList[i];
616 Standard_Size * aPrevFree = NULL;
618 for (j = 0; j <= iLastFree; j++) {
619 iPool = aFreePools[j];
620 if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
626 aFree = * (Standard_Size **) aFree;
628 * (Standard_Size **) aPrevFree = aFree; // link to previous
630 myFreeList[i] = aFree;
636 aFree = * (Standard_Size **) aFree;
641 // release free pools, and reconnect remaining pools
642 // in the linked list
643 Standard_Size * aPrev = (aFreePools[0] == 0
645 : aPools[aFreePools[0] - 1]);
646 for (j = 0; j <= iLastFree; j++) {
647 iPool = aFreePools[j];
649 // update the pointer to the previous non-free pool
650 if (iPool - aFreePools[j - 1] > 1)
651 aPrev = aPools[iPool - 1];
653 if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
654 // get next non-free pool
655 Standard_Size * aNext =
656 (j == iLastFree && aFreePools[j] == iLast)
659 // and connect it to the list of pools that have been processed
660 // and remain non-free
662 * (Standard_Size **) aPrev = aNext;
666 FreeMemory(aPools[iPool], PoolSize);
668 // update the pointer to the previous non-free pool
669 aPrevPool = (aFreePools[iLastFree] == iLast
672 nPoolFreed += iLastFree + 1;
678 //=======================================================================
679 //function : FreePools
680 //purpose : Frees all memory pools allocated for small blocks
681 //=======================================================================
683 void Standard_MMgrOpt::FreePools()
685 // Lock access to critical data by mutex
686 Standard_Mutex::Sentry aSentry (myMutexPools);
688 // last pool is remembered in myAllocList
689 Standard_Size * aFree = myAllocList;
692 Standard_Size * aBlock = aFree;
693 // next pool address is stored in first 8 bytes of each pool
694 aFree = * (Standard_Size **) aFree;
695 // free pool (note that its size is calculated rather than stored)
696 FreeMemory ( aBlock, myPageSize * myNbPages );
700 //=======================================================================
701 //function : Reallocate
703 //=======================================================================
705 Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage,
706 const Standard_Size theNewSize)
708 // if theStorage == NULL, just allocate new memory block
711 return Allocate(theNewSize);
714 Standard_Size * aBlock = GET_BLOCK(theStorage);
715 Standard_Address newStorage = NULL;
717 // get current size of the memory block from its header
718 Standard_Size OldSize = aBlock[0];
720 // if new size is less than old one, just do nothing
721 if (theNewSize <= OldSize) {
722 newStorage = theStorage;
724 // otherwise, allocate new block and copy the data to it
726 newStorage = Allocate(theNewSize);
727 memcpy (newStorage, theStorage, OldSize);
729 // clear newly added part of the block
731 memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize);
736 //=======================================================================
737 //function : AllocMemory
738 //purpose : Allocate a big block of memory using either malloc/calloc
739 // or memory mapped file
740 //=======================================================================
742 Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
744 // goto is used as efficient method for a possibility to retry allocation
747 Standard_Size * aBlock = NULL;
749 // if MMap option is ON, allocate using memory mapped files
753 // align size to page size
754 const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);
757 // note that on UNIX myMMap is file descriptor for /dev/null
758 aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
759 PROT_READ | PROT_WRITE, MMAP_FLAGS,
761 if (aBlock == MAP_FAILED /* -1 */) {
763 // as a last resort, try freeing some memory by calling Purge()
764 if ( Purge(Standard_False) )
766 // if nothing helps, raise exception
767 anOutOfMemError->Reraise (strerror(errcode));
770 // save actually allocated size into argument
775 // align size to page size, taking into account additional space needed to
776 // store handle to the memory map
777 const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);
779 // allocate mapped file
780 HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
782 DWORD(AlignedSize / 0x80000000),
783 DWORD(AlignedSize % 0x80000000), NULL);
784 HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
785 (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
786 // check for error and try allocating address space
789 // close handle if allocated
793 // as a last resort, try freeing some memory by calling Purge() and retry
794 if ( Purge(Standard_False) )
796 // if nothing helps, make error message and raise exception
797 const int BUFSIZE=1024;
798 char message[BUFSIZE];
799 if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
800 strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
801 anOutOfMemError->Reraise (message);
804 // record map handle in the beginning
807 // and shift to the beginning of usable area
808 aBlock = (Standard_Size*)(aMBlock+1);
810 // save actually allocated size into argument
811 Size = AlignedSize - sizeof(HANDLE);
814 // else just allocate by malloc or calloc
816 aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
820 // as a last resort, try freeing some memory by calling Purge()
821 if ( Purge(Standard_False) )
823 // if nothing helps, raise exception
824 anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
827 // clear whole block if clearing option is set
829 memset (aBlock, 0, Size);
833 //=======================================================================
834 //function : FreeMemory
836 //=======================================================================
838 void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
845 // release memory (either free or unmap)
848 // align size to page size, just the same as in AllocMemory()
849 const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
850 munmap((char*)aBlock, AlignedSize);
852 // recover handle to the memory mapping stored just before the block
853 const HANDLE * aMBlock = (const HANDLE *)aBlock;
854 HANDLE hMap = *(--aMBlock);
855 UnmapViewOfFile((LPCVOID)aMBlock);