1 // Created on: 2005-03-15
2 // Created by: Peter KURNEV
3 // Copyright (c) 2005-2014 OPEN CASCADE SAS
5 // This file is part of Open CASCADE Technology software library.
7 // This library is free software; you can redistribute it and/or modify it under
8 // the terms of the GNU Lesser General Public License version 2.1 as published
9 // by the Free Software Foundation, with special exception defined in the file
10 // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
11 // distribution for complete text of the license and disclaimer of any warranty.
13 // Alternatively, this file may be used under the terms of Open CASCADE
14 // commercial license or contractual agreement.
20 #include <Standard_MMgrOpt.hxx>
21 #include <Standard_OutOfMemory.hxx>
22 #include <Standard_Assert.hxx>
28 # include <sys/mman.h> /* mmap() */
33 #if defined (__sun) || defined(SOLARIS)
34 extern "C" int getpagesize() ;
40 //======================================================================
42 //======================================================================
44 // This implementation makes a number of assumptions regarding size of
47 // sizeof(Standard_Size) == sizeof(Standard_Address==void*)
49 // On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size)
51 //======================================================================
53 //======================================================================
55 // For clarity of implementation, the following conventions are used
56 // for naming variables:
58 // ...Size: size in bytes
60 // RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity
62 // ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each
64 // ...Storage: address of the user area of the memory block (Standard_Address)
// ...Block: address of the whole memory block (header) (Standard_Size*)
68 //======================================================================
70 //======================================================================
73 // MMAP_BASE_ADDRESS, MMAP_FLAGS
74 #if defined (__hpux) || defined(HPUX)
75 #define MMAP_BASE_ADDRESS 0x80000000
76 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
77 #elif defined (__osf__) || defined(DECOSF1)
78 #define MMAP_BASE_ADDRESS 0x1000000000
79 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
81 #define MMAP_BASE_ADDRESS 0x80000000
82 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
83 #elif defined(__APPLE__)
84 #define MMAP_BASE_ADDRESS 0x80000000
85 #define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE)
86 #elif defined(__linux__)
87 #define MMAP_BASE_ADDRESS 0x20000000
88 #define MMAP_FLAGS (MAP_PRIVATE)
90 //static HANDLE myhMap;
92 #define MMAP_BASE_ADDRESS 0x60000000
93 #define MMAP_FLAGS (MAP_PRIVATE)
96 // Round size up to the specified page size
97 #define PAGE_ALIGN(size,thePageSize) \
98 (((size) + (thePageSize) - 1) & ~((thePageSize) - 1))
100 // Round size up to 4, 8, or 16 bytes
101 // Note that 0 yields 0
102 #define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf)
103 #define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7)
104 #define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3)
105 #define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7)
107 // The following two macros define granularity of memory allocation,
108 // by rounding size to the size of the allocation cell,
109 // and obtaining cell index from rounded size.
110 // Note that granularity shall be not less than sizeof(Standard_Size)
112 // Traditional implementation: granularity 16 bytes
113 //#define ROUNDUP_CELL(size) ROUNDUP16(size)
114 //#define INDEX_CELL(rsize) ((rsize) >> 4)
116 // Reduced granularity: 8 bytes
117 #define ROUNDUP_CELL(size) ROUNDUP8(size)
118 #define ROUNDDOWN_CELL(size) ROUNDDOWN8(size)
119 #define INDEX_CELL(rsize) ((rsize) >> 3)
121 /* In the allocated block, first bytes are used for storing of memory manager's data.
122 (size of block). The minimal size of these data is sizeof(int).
The memory allocated in system is usually aligned by 16 bytes. The alignment of the
data area in the memory block is shifted by BLOCK_SHIFT*sizeof(Standard_Size)
It is OK for WNT, SUN and Linux systems, but on SGI alignment should be 8 bytes.
127 So, BLOCK_SHIFT is formed as macro for support on other possible platforms.
130 #if defined(IRIX) || defined(SOLARIS)
131 #define BLOCK_SHIFT 2
133 #define BLOCK_SHIFT 1
136 // Get address of user area from block address, and vice-versa
137 #define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT)
138 #define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT)
140 //=======================================================================
141 //function : Standard_MMgr
143 //=======================================================================
145 Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear,
146 const Standard_Boolean aMMap,
147 const Standard_Size aCellSize,
148 const Standard_Integer aNbPages,
149 const Standard_Size aThreshold)
151 // check basic assumption
152 Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address));
154 // clear buffer fields
162 // initialize parameters
164 myMMap = (Standard_Integer)aMMap;
165 myCellSize = aCellSize;
166 myNbPages = aNbPages;
167 myThreshold = aThreshold;
173 //=======================================================================
174 //function : ~Standard_MMgrOpt
176 //=======================================================================
178 Standard_MMgrOpt::~Standard_MMgrOpt()
180 Purge(Standard_True);
183 // NOTE: freeing pools may be dangerous if not all memory taken by
184 // this instance of the memory manager has been freed
190 //=======================================================================
191 //function : Initialize
193 //=======================================================================
195 void Standard_MMgrOpt::Initialize()
197 // check number of pages in small blocks pools
198 if ( myNbPages < 100 )
201 // get system-dependent page size
203 myPageSize = getpagesize();
207 SYSTEM_INFO SystemInfo;
208 GetSystemInfo (&SystemInfo);
209 myPageSize = SystemInfo.dwPageSize;
212 // initialize memory mapped files
214 #if defined (__sgi) || defined(IRIX)
215 /* Probleme de conflit en la zone des malloc et la zone des mmap sur SGI */
216 /* Ce probleme a ete identifie en IRIX 5.3 jusqu'en IRIX 6.2. Le probleme */
217 /* ne semble pas apparaitre en IRIX 6.4 */
218 /* Les malloc successifs donnent des adresses croissantes (a partir de 0x0x10000000) */
219 /* ce que l'on appelle le pointeur de BREAK */
220 /* Le premier mmap est force a l'addresse MMAP_BASE_ADDRESS (soit 0x60000000 sur SGI) */
221 /* mais les mmap suivants sont decides par le systeme (flag MAP_VARIABLE). Malheureusement */
222 /* il renvoie une addresse la plus basse possible dans la zone des malloc juste au dessus */
223 /* du BREAK soit 0x18640000 ce qui donne un espace d'allocation d'environ 140 Mo pour les */
224 /* malloc. Sur des gros modeles on peut avoir des pointes a 680 Mo en Rev6 pour une maquette */
225 /* de 2 000 000 de points. En Rev7, la meme maquette n'excedera pas 286 Mo (voir vision.for) */
226 /* Pour palier ce comportement, la solution adoptee est la suivante : */
227 /* Lorsque l'on entre dans alloc_startup (ici), on n'a pas encore fait de mmap. */
228 /* On fait alors un malloc (d'environ 700Mo) que l'on libere de suite. Cela a pour */
229 /* consequence de deplacer le BREAK tres haut. Le BREAK ne redescend jamais meme lors du free */
230 /* Le mmap donnant une adresse (environ 100 Mo au dessus du BREAK) on se retrouve alors avec */
231 /* le partage des zones de memoire suivant : */
232 /* 700 Mo pour les malloc - 500 Mo (1,2Go - 700Mo ) pour les mmap. Avec un CLD_SD_SIZE */
233 /* de 2 000 000 on atteind jamais 500 Mo de mmap, meme en chargeant des applications (qui */
234 /* utilisent la zone de mmap */
235 /* Ce partage des zones memoire pourra eventuellemt etre regle par une variable d'environnement */
238 Standard_Size high_sbrk;
240 high_sbrk = 700*1024*1024;
241 if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
242 high_sbrk = atoi(var);
245 var = (char*)malloc(high_sbrk); // 700 Mb
249 perror("ERR_MEMRY_FAIL");
252 #if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(__linux__) || defined(__FreeBSD__) || defined(__ANDROID__)
253 if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
254 if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
259 perror("ERR_MMAP_FAIL");
265 // initialize free lists
266 myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
267 myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
268 myCellSize = ROUNDUP16(myCellSize);
271 //=======================================================================
272 //function : SetMMgrOptCallBack
273 //purpose : Sets a callback function to be called on each alloc/free
274 //=======================================================================
276 static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;
278 Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
280 MyPCallBackFunc = pFunc;
283 inline void callBack(const Standard_Boolean isAlloc,
284 const Standard_Address aStorage,
285 const Standard_Size aRoundSize,
286 const Standard_Size aSize)
289 (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize);
292 //=======================================================================
293 //function : Allocate
295 //=======================================================================
297 Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
299 Standard_Size * aStorage = NULL;
301 // round up size according to allocation granularity
302 // The keyword 'volatile' is only used here for GCC 64-bit compilations
303 // otherwise this method would crash in runtime in optimized build.
304 volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
305 const Standard_Size Index = INDEX_CELL(RoundSize);
307 // blocks of small and medium size are recyclable
308 if ( Index <= myFreeListMax ) {
309 const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);
311 // Lock access to critical data (myFreeList and other fields) by mutex.
312 // Note that we do not lock fields that do not change during the
313 // object life (such as myThreshold), and assume that calls to functions
314 // of standard library are already protected by their implementation.
315 // The unlock is called as soon as possible, for every treatment case.
316 // We also do not use Sentry, since in case if OCC signal or exception is
317 // caused by this block we will have deadlock anyway...
320 // if free block of the requested size is available, return it
321 if ( myFreeList[Index] ) {
322 // the address of the next free block is stored in the header
323 // of the memory block; use it to update list pointer
324 // to point to next free block
325 Standard_Size* aBlock = myFreeList[Index];
326 myFreeList[Index] = *(Standard_Size**)aBlock;
331 // record size of the allocated block in the block header and
332 // shift the pointer to the beginning of the user part of block
333 aBlock[0] = RoundSize;
334 aStorage = GET_USER(aBlock);
336 // clear block if requested
338 memset (aStorage, 0, RoundSize);
340 // else if block size is small allocate it in pools
341 else if ( RoundSize <= myCellSize ) {
342 // unlock the mutex for free lists
345 // and lock the specific mutex used to protect access to small blocks pools;
346 // note that this is done by sentry class so as to ensure unlocking in case of
347 // possible exception that may be thrown from AllocMemory()
348 Standard_Mutex::Sentry aSentry (myMutexPools);
350 // check for availability of requested space in the current pool
351 Standard_Size *aBlock = myNextAddr;
352 if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
353 // otherwise, allocate new memory pool with page-aligned size
354 Standard_Size Size = myPageSize * myNbPages;
355 aBlock = AllocMemory(Size); // note that size may be aligned by this call
357 if (myEndBlock > myNextAddr) {
358 // put the remaining piece to the free lists
359 const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
360 * sizeof(Standard_Size);
361 const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
362 const Standard_Size aPIndex = INDEX_CELL(aRPSize);
363 if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
365 *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
366 myFreeList[aPIndex] = myNextAddr;
371 // set end pointer to the end of the new pool
372 myEndBlock = aBlock + Size / sizeof(Standard_Size);
373 // record in the first bytes of the pool the address of the previous one
374 *(Standard_Size**)aBlock = myAllocList;
375 // and make new pool current (last)
376 // and get pointer to the first memory block in the pool
377 myAllocList = aBlock;
381 // initialize header of the new block by its size
382 // and get the pointer to the user part of block
383 aBlock[0] = RoundSize;
384 aStorage = GET_USER(aBlock);
386 // and advance pool pointer to the next free piece of pool
387 myNextAddr = &aStorage[RoundSizeN];
389 // blocks of medium size are allocated directly
391 // unlock the mutex immediately, as we do not need further to access any field
394 // we use operator ?: instead of if() since it is faster
395 Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
396 malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );
398 // if allocation failed, try to free some memory by purging free lists, and retry
400 if ( Purge (Standard_False) )
401 aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
402 // if still not succeeded, raise exception
404 Standard_OutOfMemory::Raise ("Standard_MMgrOpt::Allocate(): malloc failed");
407 // initialize new block header by its size
408 // and get the pointer to the user part of block
409 aBlock[0] = RoundSize;
410 aStorage = GET_USER(aBlock);
413 // blocks of big size may be allocated as memory mapped files
415 // Compute size of the block to be allocated, including header,
416 // Note that we use rounded size, even if this block will not be stored in
417 // the free list, for consistency of calls to AllocMemory() / FreeMemory()
418 // and calculation of index in the free list
419 Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);
422 Standard_Size* aBlock = AllocMemory(AllocSize);
424 // initialize new block header by its size
425 // and get the pointer to the user part of block.
426 aBlock[0] = RoundSize;
427 aStorage = GET_USER(aBlock);
430 callBack(Standard_True, aStorage, RoundSize, aSize);
435 //=======================================================================
438 //=======================================================================
440 void Standard_MMgrOpt::Free(Standard_Address theStorage)
442 // safely return if attempt to free null pointer
446 // get the pointer to the memory block header
447 Standard_Size* aBlock = GET_BLOCK(theStorage);
449 // and get the allocated size of the block
450 Standard_Size RoundSize = aBlock[0];
452 callBack(Standard_False, theStorage, RoundSize, 0);
454 // check whether blocks with that size are recyclable
455 const Standard_Size Index = INDEX_CELL(RoundSize);
456 if ( Index <= myFreeListMax ) {
457 // Lock access to critical data (myFreeList and other) by mutex
458 // Note that we do not lock fields that do not change during the
459 // object life (such as myThreshold), and assume that calls to functions
460 // of standard library are already protected by their implementation.
461 // We also do not use Sentry, since in case if OCC signal or exception is
462 // caused by this block we will have deadlock anyway...
465 // in the memory block header, record address of the next free block
466 *(Standard_Size**)aBlock = myFreeList[Index];
467 // add new block to be first in the list
468 myFreeList[Index] = aBlock;
472 // otherwise, we have block of big size which shall be simply released
474 FreeMemory (aBlock, RoundSize);
477 //=======================================================================
479 //purpose : Frees all free lists except small blocks (less than CellSize)
480 //=======================================================================
482 Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
484 // Lock access to critical data by mutex
485 Standard_Mutex::Sentry aSentry (myMutex);
487 // TODO: implement support for isDeleted = True
489 // free memory blocks contained in free lists
490 // whose sizes are greater than cellsize
491 Standard_Integer nbFreed = 0;
492 Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
493 for (; i <= myFreeListMax; i++ ) {
494 Standard_Size * aFree = myFreeList[i];
496 Standard_Size * anOther = aFree;
497 aFree = * (Standard_Size **) aFree;
501 myFreeList[i] = NULL;
504 // Lock access to critical data by mutex
505 Standard_Mutex::Sentry aSentry1 (myMutexPools);
507 // release memory pools containing no busy memory;
508 // for that for each pool count the summary size of blocks
509 // got from the free lists allocated from this pool
511 const Standard_Size PoolSize = myPageSize * myNbPages;
513 const Standard_Size PoolSize =
514 PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
517 const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
518 const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);
520 // declare the table of pools;
521 // (we map free blocks onto a number of pools simultaneously)
522 static const Standard_Integer NB_POOLS_WIN = 512;
523 static Standard_Size* aPools[NB_POOLS_WIN];
524 static Standard_Size aFreeSize[NB_POOLS_WIN];
525 static Standard_Integer aFreePools[NB_POOLS_WIN];
527 Standard_Size * aNextPool = myAllocList;
528 Standard_Size * aPrevPool = NULL;
529 const Standard_Size nCells = INDEX_CELL(myCellSize);
530 Standard_Integer nPool = 0, nPoolFreed = 0;
533 // fill the table of pools
534 Standard_Integer iPool;
535 for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
536 aPools[iPool] = aNextPool;
537 aFreeSize[iPool] = 0;
538 aNextPool = * (Standard_Size **) aNextPool; // get next pool
540 const Standard_Integer iLast = iPool - 1;
543 // scan free blocks, find corresponding pools and increment
545 for (i = 0; i <= nCells; i++ ) {
546 Standard_Size * aFree = myFreeList[i];
547 Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
550 for (iPool = 0; iPool <= iLast; iPool++) {
551 if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
552 aFreeSize[iPool] += aSize;
556 aFree = * (Standard_Size **) aFree; // get next free block
560 // scan the table and make the list of free pools
561 Standard_Integer iLastFree = -1;
562 for (iPool = 0; iPool <= iLast; iPool++) {
563 aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
564 if (aFreeSize[iPool] == RPoolSize)
565 aFreePools[++iLastFree] = iPool;
567 if (iLastFree == -1) {
568 // no free pools found in this table
569 aPrevPool = aPools[iLast];
573 // scan free blocks again, and remove those of them
574 // that belong to free pools
576 for (i = 0; i <= nCells; i++ ) {
577 Standard_Size * aFree = myFreeList[i];
578 Standard_Size * aPrevFree = NULL;
580 for (j = 0; j <= iLastFree; j++) {
581 iPool = aFreePools[j];
582 if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
588 aFree = * (Standard_Size **) aFree;
590 * (Standard_Size **) aPrevFree = aFree; // link to previous
592 myFreeList[i] = aFree;
598 aFree = * (Standard_Size **) aFree;
603 // release free pools, and reconnect remaining pools
604 // in the linked list
605 Standard_Size * aPrev = (aFreePools[0] == 0
607 : aPools[aFreePools[0] - 1]);
608 for (j = 0; j <= iLastFree; j++) {
609 iPool = aFreePools[j];
611 // update the pointer to the previous non-free pool
612 if (iPool - aFreePools[j - 1] > 1)
613 aPrev = aPools[iPool - 1];
615 if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
616 // get next non-free pool
617 Standard_Size * aNext =
618 (j == iLastFree && aFreePools[j] == iLast)
621 // and connect it to the list of pools that have been processed
622 // and remain non-free
624 * (Standard_Size **) aPrev = aNext;
628 FreeMemory(aPools[iPool], PoolSize);
630 // update the pointer to the previous non-free pool
631 aPrevPool = (aFreePools[iLastFree] == iLast
634 nPoolFreed += iLastFree + 1;
640 //=======================================================================
641 //function : FreePools
642 //purpose : Frees all memory pools allocated for small blocks
643 //=======================================================================
645 void Standard_MMgrOpt::FreePools()
647 // Lock access to critical data by mutex
648 Standard_Mutex::Sentry aSentry (myMutexPools);
650 // last pool is remembered in myAllocList
651 Standard_Size * aFree = myAllocList;
654 Standard_Size * aBlock = aFree;
655 // next pool address is stored in first 8 bytes of each pool
656 aFree = * (Standard_Size **) aFree;
657 // free pool (note that its size is calculated rather than stored)
658 FreeMemory ( aBlock, myPageSize * myNbPages );
662 //=======================================================================
663 //function : Reallocate
665 //=======================================================================
667 Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage,
668 const Standard_Size theNewSize)
670 // if theStorage == NULL, just allocate new memory block
673 return Allocate(theNewSize);
676 Standard_Size * aBlock = GET_BLOCK(theStorage);
677 Standard_Address newStorage = NULL;
679 // get current size of the memory block from its header
680 Standard_Size OldSize = aBlock[0];
682 // if new size is less than old one, just do nothing
683 if (theNewSize <= OldSize) {
684 newStorage = theStorage;
686 // otherwise, allocate new block and copy the data to it
688 newStorage = Allocate(theNewSize);
689 memcpy (newStorage, theStorage, OldSize);
691 // clear newly added part of the block
693 memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize);
698 //=======================================================================
699 //function : AllocMemory
700 //purpose : Allocate a big block of memory using either malloc/calloc
701 // or memory mapped file
702 //=======================================================================
704 Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
706 // goto is used as efficient method for a possibility to retry allocation
709 Standard_Size * aBlock = NULL;
711 // if MMap option is ON, allocate using memory mapped files
715 // align size to page size
716 const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);
719 // note that on UNIX myMMap is file descriptor for /dev/null
720 aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
721 PROT_READ | PROT_WRITE, MMAP_FLAGS,
723 if (aBlock == MAP_FAILED /* -1 */) {
725 // as a last resort, try freeing some memory by calling Purge()
726 if ( Purge(Standard_False) )
728 // if nothing helps, raise exception
729 Standard_OutOfMemory::Raise (strerror(errcode));
732 // save actually allocated size into argument
737 // align size to page size, taking into account additional space needed to
738 // store handle to the memory map
739 const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);
741 // allocate mapped file
742 HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
744 DWORD(AlignedSize / 0x80000000),
745 DWORD(AlignedSize % 0x80000000), NULL);
746 HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
747 (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
748 // check for error and try allocating address space
751 // close handle if allocated
755 // as a last resort, try freeing some memory by calling Purge() and retry
756 if ( Purge(Standard_False) )
758 // if nothing helps, make error message and raise exception
759 const int BUFSIZE=1024;
761 wchar_t message[BUFSIZE];
763 if ( FormatMessageW (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0,
764 message, BUFSIZE-1, 0) <=0 )
765 StringCchCopyW(message, _countof(message), L"Standard_MMgrOpt::AllocMemory() failed to mmap");
767 char messageA[BUFSIZE];
768 WideCharToMultiByte(CP_UTF8, 0, message, -1, messageA, sizeof(messageA), NULL, NULL);
769 Standard_OutOfMemory::Raise(messageA);
772 // record map handle in the beginning
775 // and shift to the beginning of usable area
776 aBlock = (Standard_Size*)(aMBlock+1);
778 // save actually allocated size into argument
779 Size = AlignedSize - sizeof(HANDLE);
782 // else just allocate by malloc or calloc
784 aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
788 // as a last resort, try freeing some memory by calling Purge()
789 if ( Purge(Standard_False) )
791 // if nothing helps, raise exception
792 Standard_OutOfMemory::Raise ("Standard_MMgrOpt::Allocate(): malloc failed");
795 // clear whole block if clearing option is set
797 memset (aBlock, 0, Size);
801 //=======================================================================
802 //function : FreeMemory
804 //=======================================================================
806 void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
813 // release memory (either free or unmap)
816 // align size to page size, just the same as in AllocMemory()
817 const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
818 munmap((char*)aBlock, AlignedSize);
820 // recover handle to the memory mapping stored just before the block
821 const HANDLE * aMBlock = (const HANDLE *)aBlock;
822 HANDLE hMap = *(--aMBlock);
823 UnmapViewOfFile((LPCVOID)aMBlock);