// File: Standard_MMgrOpt.cxx
// Created: Tue Mar 15 12:09:38 2005
// Author: Peter KURNEV
// <pkv@irinox>

#include <Standard_MMgrOpt.hxx>
#include <Standard_OutOfMemory.hxx>

//
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>

#ifdef HAVE_STRING_H
# include <string.h>
#endif

#ifndef WNT
# include <stdlib.h>
# include <errno.h>
#endif

#ifdef WNT
#include <windows.h>
#else
# ifdef HAVE_UNISTD_H
#  include <unistd.h>
# endif
# ifdef HAVE_SYS_MMAN_H
#  include <sys/mman.h>    /* mmap() */
# endif
#endif
#ifdef HAVE_MALLOC_H
# include <malloc.h>
#endif
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
//
#if defined (__sun) || defined(SOLARIS)
extern "C" int getpagesize() ;
#endif

//======================================================================
// Assumptions
//======================================================================

// This implementation makes a number of assumptions regarding size of
// types:
//
// sizeof(Standard_Size) == sizeof(Standard_Address==void*)
//
// On WNT, sizeof(HANDLE) is a multiple of sizeof(Standard_Size)

//======================================================================
// Naming conventions
//======================================================================

// For clarity of implementation, the following conventions are used
// for naming variables:
//
// ...Size: size in bytes
//
// RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity
//
// ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each
//
// ...Storage: address of the user area of the memory block (Standard_Address)
//
// ...Block: address of the whole memory block (header) (Standard_Size*)

//======================================================================
// Macro definitions
//======================================================================

//
// MMAP_BASE_ADDRESS, MMAP_FLAGS
#if defined (__hpux) || defined(HPUX)
#define MMAP_BASE_ADDRESS 0x80000000
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
#elif defined (__osf__) || defined(DECOSF1)
#define MMAP_BASE_ADDRESS 0x1000000000
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
#elif defined(_AIX)
#define MMAP_BASE_ADDRESS 0x80000000
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
#elif defined(__APPLE__)
#define MMAP_BASE_ADDRESS 0x80000000
#define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE)
#elif defined(LIN)
#define MMAP_BASE_ADDRESS 0x20000000
#define MMAP_FLAGS (MAP_PRIVATE)
#elif defined(WNT)
//static HANDLE myhMap;
#else
#define MMAP_BASE_ADDRESS 0x60000000
#define MMAP_FLAGS (MAP_PRIVATE)
#endif

// Round size up to the specified page size
#define PAGE_ALIGN(size,thePageSize) \
  (((size) + (thePageSize) - 1) & ~((thePageSize) - 1))
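
// Worked example (illustrative): for a 4096-byte page,
//   PAGE_ALIGN(5000, 4096) == 8192
//   PAGE_ALIGN(4096, 4096) == 4096
// The mask trick requires thePageSize to be a power of two.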

// Round size up to 4, 8, or 16 bytes
// Note that 0 yields 0
#define ROUNDUP16(size)   (((size) + 0xf) & ~(Standard_Size)0xf)
#define ROUNDUP8(size)    (((size) + 0x7) & ~(Standard_Size)0x7)
#define ROUNDUP4(size)    (((size) + 0x3) & ~(Standard_Size)0x3)
#define ROUNDDOWN8(size)  ((size) & ~(Standard_Size)0x7)

// The following two macros define the granularity of memory allocation,
// by rounding size to the size of the allocation cell,
// and obtaining the cell index from the rounded size.
// Note that the granularity must be at least sizeof(Standard_Size).

// Traditional implementation: granularity 16 bytes
//#define ROUNDUP_CELL(size)   ROUNDUP16(size)
//#define INDEX_CELL(rsize)    ((rsize) >> 4)

// Reduced granularity: 8 bytes
#define ROUNDUP_CELL(size)     ROUNDUP8(size)
#define ROUNDDOWN_CELL(size)   ROUNDDOWN8(size)
#define INDEX_CELL(rsize)      ((rsize) >> 3)
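
// Worked example (illustrative) with the 8-byte granularity above:
//   ROUNDUP_CELL(1)  == 8,   INDEX_CELL(8)  == 1
//   ROUNDUP_CELL(13) == 16,  INDEX_CELL(16) == 2
//   ROUNDUP_CELL(24) == 24,  INDEX_CELL(24) == 3
// so free list slot i keeps blocks whose rounded size is 8*i bytes.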

// Minimal granularity: 4 bytes (32-bit systems only)
#ifndef _OCC64
//#define ROUNDUP_CELL(size)   ROUNDUP4(size)
//#define INDEX_CELL(rsize)    ((rsize) >> 2)
#endif

// Adaptive granularity, less for little blocks and greater for bigger ones:
/*
#if _OCC64
#define ROUNDUP_CELL(size)     ((size) <= 0x40 ? ROUNDUP8(size) : ROUNDUP16(size))
#define INDEX_CELL(rsize)      ((rsize) <= 0x40 ? ((rsize) >> 3) : (4 + ((rsize) >> 4)))
#else
#define ROUNDUP_CELL(size)     ((size) <= 0x40 ? ROUNDUP4(size) : ROUNDUP8(size))
#define INDEX_CELL(rsize)      ((rsize) <= 0x40 ? ((rsize) >> 2) : (8 + ((rsize) >> 3)))
#endif
*/

/* In the allocated block, the first bytes are used for storing the memory
   manager's data (the size of the block). The minimal size of these data is
   sizeof(int). Memory allocated by the system is usually aligned to 16 bytes,
   and the alignment of the data area in the memory block is shifted by
   BLOCK_SHIFT*sizeof(Standard_Size) bytes.
   This is fine for WNT, SUN and Linux systems, but on SGI the alignment should
   be 8 bytes. Hence BLOCK_SHIFT is defined as a macro to allow support of other
   possible platforms.
*/

#if defined(IRIX) || defined(SOLARIS)
#define BLOCK_SHIFT 2
#else
#define BLOCK_SHIFT 1
#endif

// Get address of user area from block address, and vice-versa
#define GET_USER(block)    (((Standard_Size*)(block)) + BLOCK_SHIFT)
#define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT)
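
// Block layout sketch (illustrative, BLOCK_SHIFT == 1):
//
//   aBlock[0]   -> rounded size of the block, in bytes (or, while the block
//                  sits in a free list, the link to the next free block)
//   aBlock[1..] -> user area returned by GET_USER(aBlock)
//
// so GET_BLOCK(GET_USER(aBlock)) == aBlock. With BLOCK_SHIFT == 2
// (32-bit IRIX/Solaris) the user area starts 8 bytes after a 16-byte
// aligned block start, keeping 8-byte alignment for doubles.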

// create static instance of out-of-memory exception to protect
// against a possible lack of memory when it has to be raised
static Handle(Standard_OutOfMemory) anOutOfMemError = new Standard_OutOfMemory;

//=======================================================================
//function : Standard_MMgrOpt
//purpose  : 
//=======================================================================

Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear,
                                   const Standard_Boolean aMMap,
                                   const Standard_Size aCellSize,
                                   const Standard_Integer aNbPages,
                                   const Standard_Size aThreshold,
                                   const Standard_Boolean isReentrant)
{
  // check basic assumption
  if ( sizeof(Standard_Size) != sizeof(Standard_Address) )
  {
    cerr << "Fatal error: Open CASCADE Optimized Memory manager: this platform is not supported!" << endl;
    exit(1);
  }

  // clear buffer fields
  myFreeListMax = 0;
  myFreeList = NULL;
  myPageSize = 0;
  myAllocList = NULL;
  myNextAddr = NULL;
  myEndBlock = NULL;

  // initialize parameters
  myClear = aClear;
  myMMap = (Standard_Integer)aMMap;
  myCellSize = aCellSize;
  myNbPages = aNbPages;
  myThreshold = aThreshold;
  myReentrant = isReentrant;

  // initialize
  Initialize();
}
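
// A minimal usage sketch (illustrative values, not the library defaults):
//
//   Standard_MMgrOpt aMgr (Standard_True,   // zero-initialize allocations
//                          Standard_True,   // use memory mapped files
//                          0x200,           // cell size for small blocks
//                          1000,            // pages per small-block pool
//                          40000,           // threshold for big blocks
//                          Standard_True);  // reentrant (mutex-protected)
//   Standard_Address aPtr = aMgr.Allocate (100);
//   aMgr.Free (aPtr);                       // aPtr is reset to NULL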

//=======================================================================
//function : ~Standard_MMgrOpt
//purpose  : 
//=======================================================================

Standard_MMgrOpt::~Standard_MMgrOpt()
{
  Purge(Standard_True);
  free(myFreeList);

  // NOTE: freeing pools may be dangerous if not all memory taken by
  //       this instance of the memory manager has been freed
  FreePools();
}

// interface level

//=======================================================================
//function : Initialize
//purpose  : 
//=======================================================================

void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools
  if ( myNbPages < 100 )
    myNbPages = 1000;

  // get system-dependent page size
#ifndef WNT
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0;
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    /* Conflict between the malloc zone and the mmap zone on SGI.                     */
    /* This problem was identified on IRIX 5.3 up to IRIX 6.2; it does not seem to    */
    /* appear on IRIX 6.4.                                                            */
    /* Successive mallocs return increasing addresses (starting from 0x10000000),     */
    /* the so-called BREAK pointer.                                                   */
    /* The first mmap is forced to the address MMAP_BASE_ADDRESS (0x60000000 on SGI), */
    /* but subsequent mmaps are placed by the system (flag MAP_VARIABLE).             */
    /* Unfortunately it returns the lowest possible address in the malloc zone, just  */
    /* above the BREAK, i.e. 0x18640000, which leaves an allocation space of about    */
    /* 140 MB for the mallocs. On big models, peaks of 680 MB were observed in Rev6   */
    /* for a mock-up of 2 000 000 points; in Rev7 the same mock-up does not exceed    */
    /* 286 MB (see vision.for).                                                       */
    /* To work around this behaviour, the adopted solution is the following:          */
    /* when entering alloc_startup (here), no mmap has been done yet. We then do a    */
    /* malloc (of about 700 MB) that we free immediately. This moves the BREAK very   */
    /* high, and the BREAK never comes back down, even on free. Since mmap returns    */
    /* an address about 100 MB above the BREAK, we end up with the following memory   */
    /* partitioning: 700 MB for the mallocs, 500 MB (1.2 GB - 700 MB) for the mmaps.  */
    /* With a CLD_SD_SIZE of 2 000 000 we never reach 500 MB of mmap, even when       */
    /* loading applications (which use the mmap zone).                                */
    /* This memory partitioning may eventually be tuned through the environment       */
    /* variable CLD_HIGH_SBRK.                                                        */
    char *var;
    Standard_Size high_sbrk;

    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif

#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(LIN) || defined(linux) || defined(__FreeBSD__)
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    myMMap = -1;
#endif
  }

  // initialize free lists
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
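
// Worked example (illustrative, assuming myThreshold == 40000 and
// BLOCK_SHIFT == 1): ROUNDUP_CELL(39999) == 40000 and
// INDEX_CELL(40000) == 5000, so myFreeList holds 5001 list heads,
// one per 8-byte size class up to the threshold.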

//=======================================================================
//function : SetMMgrOptCallBack
//purpose  : Sets a callback function to be called on each alloc/free
//=======================================================================

static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;

Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
{
  MyPCallBackFunc = pFunc;
}

inline void callBack(const Standard_Boolean isAlloc,
                     const Standard_Address aStorage,
                     const Standard_Size aRoundSize,
                     const Standard_Size aSize)
{
  if (MyPCallBackFunc)
    (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize);
}
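
// Sketch of a tracing callback (assuming TPCallBackFunc matches the call
// above: Standard_Boolean, Standard_Address, Standard_Size, Standard_Size):
//
//   static void MyTraceCallBack (const Standard_Boolean theIsAlloc,
//                                const Standard_Address theStorage,
//                                const Standard_Size    theRoundSize,
//                                const Standard_Size    theSize)
//   {
//     printf ("%s %p round=%lu size=%lu\n",
//             theIsAlloc ? "alloc" : "free ", theStorage,
//             (unsigned long)theRoundSize, (unsigned long)theSize);
//   }
//   ...
//   Standard_MMgrOpt::SetCallBackFunction (MyTraceCallBack);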

//=======================================================================
//function : Allocate
//purpose  : 
//=======================================================================

Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  Standard_Size * aStorage = NULL;

  // round up size according to allocation granularity
  // The keyword 'volatile' is used here only for GCC 64-bit builds;
  // without it this method crashes at runtime in optimized builds.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of the standard library are already protected by their implementation.
    // The unlock is called as soon as possible, in every branch.
    // We also do not use Sentry, since if an OCC signal or exception is
    // raised from this block we will have a deadlock anyway...
    if (myReentrant) myMutex.Lock();

    // if a free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update the list pointer
      // to point to the next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      if ( myReentrant ) myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of the block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small, allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      if ( myReentrant ) myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by a sentry class so as to ensure unlocking in case of
      // a possible exception that may be thrown from AllocMemory()
      Standard_Mutex::SentryNested aSentry ( myMutexPools, myReentrant );

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate a new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece into the free lists
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                       * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            if (myReentrant) myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            if (myReentrant) myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make the new pool current (last),
        // getting the pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize the header of the new block by its size
      // and get the pointer to the user part of the block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance the pool pointer to the next free piece of the pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we no longer need to access any fields
      if ( myReentrant ) myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT,   sizeof(Standard_Size)) :
                                                          malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize the new block header by its size
      // and get the pointer to the user part of the block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute the size of the block to be allocated, including the header.
    // Note that we use the rounded size, even if this block will not be stored in
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and of the calculation of the index in the free list
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize the new block header by its size
    // and get the pointer to the user part of the block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
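
// Rough summary of the allocation regimes above: recyclable sizes
// (Index <= myFreeListMax) are served from the free lists when possible;
// otherwise sizes up to myCellSize are carved from small-block pools and
// medium sizes come from malloc/calloc; sizes above the threshold go
// through AllocMemory() (mmap where enabled) and are released directly.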

//=======================================================================
//function : Free
//purpose  : 
//=======================================================================

void Standard_MMgrOpt::Free(Standard_Address& theStorage)
{
  // safely return if attempting to free a null pointer
  if ( ! theStorage )
    return;

  // get the pointer to the memory block header
  Standard_Size* aBlock = GET_BLOCK(theStorage);

  // and get the allocated size of the block
  Standard_Size RoundSize = aBlock[0];

  callBack(Standard_False, theStorage, RoundSize, 0);

  // check whether blocks of that size are recyclable
  const Standard_Size Index = INDEX_CELL(RoundSize);
  if ( Index <= myFreeListMax ) {
    // Lock access to critical data (myFreeList and other) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of the standard library are already protected by their implementation.
    // We also do not use Sentry, since if an OCC signal or exception is
    // raised from this block we will have a deadlock anyway...
    if (myReentrant) myMutex.Lock();

    // in the memory block header, record the address of the next free block
    *(Standard_Size**)aBlock = myFreeList[Index];
    // add the new block as first in the list
    myFreeList[Index] = aBlock;

    if (myReentrant) myMutex.Unlock();
  }
  // otherwise, we have a block of big size which shall simply be released
  else
    FreeMemory (aBlock, RoundSize);

  theStorage = NULL;
}
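
// Standalone sketch of the intrusive free-list technique used in
// Allocate()/Free() above (illustrative, not part of the manager):
//
//   Standard_Size* aHead = NULL;             // head of one size class
//
//   // push a released block: the link is stored in the block itself
//   *(Standard_Size**)aBlock = aHead;
//   aHead = aBlock;
//
//   // pop a block for reuse
//   Standard_Size* aTaken = aHead;
//   aHead = *(Standard_Size**)aTaken;
//
// No extra bookkeeping memory is needed: the header word of a free
// block doubles as the list link, and is overwritten by the rounded
// size once the block is allocated again.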

//=======================================================================
//function : Purge
//purpose  : Frees all free lists except small blocks (less than CellSize)
//=======================================================================

Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean /*isDeleted*/)
{
  // Lock access to critical data by mutex
  Standard_Mutex::SentryNested aSentry (myMutex, myReentrant);

  // TODO: implement support for isDeleted = True

  // free the memory blocks contained in the free lists
  // whose sizes are greater than the cell size
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];
    while(aFree) {
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree;
      free(anOther);
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data by mutex
  Standard_Mutex::SentryNested aSentry1 ( myMutexPools, myReentrant );

  // release memory pools containing no busy memory;
  // to do that, for each pool count the total size of the blocks
  // from the free lists that were allocated from this pool
#ifndef WNT
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  while (aNextPool) {
    // fill the table of pools
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of free pools
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan the free blocks again, and remove those that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release the free pools, and reconnect the remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get the next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
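
// Numeric illustration (assumed values, not defaults): with myPageSize ==
// 4096 and myNbPages == 1000, PoolSize is 4 096 000 bytes on UNIX; a pool
// is released only when the sizes (headers included) of its free-listed
// blocks sum up to RPoolSize, i.e. when every cell carved from it has
// been returned to the free lists.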

//=======================================================================
//function : FreePools
//purpose  : Frees all memory pools allocated for small blocks
//=======================================================================

void Standard_MMgrOpt::FreePools()
{
  // Lock access to critical data by mutex
  Standard_Mutex::SentryNested aSentry ( myMutexPools, myReentrant );

  // the last pool is remembered in myAllocList
  Standard_Size * aFree = myAllocList;
  myAllocList = 0;
  while (aFree) {
    Standard_Size * aBlock = aFree;
    // the next pool address is stored in the first pointer-sized word of each pool
    aFree = * (Standard_Size **) aFree;
    // free the pool (note that its size is calculated rather than stored)
    FreeMemory ( aBlock, myPageSize * myNbPages );
  }
}

//=======================================================================
//function : Reallocate
//purpose  : 
//=======================================================================

Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address& theStorage,
                                              const Standard_Size theNewSize)
{
  Standard_Size * aBlock = GET_BLOCK(theStorage);
  Standard_Address newStorage = NULL;

  // get the current size of the memory block from its header
  Standard_Size OldSize = aBlock[0];

  // if the new size is not greater than the old one, just return the same block
  if (theNewSize <= OldSize) {
    newStorage = theStorage;
  }
  // otherwise, allocate a new block and copy the data to it
  else {
    newStorage = Allocate(theNewSize);
    memcpy (newStorage, theStorage, OldSize);
    Free( theStorage );
    // clear the newly added part of the block
    if ( myClear )
      memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize);
  }
  theStorage = NULL;
  return newStorage;
}

//=======================================================================
//function : AllocMemory
//purpose  : Allocate a big block of memory using either malloc/calloc
//           or a memory mapped file
//=======================================================================

Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as an efficient way to retry the allocation
retry:

  Standard_Size * aBlock = NULL;

  // if the MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef WNT

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is the file descriptor of /dev/zero (or /dev/null)
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise (strerror(errcode));
    }

    // save the actually allocated size into the argument
    Size = AlignedSize;

#else /* WNT */

    // align size to page size, taking into account the additional space needed
    // to store the handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = NULL;
    // check for error and try allocating address space
    if ( ! hMap || GetLastError() == ERROR_ALREADY_EXISTS ||
         ! (aMBlock = (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0)) )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make an error message and raise exception
      const int BUFSIZE=1024;
      char message[BUFSIZE];
      if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
        strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
      anOutOfMemError->Reraise (message);
    }

    // record the map handle in the beginning
    aMBlock[0] = hMap;

    // and shift to the beginning of the usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save the actually allocated size into the argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear the whole block if the clearing option is set
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}

//=======================================================================
//function : FreeMemory
//purpose  : 
//=======================================================================

void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
                                   const Standard_Size
#ifndef WNT
                                   aSize
#endif
                                  )
{
  // release memory (either free or unmap)
  if ( myMMap ) {
#ifndef WNT
    // align size to page size, just the same as in AllocMemory()
    const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
    munmap((char*)aBlock, AlignedSize);
#else
    // recover handle to the memory mapping stored just before the block
    const HANDLE * aMBlock = (const HANDLE *)aBlock;
    HANDLE hMap = *(--aMBlock);
    UnmapViewOfFile((LPCVOID)aMBlock);
    CloseHandle (hMap);
#endif
  }
  else
    free(aBlock);
}

//=======================================================================
//function : SetReentrant
//purpose  : 
//=======================================================================

void Standard_MMgrOpt::SetReentrant(Standard_Boolean isReentrant)
{
  myReentrant = isReentrant;
}