0023947: Eliminate trivial compiler warnings in MSVC++ with warning level 4
[occt.git] / src / Standard / Standard_MMgrOpt.cxx
CommitLineData
b311480e 1// Created on: 2005-03-15
2// Created by: Peter KURNEV
3// Copyright (c) 2005-2012 OPEN CASCADE SAS
4//
5// The content of this file is subject to the Open CASCADE Technology Public
6// License Version 6.5 (the "License"). You may not use the content of this file
7// except in compliance with the License. Please obtain a copy of the License
8// at http://www.opencascade.org and read it completely before using this file.
9//
10// The Initial Developer of the Original Code is Open CASCADE S.A.S., having its
11// main offices at: 1, place des Freres Montgolfier, 78280 Guyancourt, France.
12//
13// The Original Code and all software distributed under the License is
14// distributed on an "AS IS" basis, without warranty of any kind, and the
15// Initial Developer hereby disclaims all such warranties, including without
16// limitation, any warranties of merchantability, fitness for a particular
17// purpose or non-infringement. Please see the License for the specific terms
18// and conditions governing the rights and limitations under the License.
19
7fd59977 20
21#include <Standard_MMgrOpt.hxx>
22#include <Standard_OutOfMemory.hxx>
23
7fd59977 24#ifdef HAVE_CONFIG_H
25# include <config.h>
26#endif
27
28#include <stdio.h>
29
30#ifdef HAVE_STRING_H
31# include <string.h>
32#endif
33
34#ifndef WNT
35# include <stdlib.h>
36# include <errno.h>
37#endif
38
39#ifdef WNT
40#include <windows.h>
41#else
42# ifdef HAVE_UNISTD_H
43# include <unistd.h>
44# endif
45# ifdef HAVE_SYS_MMAN_H
46# include <sys/mman.h> /* mmap() */
47# endif
48#endif
49#ifdef HAVE_MALLOC_H
50# include <malloc.h>
51#endif
52#include <stdlib.h>
53#include <sys/types.h>
54#include <sys/stat.h>
55#include <fcntl.h>
56//
57#if defined (__sun) || defined(SOLARIS)
58extern "C" int getpagesize() ;
59#endif
60
61//======================================================================
62// Assumptions
63//======================================================================
64
65// This implementation makes a number of assumptions regarding size of
66// types:
67//
68// sizeof(Standard_Size) == sizeof(Standard_Address==void*)
69//
70// On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size)
71
72//======================================================================
73// Naming conventions
74//======================================================================
75
76// For clarity of implementation, the following conventions are used
77// for naming variables:
78//
79// ...Size: size in bytes
80//
81// RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity
82//
83// ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each
84//
85// ...Storage: address of the user area of the memory block (Standard_Address)
86//
// ...Block:     address of the whole memory block (header) (Standard_Size*)
88
89//======================================================================
90// Macro definitions
91//======================================================================
92
93//
94// MMAP_BASE_ADDRESS, MMAP_FLAGS
95#if defined (__hpux) || defined(HPUX)
96#define MMAP_BASE_ADDRESS 0x80000000
97#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
98#elif defined (__osf__) || defined(DECOSF1)
99#define MMAP_BASE_ADDRESS 0x1000000000
100#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
101#elif defined(_AIX)
102#define MMAP_BASE_ADDRESS 0x80000000
103#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
104#elif defined(__APPLE__)
105#define MMAP_BASE_ADDRESS 0x80000000
106#define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE)
107#elif defined(LIN)
108#define MMAP_BASE_ADDRESS 0x20000000
109#define MMAP_FLAGS (MAP_PRIVATE)
110#elif defined(WNT)
111//static HANDLE myhMap;
112#else
113#define MMAP_BASE_ADDRESS 0x60000000
114#define MMAP_FLAGS (MAP_PRIVATE)
115#endif
116
117// Round size up to the specified page size
118#define PAGE_ALIGN(size,thePageSize) \
119 (((size) + (thePageSize) - 1) & ~((thePageSize) - 1))
120
121// Round size up to 4, 8, or 16 bytes
122// Note that 0 yields 0
123#define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf)
124#define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7)
125#define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3)
126#define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7)
127
128// The following two macros define granularity of memory allocation,
129// by rounding size to the size of the allocation cell,
130// and obtaining cell index from rounded size.
131// Note that granularity shall be not less than sizeof(Standard_Size)
132
133// Traditional implementation: granularity 16 bytes
134//#define ROUNDUP_CELL(size) ROUNDUP16(size)
135//#define INDEX_CELL(rsize) ((rsize) >> 4)
136
137// Reduced granularity: 8 bytes
138#define ROUNDUP_CELL(size) ROUNDUP8(size)
139#define ROUNDDOWN_CELL(size) ROUNDDOWN8(size)
140#define INDEX_CELL(rsize) ((rsize) >> 3)
141
142// Minimal granularity: 4 bytes (32-bit systems only)
143#ifndef _OCC64
144//#define ROUNDUP_CELL(size) ROUNDUP4(size)
145//#define INDEX_CELL(rsize) ((rsize) >> 2)
146#endif
147
148// Adaptive granularity, less for little blocks and greater for bigger ones:
149/*
150#if _OCC64
151#define ROUNDUP_CELL(size) ((size) <= 0x40 ? ROUNDUP8(size) : ROUNDUP16(size))
152#define INDEX_CELL(rsize) ((rsize) <= 0x40 ? ((rsize) >> 3) : (4 + ((rsize) >> 4)))
153#else
154#define ROUNDUP_CELL(size) ((size) <= 0x40 ? ROUNDUP4(size) : ROUNDUP8(size))
155#define INDEX_CELL(rsize) ((rsize) <= 0x40 ? ((rsize) >> 2) : (8 + ((rsize) >> 3)))
156#endif
157*/
158
159
/* In the allocated block, the first bytes are used for storing the memory
   manager's data (size of block). The minimal size of these data is sizeof(int).
   The memory allocated by the system is usually aligned by 16 bytes. The alignment
   of the data area in the memory block is shifted by BLOCK_SHIFT*sizeof(Standard_Size)
   bytes.
   It is OK for WNT, SUN and Linux systems, but on SGI the alignment should be 8 bytes.
   So, BLOCK_SHIFT is formed as a macro to support other possible platforms.
*/
168
169#if defined(IRIX) || defined(SOLARIS)
170#define BLOCK_SHIFT 2
171#else
172#define BLOCK_SHIFT 1
173#endif
174
175// Get address of user area from block address, and vice-versa
176#define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT)
177#define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT)
178
179// create static instance of out-of-memory exception to protect
180// against possible lack of memory for its raising
181static Handle(Standard_OutOfMemory) anOutOfMemError = new Standard_OutOfMemory;
182
183//=======================================================================
184//function : Standard_MMgr
185//purpose :
186//=======================================================================
187
188Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear,
189 const Standard_Boolean aMMap,
190 const Standard_Size aCellSize,
191 const Standard_Integer aNbPages,
bd0c22ce 192 const Standard_Size aThreshold)
7fd59977 193{
194 // check basic assumption
195 if ( sizeof(Standard_Size) != sizeof(Standard_Address) )
196 {
197 cerr << "Fatal error: Open CASCADE Optimized Memory manager: this platform is not supported!" << endl;
198 exit(1);
199 }
200
201 // clear buffer fields
202 myFreeListMax = 0;
203 myFreeList = NULL;
204 myPageSize = 0;
205 myAllocList = NULL;
206 myNextAddr = NULL;
207 myEndBlock = NULL;
208
209 // initialize parameters
210 myClear = aClear;
211 myMMap = (Standard_Integer)aMMap;
212 myCellSize = aCellSize;
213 myNbPages = aNbPages;
214 myThreshold = aThreshold;
7fd59977 215
216 // initialize
217 Initialize();
218}
219
//=======================================================================
//function : ~Standard_MMgrOpt
//purpose  : Destructor: returns all recyclable blocks to the system,
//           releases the free lists table and all memory pools
//=======================================================================

Standard_MMgrOpt::~Standard_MMgrOpt()
{
  // release all blocks accumulated in the free lists
  Purge(Standard_True);
  // free the array of free-list heads itself
  free(myFreeList);
  
  // NOTE: freeing pools may be dangerous if not all memory taken by 
  //       this instance of the memory manager has been freed 
  FreePools();
}
234
235// interface level
236
//=======================================================================
//function : Initialize
//purpose  : Completes initialization: validates myNbPages, detects the
//           system page size, prepares the memory-mapping device (on
//           platforms that need one) and allocates the free lists table
//=======================================================================

void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools 
  if ( myNbPages < 100 ) 
    myNbPages = 1000;
  
  // get system-dependent page size
#ifndef WNT
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0;  // cannot determine page size => disable memory mapping
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    /* (translated) Conflict between the malloc area and the mmap area on SGI.
       This problem was identified on IRIX 5.3 up to IRIX 6.2 and does not seem
       to occur on IRIX 6.4. Successive mallocs return increasing addresses
       (starting from 0x10000000) - the so-called BREAK pointer. The first mmap
       is forced at address MMAP_BASE_ADDRESS (i.e. 0x60000000 on SGI), but the
       following mmaps are placed by the system (flag MAP_VARIABLE).
       Unfortunately it returns the lowest possible address in the malloc area,
       just above the BREAK, i.e. 0x18640000, which leaves an allocation space
       of only about 140 Mb for mallocs. On big models, peaks of 680 Mb were
       seen in Rev6 for a test model of 2 000 000 points; in Rev7 the same
       model does not exceed 286 Mb (see vision.for).
       To work around this behaviour, the adopted solution is the following:
       when entering alloc_startup (here), no mmap has been done yet.
       We then perform a malloc (of about 700 Mb) which is freed immediately.
       The consequence is that the BREAK moves very high; the BREAK never goes
       down again, even on free. Since the mmap returns an address about
       100 Mb above the BREAK, we end up with the following split of the
       memory zones: 700 Mb for mallocs - 500 Mb (1.2 Gb - 700 Mb) for mmaps.
       With a CLD_SD_SIZE of 2 000 000, the 500 Mb of mmap are never reached,
       even when loading applications (which use the mmap zone).
       This split of the memory zones may be tuned via the environment
       variable CLD_HIGH_SBRK. */
    char *var;
    Standard_Size high_sbrk;
    
    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    // transient allocation that only serves to push the BREAK pointer up
    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif
    
#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(LIN) || defined(linux) || defined(__FreeBSD__)
    // on these systems mmap needs a file descriptor; reuse myMMap to hold it
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    // anonymous mapping (or Win32 file mapping): no descriptor needed,
    // any non-zero value keeps the mmap mode enabled
    myMMap = -1;
#endif
  }
  
  // initialize free lists
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
317
//=======================================================================
//function : SetMMgrOptCallBack
//purpose  : Sets a callback function to be called on each alloc/free
//=======================================================================

// file-scope pointer to the user callback; NULL means no callback installed
static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;

Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
{
  MyPCallBackFunc = pFunc;
}
329
330inline void callBack(const Standard_Boolean isAlloc,
331 const Standard_Address aStorage,
332 const Standard_Size aRoundSize,
333 const Standard_Size aSize)
334{
335 if (MyPCallBackFunc)
336 (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize);
337}
338
//=======================================================================
//function : Allocate
//purpose  : Allocates aSize bytes and returns pointer to the user area.
//           The strategy depends on the rounded size:
//           - recyclable sizes (Index <= myFreeListMax): reuse a block
//             from the corresponding free list if one is available;
//           - small sizes (RoundSize <= myCellSize): carve the block
//             from the current pool, allocating a new pool when needed;
//           - other recyclable sizes: malloc/calloc directly;
//           - big sizes: AllocMemory (possibly a memory mapped area).
//=======================================================================

Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  Standard_Size * aStorage = NULL;
  
  // round up size according to allocation granularity
  // The keyword 'volatile' is only used here for GCC 64-bit compilations
  // otherwise this method would crash in runtime in optimized build.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    // size of the block counted in Standard_Size items
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // The unlock is called as soon as possible, for every treatment case.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();
    
    // if free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update list pointer
      // to point to next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by sentry class so as to ensure unlocking in case of
      // possible exception that may be thrown from AllocMemory()
      Standard_Mutex::Sentry aSentry (myMutexPools);

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece to the free lists
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                       * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            // push the leftover block at the head of its free list
            myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make new pool current (last)
        // and get pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize header of the new block by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance pool pointer to the next free piece of pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we do not need further to access any field
      myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
        malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize new block header by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute size of the block to be allocated, including header,
    // Note that we use rounded size, even if this block will not be stored in 
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and calculation of index in the free list
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize new block header by its size
    // and get the pointer to the user part of block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  // notify the user callback (if installed) about the allocation
  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
481
//=======================================================================
//function : Free
//purpose  : Frees a block previously obtained from Allocate(): pushes
//           it onto the corresponding free list when its size is
//           recyclable, otherwise releases it to the system.
//           theStorage is reset to NULL on return.
//=======================================================================

void Standard_MMgrOpt::Free(Standard_Address& theStorage)
{
  // safely return if attempt to free null pointer
  if ( ! theStorage )
    return;

  // get the pointer to the memory block header
  Standard_Size* aBlock = GET_BLOCK(theStorage);
  
  // and get the allocated size of the block
  Standard_Size RoundSize = aBlock[0];
  
  // notify the user callback (if installed) about the deallocation
  callBack(Standard_False, theStorage, RoundSize, 0);
  
  // check whether blocks with that size are recyclable
  const Standard_Size Index = INDEX_CELL(RoundSize);
  if ( Index <= myFreeListMax ) {
    // Lock access to critical data (myFreeList and other) by mutex
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();
    
    // in the memory block header, record address of the next free block
    *(Standard_Size**)aBlock = myFreeList[Index];
    // add new block to be first in the list
    myFreeList[Index] = aBlock;

    myMutex.Unlock();
  }
  // otherwise, we have block of big size which shall be simply released
  else 
    FreeMemory (aBlock, RoundSize);

  theStorage = NULL;
}
525
//=======================================================================
//function : Purge
//purpose  : Frees all free lists except small blocks (less than CellSize),
//           then releases memory pools containing no busy blocks.
//           Returns the number of blocks removed from the free lists.
//=======================================================================

Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
{
  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry (myMutex);

  // TODO: implement support for isDeleted = True
  
  // free memory blocks contained in free lists
  // whose sizes are greater than cellsize
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];      
    while(aFree) {
      // the head of the block stores the address of the next free block
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree;
      free(anOther); 
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry1 (myMutexPools);

  // release memory pools containing no busy memory;
  // for that for each pool count the summary size of blocks
  // got from the free lists allocated from this pool
#ifndef WNT
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  // on WNT the pool is shortened by the size of the map HANDLE
  // stored in front of it (see AllocMemory)
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  // process the pools chain by windows of NB_POOLS_WIN pools
  while (aNextPool) {
    // fill the table of pools
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      // full size of a free block of cell index i, including its header
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of free pools
    // (a pool is free when all of its space is in the free lists)
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan free blocks again, and remove those of them
    // that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release free pools, and reconnect remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
688
689//=======================================================================
690//function : FreePools
691//purpose : Frees all memory pools allocated for small blocks
692//=======================================================================
693
694void Standard_MMgrOpt::FreePools()
695{
696 // Lock access to critical data by mutex
bd0c22ce 697 Standard_Mutex::Sentry aSentry (myMutexPools);
7fd59977 698
699 // last pool is remembered in myAllocList
700 Standard_Size * aFree = myAllocList;
701 myAllocList = 0;
702 while (aFree) {
703 Standard_Size * aBlock = aFree;
704 // next pool address is stored in first 8 bytes of each pool
705 aFree = * (Standard_Size **) aFree;
706 // free pool (note that its size is calculated rather than stored)
707 FreeMemory ( aBlock, myPageSize * myNbPages );
708 }
709}
710
711//=======================================================================
712//function : Reallocate
713//purpose :
714//=======================================================================
715
716Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address& theStorage,
717 const Standard_Size theNewSize)
718{
cf9a910a 719 // if theStorage == NULL, just allocate new memory block
720 if (!theStorage)
721 {
722 return Allocate(theNewSize);
723 }
724
7fd59977 725 Standard_Size * aBlock = GET_BLOCK(theStorage);
726 Standard_Address newStorage = NULL;
727
728 // get current size of the memory block from its header
729 Standard_Size OldSize = aBlock[0];
730
731 // if new size is less than old one, just do nothing
732 if (theNewSize <= OldSize) {
733 newStorage = theStorage;
734 }
735 // otherwise, allocate new block and copy the data to it
736 else {
737 newStorage = Allocate(theNewSize);
738 memcpy (newStorage, theStorage, OldSize);
739 Free( theStorage );
740 // clear newly added part of the block
741 if ( myClear )
742 memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize);
743 }
744 theStorage = NULL;
745 return newStorage;
746}
747
//=======================================================================
//function : AllocMemory
//purpose  : Allocate a big block of memory using either malloc/calloc
//           or memory mapped file; on success Size is updated to the
//           actually allocated (page-aligned) size.
//           On failure, Purge() is attempted once before raising
//           Standard_OutOfMemory.
//=======================================================================

Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as efficient method for a possibility to retry allocation
retry:

  Standard_Size * aBlock = NULL;

  // if MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef WNT

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is file descriptor for /dev/null
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      // save errno before Purge() may overwrite it
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise (strerror(errcode));
    }

    // save actually allocated size into argument
    Size = AlignedSize;

#else /* WNT */

    // align size to page size, taking into account additional space needed to
    // store handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = NULL;
    // check for error and try allocating address space
    if ( ! hMap || GetLastError() == ERROR_ALREADY_EXISTS ||
         ! ((aMBlock = (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0))) )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make error message and raise exception
      const int BUFSIZE=1024;
      char message[BUFSIZE];
      if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
        strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
      anOutOfMemError->Reraise (message);
    }

    // record map handle in the beginning
    aMBlock[0] = hMap;

    // and shift to the beginning of usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save actually allocated size into argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear whole block if clearing option is set
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}
844
//=======================================================================
//function : FreeMemory
//purpose  : Releases a block obtained from AllocMemory(): unmaps the
//           memory mapped area when mmap mode is on, otherwise calls
//           free(). On WNT aSize is unused since the mapping handle is
//           stored just before the block.
//=======================================================================

void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock, 
                                   const Standard_Size
#ifndef WNT                          
                                   aSize
#endif
                                  )
{
  // release memory (either free or unmap)
  if ( myMMap ) {
#ifndef WNT
    // align size to page size, just the same as in AllocMemory()
    const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
    munmap((char*)aBlock, AlignedSize);
#else
    // recover handle to the memory mapping stored just before the block
    const HANDLE * aMBlock = (const HANDLE *)aBlock;
    HANDLE hMap = *(--aMBlock);
    UnmapViewOfFile((LPCVOID)aMBlock);
    CloseHandle (hMap);
#endif
  }
  else
    free(aBlock);
}