0027645: Data Exchange - access violation when reading STEP AP242 file
[occt.git] / src / Standard / Standard_MMgrOpt.cxx
CommitLineData
b311480e 1// Created on: 2005-03-15
2// Created by: Peter KURNEV
973c2be1 3// Copyright (c) 2005-2014 OPEN CASCADE SAS
b311480e 4//
973c2be1 5// This file is part of Open CASCADE Technology software library.
b311480e 6//
d5f74e42 7// This library is free software; you can redistribute it and/or modify it under
8// the terms of the GNU Lesser General Public License version 2.1 as published
973c2be1 9// by the Free Software Foundation, with special exception defined in the file
10// OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
11// distribution for complete text of the license and disclaimer of any warranty.
b311480e 12//
973c2be1 13// Alternatively, this file may be used under the terms of Open CASCADE
14// commercial license or contractual agreement.
7fd59977 15
16#include <Standard_MMgrOpt.hxx>
17#include <Standard_OutOfMemory.hxx>
8b381bc3 18#include <Standard_Assert.hxx>
7fd59977 19
20#include <stdio.h>
d8d01f6e 21#include <errno.h>
7fd59977 22
03155c18 23#ifdef _WIN32
24# include <windows.h>
7fd59977 25#else
03155c18 26# include <sys/mman.h> /* mmap() */
7fd59977 27#endif
03155c18 28
7fd59977 29#include <fcntl.h>
30//
31#if defined (__sun) || defined(SOLARIS)
32extern "C" int getpagesize() ;
33#endif
34
35//======================================================================
36// Assumptions
37//======================================================================
38
39// This implementation makes a number of assumptions regarding size of
40// types:
41//
42// sizeof(Standard_Size) == sizeof(Standard_Address==void*)
43//
44// On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size)
45
46//======================================================================
47// Naming conventions
48//======================================================================
49
50// For clarity of implementation, the following conventions are used
51// for naming variables:
52//
53// ...Size: size in bytes
54//
55// RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity
56//
57// ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each
58//
59// ...Storage: address of the user area of the memory block (Standard_Address)
60//
// ...Block: address of the whole memory block (header) (Standard_Size*)
62
63//======================================================================
64// Macro definitions
65//======================================================================
66
67//
68// MMAP_BASE_ADDRESS, MMAP_FLAGS
// MMAP_BASE_ADDRESS: preferred address hint passed to mmap() for big blocks;
// MMAP_FLAGS: platform-specific mmap() flags (anonymous/private mapping where
// the platform supports it, plain private mapping of /dev/zero elsewhere).
#if defined (__hpux) || defined(HPUX)
#define MMAP_BASE_ADDRESS 0x80000000
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
#elif defined (__osf__) || defined(DECOSF1)
#define MMAP_BASE_ADDRESS 0x1000000000
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
#elif defined(_AIX)
#define MMAP_BASE_ADDRESS 0x80000000
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
#elif defined(__APPLE__)
#define MMAP_BASE_ADDRESS 0x80000000
#define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE)
#elif defined(__linux__)
#define MMAP_BASE_ADDRESS 0x20000000
#define MMAP_FLAGS (MAP_PRIVATE)
#elif defined(_WIN32)
// on Windows, memory-mapped files are created via CreateFileMapping();
// no base address / flags macros are needed
//static HANDLE myhMap;
#else
#define MMAP_BASE_ADDRESS 0x60000000
#define MMAP_FLAGS (MAP_PRIVATE)
#endif

// Round size up to the specified page size
#define PAGE_ALIGN(size,thePageSize) \
  (((size) + (thePageSize) - 1) & ~((thePageSize) - 1))

// Round size up to 4, 8, or 16 bytes
// Note that 0 yields 0
#define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf)
#define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7)
#define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3)
#define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7)

// The following two macros define granularity of memory allocation,
// by rounding size to the size of the allocation cell,
// and obtaining cell index from rounded size.
// Note that granularity shall be not less than sizeof(Standard_Size)

// Traditional implementation: granularity 16 bytes
//#define ROUNDUP_CELL(size) ROUNDUP16(size)
//#define INDEX_CELL(rsize) ((rsize) >> 4)

// Reduced granularity: 8 bytes
#define ROUNDUP_CELL(size) ROUNDUP8(size)
#define ROUNDDOWN_CELL(size) ROUNDDOWN8(size)
#define INDEX_CELL(rsize) ((rsize) >> 3)

/* In each allocated block the first bytes are used to store the memory
   manager's own data (the size of the block).  The minimal size of this
   header is sizeof(int).  Memory returned by the system is usually aligned
   on 16 bytes, so the alignment of the user data area in the block is
   shifted by BLOCK_SHIFT*sizeof(Standard_Size) bytes.
   A shift of one word is OK for Windows, SUN and Linux, but on SGI
   (and Solaris) the alignment should be 8 bytes (two 32-bit words), so
   BLOCK_SHIFT is defined as a macro to allow tuning per platform.
*/

#if defined(IRIX) || defined(SOLARIS)
#define BLOCK_SHIFT 2
#else
#define BLOCK_SHIFT 1
#endif

// Get address of user area from block address, and vice-versa
#define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT)
#define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT)
134
7fd59977 135//=======================================================================
136//function : Standard_MMgr
137//purpose :
138//=======================================================================
139
140Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear,
141 const Standard_Boolean aMMap,
142 const Standard_Size aCellSize,
143 const Standard_Integer aNbPages,
bd0c22ce 144 const Standard_Size aThreshold)
7fd59977 145{
146 // check basic assumption
8b381bc3 147 Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address));
7fd59977 148
149 // clear buffer fields
150 myFreeListMax = 0;
151 myFreeList = NULL;
152 myPageSize = 0;
153 myAllocList = NULL;
154 myNextAddr = NULL;
155 myEndBlock = NULL;
156
157 // initialize parameters
158 myClear = aClear;
159 myMMap = (Standard_Integer)aMMap;
160 myCellSize = aCellSize;
161 myNbPages = aNbPages;
162 myThreshold = aThreshold;
7fd59977 163
164 // initialize
165 Initialize();
166}
167
168//=======================================================================
169//function : ~Standard_MMgrOpt
170//purpose :
171//=======================================================================
172
//! Destructor: returns everything the manager still holds back to the
//! system.  Order matters: Purge() first releases the recyclable blocks
//! referenced by the free lists, then the free-list array itself is freed,
//! and finally the small-block pools are unmapped/freed.
Standard_MMgrOpt::~Standard_MMgrOpt()
{
  Purge(Standard_True);
  free(myFreeList);

  // NOTE: freeing pools may be dangerous if not all memory taken by
  // this instance of the memory manager has been freed
  FreePools();
}
182
183// interface level
184
185//=======================================================================
186//function : Initialize
187//purpose :
188//=======================================================================
189
//! Completes construction: validates the pool size, queries the system
//! page size, opens the file descriptor used for mmap() where needed,
//! and allocates the array of free-list heads.
void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools
  if ( myNbPages < 100 )
    myNbPages = 1000;

  // get system-dependent page size; if it cannot be obtained on UNIX,
  // disable the mmap mode entirely
#ifndef _WIN32
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0;
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    /* Conflict between the malloc zone and the mmap zone on SGI.          */
    /* This problem was identified on IRIX 5.3 up to IRIX 6.2; it does not */
    /* seem to appear on IRIX 6.4.                                         */
    /* Successive mallocs return increasing addresses (starting from       */
    /* 0x10000000), i.e. the BREAK pointer.  The first mmap is forced at   */
    /* MMAP_BASE_ADDRESS (0x60000000 on SGI), but subsequent mmaps are     */
    /* placed by the system (flag MAP_VARIABLE).  Unfortunately it returns */
    /* the lowest possible address just above the BREAK, around 0x18640000,*/
    /* leaving only about 140 MB of allocation space for malloc.  On big   */
    /* models malloc usage could peak at 680 MB in Rev6 for a test model   */
    /* of 2 000 000 points; in Rev7 the same model would not exceed 286 MB */
    /* (see vision.for).                                                   */
    /* To work around this behavior, the adopted solution is: when this    */
    /* startup code runs, no mmap has been done yet.  We malloc a large    */
    /* block (about 700 MB) and free it immediately.  This pushes the      */
    /* BREAK very high, and the BREAK never comes back down even after the */
    /* free.  Since mmap then returns addresses about 100 MB above the     */
    /* BREAK, the memory is split as follows: 700 MB for malloc, 500 MB    */
    /* (1.2 GB - 700 MB) for mmap.  With a CLD_SD_SIZE of 2 000 000 the    */
    /* 500 MB of mmap is never reached, even when loading applications     */
    /* that use the mmap zone.                                             */
    /* This split can be tuned via the environment variable CLD_HIGH_SBRK. */
    char *var;
    Standard_Size high_sbrk;

    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif

    // on these systems myMMap is reused to hold the file descriptor of
    // /dev/zero (or /dev/null) that backs the private mappings;
    // elsewhere (-1) anonymous mapping / Windows file mapping is used
#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(__linux__) || defined(linux) || defined(__FreeBSD__) || defined(__ANDROID__)
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    myMMap = -1;
#endif
  }

  // initialize free lists: one list head per cell index up to the threshold
  // NOTE(review): myThreshold (bytes) minus BLOCK_SHIFT (a word count) looks
  // dimensionally inconsistent — presumably harmless after rounding, but
  // verify the intended formula
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
265
266//=======================================================================
267//function : SetMMgrOptCallBack
268//purpose : Sets a callback function to be called on each alloc/free
269//=======================================================================
270
271static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;
272
273Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
274{
275 MyPCallBackFunc = pFunc;
276}
277
278inline void callBack(const Standard_Boolean isAlloc,
279 const Standard_Address aStorage,
280 const Standard_Size aRoundSize,
281 const Standard_Size aSize)
282{
283 if (MyPCallBackFunc)
284 (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize);
285}
286
287//=======================================================================
288//function : Allocate
289//purpose :
290//=======================================================================
291
//! Allocates aSize bytes and returns the address of the user area.
//! Three strategies depending on the rounded size:
//!  - recyclable sizes (index <= myFreeListMax): reuse a block from the
//!    free list if available;
//!  - otherwise, small sizes (<= myCellSize): carve the block out of the
//!    current small-block pool, allocating a new pool when exhausted;
//!  - otherwise medium sizes: plain malloc/calloc;
//! Sizes above the threshold go through AllocMemory() (possibly mmap).
Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  // pointer to the user area of the block that will be returned
  Standard_Size * aStorage = NULL;

  // round up size according to allocation granularity
  // The keyword 'volatile' is only used here for GCC 64-bit compilations
  // otherwise this method would crash in runtime in optimized build.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // The unlock is called as soon as possible, for every treatment case.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // if free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update list pointer
      // to point to next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by sentry class so as to ensure unlocking in case of
      // possible exception that may be thrown from AllocMemory()
      Standard_Mutex::Sentry aSentry (myMutexPools);

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece of the old pool to the free lists
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                       * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            // NOTE(review): myMutex is taken here while myMutexPools is
            // already held; Purge() acquires them in the opposite order
            // (myMutex first, then myMutexPools) — looks like a potential
            // lock-order inversion; verify against concurrent usage
            myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make new pool current (last)
        // and get pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize header of the new block by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance pool pointer to the next free piece of pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we do not need further to access any field
      myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
        malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          Standard_OutOfMemory::Raise ("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize new block header by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute size of the block to be allocated, including header,
    // Note that we use rounded size, even if this block will not be stored in
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and calculation of index in the free list
    // NOTE(review): the header is accounted as one Standard_Size here, while
    // GET_USER advances by BLOCK_SHIFT words; on platforms where BLOCK_SHIFT
    // is 2 (IRIX/SOLARIS) this looks one word short — confirm
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize new block header by its size
    // and get the pointer to the user part of block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  // notify the user callback, if installed
  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
429
430//=======================================================================
431//function : Free
432//purpose :
433//=======================================================================
434
//! Releases a block previously returned by Allocate()/Reallocate().
//! Recyclable blocks (size below the threshold) are pushed onto the
//! corresponding free list; bigger blocks are returned to the system.
void Standard_MMgrOpt::Free(Standard_Address theStorage)
{
  // safely return if attempt to free null pointer
  if ( ! theStorage )
    return;

  // get the pointer to the memory block header
  Standard_Size* aBlock = GET_BLOCK(theStorage);

  // and get the allocated size of the block
  // (read without locking: the header belongs to this block only)
  Standard_Size RoundSize = aBlock[0];

  // notify the user callback, if installed
  callBack(Standard_False, theStorage, RoundSize, 0);

  // check whether blocks with that size are recyclable
  const Standard_Size Index = INDEX_CELL(RoundSize);
  if ( Index <= myFreeListMax ) {
    // Lock access to critical data (myFreeList and other) by mutex
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // in the memory block header, record address of the next free block
    *(Standard_Size**)aBlock = myFreeList[Index];
    // add new block to be first in the list
    myFreeList[Index] = aBlock;

    myMutex.Unlock();
  }
  // otherwise, we have block of big size which shall be simply released
  // NOTE(review): AllocMemory() was called with RoundSize plus the header
  // size, but only RoundSize is passed back here; after page alignment the
  // unmapped length may be one page short in boundary cases — verify
  else
    FreeMemory (aBlock, RoundSize);
}
471
472//=======================================================================
473//function : Purge
474//purpose : Frees all free lists except small blocks (less than CellSize)
475//=======================================================================
476
//! Frees all free lists except small blocks (less than CellSize), and
//! releases every small-block pool whose cells are all sitting on the
//! free lists.  Returns the number of blocks removed from the free lists.
//! NOTE(review): acquires myMutex then myMutexPools, which appears to be
//! the reverse of the order used in Allocate()'s pool branch — confirm
//! there is no deadlock scenario under concurrent use.
Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
{
  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry (myMutex);

  // TODO: implement support for isDeleted = True

  // free memory blocks contained in free lists
  // whose sizes are greater than cellsize
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];
    while(aFree) {
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree;
      free(anOther);
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry1 (myMutexPools);

  // release memory pools containing no busy memory;
  // for that for each pool count the summary size of blocks
  // got from the free lists allocated from this pool
#ifndef _WIN32
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  // on Windows a HANDLE to the file mapping is stored in front of each pool
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  // NOTE: these statics make Purge() non-reentrant; they are effectively
  // serialized by the myMutexPools sentry held for the rest of the function
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  // process the linked list of pools in windows of up to NB_POOLS_WIN pools
  while (aNextPool) {
    // fill the table of pools
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters of free bytes per pool
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      // full size (header + cell) of a block on free list i
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of free pools
    // (a pool is free when the sum of its free blocks equals the pool size)
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan free blocks again, and remove those of them
    // that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove the block from the free list (it lives in a free pool)
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release free pools, and reconnect remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
634
635//=======================================================================
636//function : FreePools
637//purpose : Frees all memory pools allocated for small blocks
638//=======================================================================
639
640void Standard_MMgrOpt::FreePools()
641{
642 // Lock access to critical data by mutex
bd0c22ce 643 Standard_Mutex::Sentry aSentry (myMutexPools);
7fd59977 644
645 // last pool is remembered in myAllocList
646 Standard_Size * aFree = myAllocList;
647 myAllocList = 0;
648 while (aFree) {
649 Standard_Size * aBlock = aFree;
650 // next pool address is stored in first 8 bytes of each pool
651 aFree = * (Standard_Size **) aFree;
652 // free pool (note that its size is calculated rather than stored)
653 FreeMemory ( aBlock, myPageSize * myNbPages );
654 }
655}
656
657//=======================================================================
658//function : Reallocate
659//purpose :
660//=======================================================================
661
547702a1 662Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage,
7fd59977 663 const Standard_Size theNewSize)
664{
cf9a910a 665 // if theStorage == NULL, just allocate new memory block
666 if (!theStorage)
667 {
668 return Allocate(theNewSize);
669 }
670
7fd59977 671 Standard_Size * aBlock = GET_BLOCK(theStorage);
672 Standard_Address newStorage = NULL;
673
674 // get current size of the memory block from its header
675 Standard_Size OldSize = aBlock[0];
676
677 // if new size is less than old one, just do nothing
678 if (theNewSize <= OldSize) {
679 newStorage = theStorage;
680 }
681 // otherwise, allocate new block and copy the data to it
682 else {
683 newStorage = Allocate(theNewSize);
684 memcpy (newStorage, theStorage, OldSize);
685 Free( theStorage );
686 // clear newly added part of the block
687 if ( myClear )
688 memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize);
689 }
7fd59977 690 return newStorage;
691}
692
693//=======================================================================
694//function : AllocMemory
695//purpose : Allocate a big block of memory using either malloc/calloc
696// or memory mapped file
697//=======================================================================
698
//! Allocate a big block of memory using either malloc/calloc or a memory
//! mapped file, depending on the myMMap setting.  On return, Size is
//! updated to the size actually allocated (page-aligned in mmap mode).
//! On allocation failure the free lists are purged once and the request
//! retried; if that fails too, Standard_OutOfMemory is raised.
Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as efficient method for a possibility to retry allocation
retry:

  Standard_Size * aBlock = NULL;

  // if MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef _WIN32

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is file descriptor for /dev/null
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      // capture errno before Purge() can disturb it
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      Standard_OutOfMemory::Raise (strerror(errcode));
    }

    // save actually allocated size into argument
    Size = AlignedSize;

#else /* _WIN32 */

    // align size to page size, taking into account additional space needed to
    // store handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file
    // NOTE(review): the high/low DWORD split uses 0x80000000 (2^31) instead
    // of 2^32; presumably fine for mappings below 2 GB, but looks wrong for
    // larger requests — confirm
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
                        (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
    // check for error and try allocating address space
    if ( ! aMBlock )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make error message and raise exception
      const int BUFSIZE=1024;
      char message[BUFSIZE];
      if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
        strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
      Standard_OutOfMemory::Raise (message);
    }

    // record map handle in the beginning; FreeMemory() reads it back
    aMBlock[0] = hMap;

    // and shift to the beginning of usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save actually allocated size into argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      Standard_OutOfMemory::Raise ("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear whole block if clearing option is set
  // (redundant for the calloc path, needed for the mmap/malloc paths)
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}
789
790//=======================================================================
791//function : FreeMemory
792//purpose :
793//=======================================================================
794
//! Releases a block obtained from AllocMemory(): munmap on UNIX mmap mode,
//! UnmapViewOfFile + CloseHandle on Windows mmap mode, free() otherwise.
//! The size argument is only needed by munmap, hence the #ifdef around the
//! parameter name to silence unused-parameter warnings on Windows.
void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
                                   const Standard_Size
#ifndef _WIN32
                                   aSize
#endif
                                  )
{
  // release memory (either free or unmap)
  if ( myMMap ) {
#ifndef _WIN32
    // align size to page size, just the same as in AllocMemory()
    const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
    munmap((char*)aBlock, AlignedSize);
#else
    // recover handle to the memory mapping stored just before the block
    // by AllocMemory()
    const HANDLE * aMBlock = (const HANDLE *)aBlock;
    HANDLE hMap = *(--aMBlock);
    UnmapViewOfFile((LPCVOID)aMBlock);
    CloseHandle (hMap);
#endif
  }
  else
    free(aBlock);
}
818}