0024489: Avoid type casts in call to Standard::Free()
[occt.git] / src / Standard / Standard_MMgrOpt.cxx
CommitLineData
b311480e 1// Created on: 2005-03-15
2// Created by: Peter KURNEV
973c2be1 3// Copyright (c) 2005-2014 OPEN CASCADE SAS
b311480e 4//
973c2be1 5// This file is part of Open CASCADE Technology software library.
b311480e 6//
973c2be1 7// This library is free software; you can redistribute it and / or modify it
8// under the terms of the GNU Lesser General Public version 2.1 as published
9// by the Free Software Foundation, with special exception defined in the file
10// OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
11// distribution for complete text of the license and disclaimer of any warranty.
b311480e 12//
973c2be1 13// Alternatively, this file may be used under the terms of Open CASCADE
14// commercial license or contractual agreement.
7fd59977 15
16#include <Standard_MMgrOpt.hxx>
17#include <Standard_OutOfMemory.hxx>
8b381bc3 18#include <Standard_Assert.hxx>
7fd59977 19#ifdef HAVE_CONFIG_H
20# include <config.h>
21#endif
22
23#include <stdio.h>
24
25#ifdef HAVE_STRING_H
26# include <string.h>
27#endif
28
29#ifndef WNT
30# include <stdlib.h>
31# include <errno.h>
32#endif
33
34#ifdef WNT
35#include <windows.h>
36#else
37# ifdef HAVE_UNISTD_H
38# include <unistd.h>
39# endif
40# ifdef HAVE_SYS_MMAN_H
41# include <sys/mman.h> /* mmap() */
42# endif
43#endif
44#ifdef HAVE_MALLOC_H
45# include <malloc.h>
46#endif
47#include <stdlib.h>
48#include <sys/types.h>
49#include <sys/stat.h>
50#include <fcntl.h>
51//
52#if defined (__sun) || defined(SOLARIS)
53extern "C" int getpagesize() ;
54#endif
55
56//======================================================================
57// Assumptions
58//======================================================================
59
60// This implementation makes a number of assumptions regarding size of
61// types:
62//
63// sizeof(Standard_Size) == sizeof(Standard_Address==void*)
64//
65// On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size)
66
67//======================================================================
68// Naming conventions
69//======================================================================
70
71// For clarity of implementation, the following conventions are used
72// for naming variables:
73//
74// ...Size: size in bytes
75//
76// RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity
77//
78// ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each
79//
80// ...Storage: address of the user area of the memory block (Standard_Address)
81//
82// ...Block: address of the whole memory block (header) (Standard_Size*)
83
84//======================================================================
85// Macro definitions
86//======================================================================
87
88//
89// MMAP_BASE_ADDRESS, MMAP_FLAGS
90#if defined (__hpux) || defined(HPUX)
91#define MMAP_BASE_ADDRESS 0x80000000
92#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
93#elif defined (__osf__) || defined(DECOSF1)
94#define MMAP_BASE_ADDRESS 0x1000000000
95#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
96#elif defined(_AIX)
97#define MMAP_BASE_ADDRESS 0x80000000
98#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE)
99#elif defined(__APPLE__)
100#define MMAP_BASE_ADDRESS 0x80000000
101#define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE)
102#elif defined(LIN)
103#define MMAP_BASE_ADDRESS 0x20000000
104#define MMAP_FLAGS (MAP_PRIVATE)
105#elif defined(WNT)
106//static HANDLE myhMap;
107#else
108#define MMAP_BASE_ADDRESS 0x60000000
109#define MMAP_FLAGS (MAP_PRIVATE)
110#endif
111
112// Round size up to the specified page size
113#define PAGE_ALIGN(size,thePageSize) \
114 (((size) + (thePageSize) - 1) & ~((thePageSize) - 1))
115
116// Round size up to 4, 8, or 16 bytes
117// Note that 0 yields 0
118#define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf)
119#define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7)
120#define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3)
121#define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7)
122
123// The following two macros define granularity of memory allocation,
124// by rounding size to the size of the allocation cell,
125// and obtaining cell index from rounded size.
126// Note that granularity shall be not less than sizeof(Standard_Size)
127
128// Traditional implementation: granularity 16 bytes
129//#define ROUNDUP_CELL(size) ROUNDUP16(size)
130//#define INDEX_CELL(rsize) ((rsize) >> 4)
131
132// Reduced granularity: 8 bytes
133#define ROUNDUP_CELL(size) ROUNDUP8(size)
134#define ROUNDDOWN_CELL(size) ROUNDDOWN8(size)
135#define INDEX_CELL(rsize) ((rsize) >> 3)
136
137// Minimal granularity: 4 bytes (32-bit systems only)
138#ifndef _OCC64
139//#define ROUNDUP_CELL(size) ROUNDUP4(size)
140//#define INDEX_CELL(rsize) ((rsize) >> 2)
141#endif
142
143// Adaptive granularity, less for little blocks and greater for bigger ones:
144/*
145#if _OCC64
146#define ROUNDUP_CELL(size) ((size) <= 0x40 ? ROUNDUP8(size) : ROUNDUP16(size))
147#define INDEX_CELL(rsize) ((rsize) <= 0x40 ? ((rsize) >> 3) : (4 + ((rsize) >> 4)))
148#else
149#define ROUNDUP_CELL(size) ((size) <= 0x40 ? ROUNDUP4(size) : ROUNDUP8(size))
150#define INDEX_CELL(rsize) ((rsize) <= 0x40 ? ((rsize) >> 2) : (8 + ((rsize) >> 3)))
151#endif
152*/
153
154
155/* In the allocated block, the first bytes are used for storing the memory
156   manager's data (the size of the block). The minimal size of these data is
157   sizeof(int). The memory allocated by the system is usually aligned by 16
158   bytes. The alignment of the data area in the memory block is shifted by
159   BLOCK_SHIFT*sizeof(Standard_Size) bytes.
160   It is OK for WNT, SUN and Linux systems, but on SGI the alignment should be 8 bytes.
161   So, BLOCK_SHIFT is defined as a macro to support other possible platforms.
162*/
163
164#if defined(IRIX) || defined(SOLARIS)
165#define BLOCK_SHIFT 2
166#else
167#define BLOCK_SHIFT 1
168#endif
169
170// Get address of user area from block address, and vice-versa
171#define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT)
172#define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT)
173
174// create static instance of out-of-memory exception to protect
175// against possible lack of memory for its raising
176static Handle(Standard_OutOfMemory) anOutOfMemError = new Standard_OutOfMemory;
177
178//=======================================================================
179//function : Standard_MMgr
180//purpose :
181//=======================================================================
182
Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear,
                                   const Standard_Boolean aMMap,
                                   const Standard_Size aCellSize,
                                   const Standard_Integer aNbPages,
                                   const Standard_Size aThreshold)
{
  // check basic assumption: block headers and free-list links are stored
  // in Standard_Size cells, so pointer and size types must have equal size
  Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address));

  // clear buffer fields (free lists table, current pool window and bounds)
  myFreeListMax = 0;
  myFreeList = NULL;
  myPageSize = 0;
  myAllocList = NULL;
  myNextAddr = NULL;
  myEndBlock = NULL;

  // initialize parameters from arguments
  myClear = aClear;
  // myMMap is an integer flag here; on some UNIX platforms Initialize()
  // replaces it by the file descriptor used for mmap (see Initialize)
  myMMap = (Standard_Integer)aMMap;
  myCellSize = aCellSize;
  myNbPages = aNbPages;
  myThreshold = aThreshold;

  // complete initialization (page size, mmap setup, free lists allocation)
  Initialize();
}
210
211//=======================================================================
212//function : ~Standard_MMgrOpt
213//purpose :
214//=======================================================================
215
Standard_MMgrOpt::~Standard_MMgrOpt()
{
  // return recyclable blocks to the system, then release the free lists table
  Purge(Standard_True);
  free(myFreeList);

  // NOTE: freeing pools may be dangerous if not all memory taken by
  // this instance of the memory manager has been freed
  FreePools();
}
225
226// interface level
227
228//=======================================================================
229//function : Initialize
230//purpose :
231//=======================================================================
232
void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools; enforce a sane minimum
  if ( myNbPages < 100 )
    myNbPages = 1000;

  // get system-dependent page size
#ifndef WNT
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0; // cannot page-align allocations: disable mmap mode
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    /* [translated from French]
       Conflict between the malloc area and the mmap area on SGI.
       The problem was identified on IRIX 5.3 up to IRIX 6.2 and does not seem
       to appear on IRIX 6.4. Successive mallocs return increasing addresses
       (starting from 0x10000000) — the so-called BREAK pointer. The first
       mmap is forced to MMAP_BASE_ADDRESS (0x60000000 on SGI), but subsequent
       mmaps are placed by the system (flag MAP_VARIABLE), which unfortunately
       returns the lowest possible address in the malloc zone, just above the
       BREAK (about 0x18640000), leaving only about 140 Mb for mallocs. On big
       models peaks of 680 Mb of mallocs were observed.
       Workaround adopted: on entry here, before any mmap has been done,
       perform one large malloc (about 700 Mb) and free it immediately. This
       moves the BREAK very high (the BREAK never goes back down, even on
       free), so subsequent mmaps get addresses above it. The address space is
       then split into ~700 Mb for mallocs and ~500 Mb for mmaps. This split
       may be tuned via the CLD_HIGH_SBRK environment variable. */
    char *var;
    Standard_Size high_sbrk;

    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif

#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(LIN) || defined(linux) || defined(__FreeBSD__)
    // on these platforms myMMap becomes the descriptor of the file backing
    // the mappings (/dev/zero, or /dev/null as a fallback); 0 disables mmap
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    // other platforms (incl. WNT) use anonymous mappings: keep a non-zero flag
    myMMap = -1;
#endif
  }

  // initialize free lists
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
308
309//=======================================================================
310//function : SetMMgrOptCallBack
311//purpose : Sets a callback function to be called on each alloc/free
312//=======================================================================
313
// Pointer to the user-provided alloc/free callback; NULL means no callback.
// NOTE(review): this is a plain file-scope global — updating it is not
// synchronized with concurrent Allocate()/Free() calls; confirm callers
// only set it at startup.
static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;

// Install (or, with NULL, remove) the callback invoked on each alloc/free
Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
{
  MyPCallBackFunc = pFunc;
}

// Invoke the registered callback, if any, with the operation parameters:
// isAlloc is True on allocation and False on free; aStorage is the user
// pointer; aRoundSize is the rounded block size; aSize is the requested
// size (0 when invoked from Free(), where it is not known)
inline void callBack(const Standard_Boolean isAlloc,
                     const Standard_Address aStorage,
                     const Standard_Size aRoundSize,
                     const Standard_Size aSize)
{
  if (MyPCallBackFunc)
    (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize);
}
329
330//=======================================================================
331//function : Allocate
332//purpose :
333//=======================================================================
334
// Allocate aSize bytes and return a pointer to the user area of the block.
// Small/medium blocks (rounded size indexed within myFreeListMax) are served
// from per-size free lists or carved from pools; bigger blocks go through
// AllocMemory() (malloc/calloc or mmap). Raises Standard_OutOfMemory on failure.
Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  Standard_Size * aStorage = NULL;

  // round up size according to allocation granularity
  // The keyword 'volatile' is only used here for GCC 64-bit compilations
  // otherwise this method would crash in runtime in optimized build.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // The unlock is called as soon as possible, for every treatment case.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // if free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update list pointer
      // to point to next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested (recycled memory may hold old data)
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by sentry class so as to ensure unlocking in case of
      // possible exception that may be thrown from AllocMemory()
      Standard_Mutex::Sentry aSentry (myMutexPools);

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece of the old pool to the free lists,
          // so its tail is not wasted
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                       * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            // free lists are guarded by myMutex, not myMutexPools
            myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make new pool current (last)
        // and get pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize header of the new block by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance pool pointer to the next free piece of pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we do not need further to access any field
      myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
                                                          malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize new block header by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute size of the block to be allocated, including header,
    // Note that we use rounded size, even if this block will not be stored in
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and calculation of index in the free list
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize new block header by its size
    // and get the pointer to the user part of block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  // notify the user callback, if installed, of the allocation
  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
472
473//=======================================================================
474//function : Free
475//purpose :
476//=======================================================================
477
// Release a block previously returned by Allocate()/Reallocate().
// Recyclable sizes go back onto the corresponding free list; bigger blocks
// are returned to the system via FreeMemory(). NULL is silently ignored.
void Standard_MMgrOpt::Free(Standard_Address theStorage)
{
  // safely return if attempt to free null pointer
  if ( ! theStorage )
    return;

  // get the pointer to the memory block header
  Standard_Size* aBlock = GET_BLOCK(theStorage);

  // and get the allocated size of the block (stored there by Allocate)
  Standard_Size RoundSize = aBlock[0];

  // notify the user callback; requested size is unknown here, hence 0
  callBack(Standard_False, theStorage, RoundSize, 0);

  // check whether blocks with that size are recyclable
  const Standard_Size Index = INDEX_CELL(RoundSize);
  if ( Index <= myFreeListMax ) {
    // Lock access to critical data (myFreeList and other) by mutex
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // in the memory block header, record address of the next free block
    *(Standard_Size**)aBlock = myFreeList[Index];
    // add new block to be first in the list
    myFreeList[Index] = aBlock;

    myMutex.Unlock();
  }
  // otherwise, we have block of big size which shall be simply released
  else
    FreeMemory (aBlock, RoundSize);
}
514
515//=======================================================================
516//function : Purge
517//purpose : Frees all free lists except small blocks (less than CellSize)
518//=======================================================================
519
// Return recyclable memory to the system: frees all free-list blocks bigger
// than the pool cell size, then releases whole pools that contain no busy
// blocks. Returns the number of blocks freed. The Standard_Boolean argument
// (isDeleted) is currently unused — see TODO below.
Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
{
  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry (myMutex);

  // TODO: implement support for isDeleted = True

  // free memory blocks contained in free lists
  // whose sizes are greater than cellsize
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];
    while(aFree) {
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree; // next block is linked in the header
      free(anOther); // these blocks were obtained via malloc/calloc in Allocate
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry1 (myMutexPools);

  // release memory pools containing no busy memory;
  // for that for each pool count the summary size of blocks
  // got from the free lists allocated from this pool
#ifndef WNT
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  // on WNT the pool is preceded by a stored file-mapping HANDLE,
  // see AllocMemory(); account for it in the usable pool size
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  // process the chain of pools in windows of at most NB_POOLS_WIN pools
  while (aNextPool) {
    // fill the table of pools
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of free pools
    // (a pool is free when the sum of its free blocks covers it entirely)
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan free blocks again, and remove those of them
    // that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove the block from the free list (its pool will be released)
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release free pools, and reconnect remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
677
678//=======================================================================
679//function : FreePools
680//purpose : Frees all memory pools allocated for small blocks
681//=======================================================================
682
683void Standard_MMgrOpt::FreePools()
684{
685 // Lock access to critical data by mutex
bd0c22ce 686 Standard_Mutex::Sentry aSentry (myMutexPools);
7fd59977 687
688 // last pool is remembered in myAllocList
689 Standard_Size * aFree = myAllocList;
690 myAllocList = 0;
691 while (aFree) {
692 Standard_Size * aBlock = aFree;
693 // next pool address is stored in first 8 bytes of each pool
694 aFree = * (Standard_Size **) aFree;
695 // free pool (note that its size is calculated rather than stored)
696 FreeMemory ( aBlock, myPageSize * myNbPages );
697 }
698}
699
700//=======================================================================
701//function : Reallocate
702//purpose :
703//=======================================================================
704
547702a1 705Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage,
7fd59977 706 const Standard_Size theNewSize)
707{
cf9a910a 708 // if theStorage == NULL, just allocate new memory block
709 if (!theStorage)
710 {
711 return Allocate(theNewSize);
712 }
713
7fd59977 714 Standard_Size * aBlock = GET_BLOCK(theStorage);
715 Standard_Address newStorage = NULL;
716
717 // get current size of the memory block from its header
718 Standard_Size OldSize = aBlock[0];
719
720 // if new size is less than old one, just do nothing
721 if (theNewSize <= OldSize) {
722 newStorage = theStorage;
723 }
724 // otherwise, allocate new block and copy the data to it
725 else {
726 newStorage = Allocate(theNewSize);
727 memcpy (newStorage, theStorage, OldSize);
728 Free( theStorage );
729 // clear newly added part of the block
730 if ( myClear )
731 memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize);
732 }
7fd59977 733 return newStorage;
734}
735
736//=======================================================================
737//function : AllocMemory
738//purpose : Allocate a big block of memory using either malloc/calloc
739// or memory mapped file
740//=======================================================================
741
// Allocate a big block of memory using either malloc/calloc or a memory
// mapped file, depending on myMMap. On success returns the block address and
// updates Size to the size actually allocated (page-aligned in mmap mode).
// On failure tries Purge() once per attempt and finally raises
// Standard_OutOfMemory.
Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as efficient method for a possibility to retry allocation
retry:

  Standard_Size * aBlock = NULL;

  // if MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef WNT

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is file descriptor for /dev/null
    // (or /dev/zero — whichever Initialize() managed to open)
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise (strerror(errcode));
    }

    // save actually allocated size into argument
    Size = AlignedSize;

#else /* WNT */

    // align size to page size, taking into account additional space needed to
    // store handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file
    // NOTE(review): the high/low size split divides by 0x80000000 (2^31)
    // whereas the dwMaximumSizeHigh/Low pair splits at 2^32 — verify
    // behavior for requested sizes of 2 GB and above
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
                        (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
    // check for error and try allocating address space
    if ( ! aMBlock )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make error message and raise exception
      const int BUFSIZE=1024;
      char message[BUFSIZE];
      if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
        strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
      anOutOfMemError->Reraise (message);
    }

    // record map handle in the beginning (recovered later by FreeMemory)
    aMBlock[0] = hMap;

    // and shift to the beginning of usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save actually allocated size into argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear whole block if clearing option is set
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}
832
833//=======================================================================
834//function : FreeMemory
835//purpose :
836//=======================================================================
837
838void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
839 const Standard_Size
840#ifndef WNT
841 aSize
842#endif
843 )
844{
845 // release memory (either free or unmap)
846 if ( myMMap ) {
847#ifndef WNT
848 // align size to page size, just the same as in AllocMemory()
849 const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
850 munmap((char*)aBlock, AlignedSize);
851#else
852 // recover handle to the memory mapping stored just before the block
853 const HANDLE * aMBlock = (const HANDLE *)aBlock;
854 HANDLE hMap = *(--aMBlock);
855 UnmapViewOfFile((LPCVOID)aMBlock);
856 CloseHandle (hMap);
857#endif
858 }
859 else
860 free(aBlock);
861}