b311480e |
1 | // Created on: 2005-03-15 |
2 | // Created by: Peter KURNEV |
973c2be1 |
3 | // Copyright (c) 2005-2014 OPEN CASCADE SAS |
b311480e |
4 | // |
973c2be1 |
5 | // This file is part of Open CASCADE Technology software library. |
b311480e |
6 | // |
d5f74e42 |
7 | // This library is free software; you can redistribute it and/or modify it under |
8 | // the terms of the GNU Lesser General Public License version 2.1 as published |
973c2be1 |
9 | // by the Free Software Foundation, with special exception defined in the file |
10 | // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT |
11 | // distribution for complete text of the license and disclaimer of any warranty. |
b311480e |
12 | // |
973c2be1 |
13 | // Alternatively, this file may be used under the terms of Open CASCADE |
14 | // commercial license or contractual agreement. |
7fd59977 |
15 | |
16 | #include <Standard_MMgrOpt.hxx> |
17 | #include <Standard_OutOfMemory.hxx> |
8b381bc3 |
18 | #include <Standard_Assert.hxx> |
7fd59977 |
19 | |
20 | #include <stdio.h> |
21 | |
03155c18 |
22 | #ifdef _WIN32 |
23 | # include <windows.h> |
7fd59977 |
24 | #else |
03155c18 |
25 | # include <sys/mman.h> /* mmap() */ |
7fd59977 |
26 | #endif |
03155c18 |
27 | |
7fd59977 |
28 | #include <fcntl.h> |
29 | // |
30 | #if defined (__sun) || defined(SOLARIS) |
31 | extern "C" int getpagesize() ; |
32 | #endif |
33 | |
34 | //====================================================================== |
35 | // Assumptions |
36 | //====================================================================== |
37 | |
38 | // This implementation makes a number of assumptions regarding size of |
39 | // types: |
40 | // |
41 | // sizeof(Standard_Size) == sizeof(Standard_Address==void*) |
42 | // |
43 | // On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size) |
44 | |
45 | //====================================================================== |
46 | // Naming conventions |
47 | //====================================================================== |
48 | |
49 | // For clarity of implementation, the following conventions are used |
50 | // for naming variables: |
51 | // |
52 | // ...Size: size in bytes |
53 | // |
54 | // RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity |
55 | // |
56 | // ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each |
57 | // |
58 | // ...Storage: address of the user area of the memory block (Standard_Address) |
59 | // |
60 | // ...Block: address of the whole memory block (header) (Standard_Size*) |
61 | |
62 | //====================================================================== |
63 | // Macro definitions |
64 | //====================================================================== |
65 | |
66 | // |
67 | // MMAP_BASE_ADDRESS, MMAP_FLAGS |
68 | #if defined (__hpux) || defined(HPUX) |
69 | #define MMAP_BASE_ADDRESS 0x80000000 |
70 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
71 | #elif defined (__osf__) || defined(DECOSF1) |
72 | #define MMAP_BASE_ADDRESS 0x1000000000 |
73 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
74 | #elif defined(_AIX) |
75 | #define MMAP_BASE_ADDRESS 0x80000000 |
76 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
77 | #elif defined(__APPLE__) |
78 | #define MMAP_BASE_ADDRESS 0x80000000 |
79 | #define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE) |
80 | #elif defined(LIN) |
81 | #define MMAP_BASE_ADDRESS 0x20000000 |
82 | #define MMAP_FLAGS (MAP_PRIVATE) |
83 | #elif defined(WNT) |
84 | //static HANDLE myhMap; |
85 | #else |
86 | #define MMAP_BASE_ADDRESS 0x60000000 |
87 | #define MMAP_FLAGS (MAP_PRIVATE) |
88 | #endif |
89 | |
90 | // Round size up to the specified page size |
91 | #define PAGE_ALIGN(size,thePageSize) \ |
92 | (((size) + (thePageSize) - 1) & ~((thePageSize) - 1)) |
93 | |
94 | // Round size up to 4, 8, or 16 bytes |
95 | // Note that 0 yields 0 |
96 | #define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf) |
97 | #define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7) |
98 | #define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3) |
99 | #define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7) |
100 | |
101 | // The following two macros define granularity of memory allocation, |
102 | // by rounding size to the size of the allocation cell, |
103 | // and obtaining cell index from rounded size. |
104 | // Note that granularity shall be not less than sizeof(Standard_Size) |
105 | |
106 | // Traditional implementation: granularity 16 bytes |
107 | //#define ROUNDUP_CELL(size) ROUNDUP16(size) |
108 | //#define INDEX_CELL(rsize) ((rsize) >> 4) |
109 | |
110 | // Reduced granularity: 8 bytes |
111 | #define ROUNDUP_CELL(size) ROUNDUP8(size) |
112 | #define ROUNDDOWN_CELL(size) ROUNDDOWN8(size) |
113 | #define INDEX_CELL(rsize) ((rsize) >> 3) |
114 | |
7fd59977 |
115 | /* In the allocated block, first bytes are used for storing of memory manager's data. |
116 |    (size of block). The minimal size of these data is sizeof(int). |
117 |    The memory allocated in the system is usually aligned by 16 bytes. The alignment of the |
118 |    data area in the memory block is shifted by BLOCK_SHIFT*sizeof(Standard_Size) |
119 |    bytes. |
120 |    It is OK for WNT, SUN and Linux systems, but on SGI alignment should be 8 bytes. |
121 |    So, BLOCK_SHIFT is formed as macro for support on other possible platforms. |
122 | */ |
123 | |
124 | #if defined(IRIX) || defined(SOLARIS) |
125 | #define BLOCK_SHIFT 2 |
126 | #else |
127 | #define BLOCK_SHIFT 1 |
128 | #endif |
129 | |
130 | // Get address of user area from block address, and vice-versa |
131 | #define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT) |
132 | #define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT) |
133 | |
// create static instance of out-of-memory exception to protect
// against possible lack of memory for its raising
// (constructed once at static-initialization time, while memory is still available)
static Handle(Standard_OutOfMemory) anOutOfMemError = new Standard_OutOfMemory;
137 | |
138 | //======================================================================= |
139 | //function : Standard_MMgr |
140 | //purpose : |
141 | //======================================================================= |
142 | |
143 | Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear, |
144 | const Standard_Boolean aMMap, |
145 | const Standard_Size aCellSize, |
146 | const Standard_Integer aNbPages, |
bd0c22ce |
147 | const Standard_Size aThreshold) |
7fd59977 |
148 | { |
149 | // check basic assumption |
8b381bc3 |
150 | Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address)); |
7fd59977 |
151 | |
152 | // clear buffer fields |
153 | myFreeListMax = 0; |
154 | myFreeList = NULL; |
155 | myPageSize = 0; |
156 | myAllocList = NULL; |
157 | myNextAddr = NULL; |
158 | myEndBlock = NULL; |
159 | |
160 | // initialize parameters |
161 | myClear = aClear; |
162 | myMMap = (Standard_Integer)aMMap; |
163 | myCellSize = aCellSize; |
164 | myNbPages = aNbPages; |
165 | myThreshold = aThreshold; |
7fd59977 |
166 | |
167 | // initialize |
168 | Initialize(); |
169 | } |
170 | |
171 | //======================================================================= |
172 | //function : ~Standard_MMgrOpt |
173 | //purpose : |
174 | //======================================================================= |
175 | |
176 | Standard_MMgrOpt::~Standard_MMgrOpt() |
177 | { |
178 | Purge(Standard_True); |
179 | free(myFreeList); |
180 | |
181 | // NOTE: freeing pools may be dangerous if not all memory taken by |
182 | // this instance of the memory manager has been freed |
183 | FreePools(); |
184 | } |
185 | |
186 | // interface level |
187 | |
188 | //======================================================================= |
189 | //function : Initialize |
190 | //purpose : |
191 | //======================================================================= |
192 | |
// Prepares the manager for use: validates the pool size, queries the system
// page size, opens the descriptor needed for mmap-based allocation (when the
// MMap option is on), and allocates the table of free-list heads.
void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools; too small a pool would
  // defeat the purpose of pooling, so fall back to the default of 1000
  if ( myNbPages < 100 )
    myNbPages = 1000;

  // get system-dependent page size
#ifndef WNT
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0; // cannot page-align mappings => disable mmap mode
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    /* [translated] On IRIX 5.3 through 6.2 the malloc arena and the mmap zone
       conflict (the problem no longer appears in 6.4). Successive mallocs
       return increasing addresses starting from the BREAK pointer; the first
       mmap is forced to MMAP_BASE_ADDRESS, but subsequent ones (MAP_VARIABLE)
       are placed by the system just above the BREAK, leaving only ~140 Mb for
       malloc. Workaround: before any mmap, malloc ~700 Mb and free it at once.
       This pushes the BREAK high (it never comes back down even after free),
       so later mmap addresses land above it and malloc keeps a large arena
       (~700 Mb for malloc, ~500 Mb for mmap). The split can be tuned via the
       CLD_HIGH_SBRK environment variable. */
    char *var;
    Standard_Size high_sbrk;

    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif

#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(LIN) || defined(linux) || defined(__FreeBSD__)
    // on these systems mmap() maps a real file; myMMap is reused to hold the
    // open descriptor of /dev/zero (or /dev/null as fallback) — any non-zero
    // value also means "mmap mode is enabled"
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    // platforms with anonymous mappings (and WNT) need no descriptor;
    // -1 simply keeps the "mmap enabled" flag true
    myMMap = -1;
#endif
  }

  // initialize free lists
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
268 | |
269 | //======================================================================= |
270 | //function : SetMMgrOptCallBack |
271 | //purpose : Sets a callback function to be called on each alloc/free |
272 | //======================================================================= |
273 | |
// file-local pointer to the user callback; NULL means no callback installed
static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;

// Installs (or, with a NULL argument, removes) the global callback invoked on
// every allocation and deallocation.
// NOTE(review): the assignment is not synchronized with concurrent
// Allocate()/Free() calls — presumably intended to be set before threads start.
Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
{
  MyPCallBackFunc = pFunc;
}
280 | |
281 | inline void callBack(const Standard_Boolean isAlloc, |
282 | const Standard_Address aStorage, |
283 | const Standard_Size aRoundSize, |
284 | const Standard_Size aSize) |
285 | { |
286 | if (MyPCallBackFunc) |
287 | (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize); |
288 | } |
289 | |
290 | //======================================================================= |
291 | //function : Allocate |
292 | //purpose : |
293 | //======================================================================= |
294 | |
// Allocates aSize bytes and returns the address of the user area.
// Strategy by size class:
//   - recyclable sizes (index <= myFreeListMax): reuse a block from the free
//     list if available; otherwise small blocks (<= myCellSize) are carved
//     from pooled pages, medium ones come from malloc/calloc;
//   - bigger blocks are allocated via AllocMemory() (possibly mmap).
// Every returned block carries its rounded size in the header cell aBlock[0].
// Raises Standard_OutOfMemory if the system allocation ultimately fails.
Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  Standard_Size * aStorage = NULL;

  // round up size according to allocation granularity
  // The keyword 'volatile' is only used here for GCC 64-bit compilations
  // otherwise this method would crash in runtime in optimized build.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // The unlock is called as soon as possible, for every treatment case.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // if free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update list pointer
      // to point to next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by sentry class so as to ensure unlocking in case of
      // possible exception that may be thrown from AllocMemory()
      Standard_Mutex::Sentry aSentry (myMutexPools);

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece (tail of the exhausted pool)
          // to the free lists so it is not wasted
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                        * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make new pool current (last)
        // and get pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize header of the new block by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance pool pointer to the next free piece of pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we do not need further to access any field
      myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
                                                          malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize new block header by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute size of the block to be allocated, including header,
    // Note that we use rounded size, even if this block will not be stored in
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and calculation of index in the free list
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize new block header by its size
    // and get the pointer to the user part of block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  // notify the user callback (no-op when none is installed)
  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
432 | |
433 | //======================================================================= |
434 | //function : Free |
435 | //purpose : |
436 | //======================================================================= |
437 | |
547702a1 |
438 | void Standard_MMgrOpt::Free(Standard_Address theStorage) |
7fd59977 |
439 | { |
440 | // safely return if attempt to free null pointer |
441 | if ( ! theStorage ) |
442 | return; |
443 | |
444 | // get the pointer to the memory block header |
445 | Standard_Size* aBlock = GET_BLOCK(theStorage); |
446 | |
447 | // and get the allocated size of the block |
448 | Standard_Size RoundSize = aBlock[0]; |
449 | |
450 | callBack(Standard_False, theStorage, RoundSize, 0); |
451 | |
452 | // check whether blocks with that size are recyclable |
453 | const Standard_Size Index = INDEX_CELL(RoundSize); |
454 | if ( Index <= myFreeListMax ) { |
455 | // Lock access to critical data (myFreeList and other) by mutex |
456 | // Note that we do not lock fields that do not change during the |
457 | // object life (such as myThreshold), and assume that calls to functions |
458 | // of standard library are already protected by their implementation. |
459 | // We also do not use Sentry, since in case if OCC signal or exception is |
460 | // caused by this block we will have deadlock anyway... |
bd0c22ce |
461 | myMutex.Lock(); |
7fd59977 |
462 | |
463 | // in the memory block header, record address of the next free block |
464 | *(Standard_Size**)aBlock = myFreeList[Index]; |
465 | // add new block to be first in the list |
466 | myFreeList[Index] = aBlock; |
467 | |
bd0c22ce |
468 | myMutex.Unlock(); |
7fd59977 |
469 | } |
470 | // otherwise, we have block of big size which shall be simply released |
471 | else |
472 | FreeMemory (aBlock, RoundSize); |
7fd59977 |
473 | } |
474 | |
475 | //======================================================================= |
476 | //function : Purge |
477 | //purpose : Frees all free lists except small blocks (less than CellSize) |
478 | //======================================================================= |
479 | |
bd0c22ce |
// Frees all free-list blocks whose size exceeds the pooled cell size, then
// scans the small-block pools and releases any pool that turns out to be
// entirely covered by free blocks (relinking the pool chain and dropping the
// corresponding free-list entries).
// Returns the number of blocks removed from the free lists.
// NOTE: the Standard_Boolean argument (isDeleted) is currently ignored.
Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
{
  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry (myMutex);

  // TODO: implement support for isDeleted = True

  // free memory blocks contained in free lists
  // whose sizes are greater than cellsize
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];
    while(aFree) {
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree; // follow the link before freeing
      free(anOther);
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data (pools chain) by mutex
  Standard_Mutex::Sentry aSentry1 (myMutexPools);

  // release memory pools containing no busy memory;
  // for that for each pool count the summary size of blocks
  // got from the free lists allocated from this pool
#ifndef WNT
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  // on WNT AllocMemory() reserves extra room for the mapping HANDLE,
  // so the usable pool size differs (mirrors the computation there)
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  // process the pool chain in windows of up to NB_POOLS_WIN pools
  while (aNextPool) {
    // fill the table of pools for this window
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters of free bytes per pool
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      // total footprint of a block in this size class: header + payload
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of fully-free pools
    // (those whose free bytes add up to the whole pool size)
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan free blocks again, and remove those of them
    // that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove the block from the singly-linked free list
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release free pools, and reconnect remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
637 | |
638 | //======================================================================= |
639 | //function : FreePools |
640 | //purpose : Frees all memory pools allocated for small blocks |
641 | //======================================================================= |
642 | |
643 | void Standard_MMgrOpt::FreePools() |
644 | { |
645 | // Lock access to critical data by mutex |
bd0c22ce |
646 | Standard_Mutex::Sentry aSentry (myMutexPools); |
7fd59977 |
647 | |
648 | // last pool is remembered in myAllocList |
649 | Standard_Size * aFree = myAllocList; |
650 | myAllocList = 0; |
651 | while (aFree) { |
652 | Standard_Size * aBlock = aFree; |
653 | // next pool address is stored in first 8 bytes of each pool |
654 | aFree = * (Standard_Size **) aFree; |
655 | // free pool (note that its size is calculated rather than stored) |
656 | FreeMemory ( aBlock, myPageSize * myNbPages ); |
657 | } |
658 | } |
659 | |
660 | //======================================================================= |
661 | //function : Reallocate |
662 | //purpose : |
663 | //======================================================================= |
664 | |
547702a1 |
665 | Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage, |
7fd59977 |
666 | const Standard_Size theNewSize) |
667 | { |
cf9a910a |
668 | // if theStorage == NULL, just allocate new memory block |
669 | if (!theStorage) |
670 | { |
671 | return Allocate(theNewSize); |
672 | } |
673 | |
7fd59977 |
674 | Standard_Size * aBlock = GET_BLOCK(theStorage); |
675 | Standard_Address newStorage = NULL; |
676 | |
677 | // get current size of the memory block from its header |
678 | Standard_Size OldSize = aBlock[0]; |
679 | |
680 | // if new size is less than old one, just do nothing |
681 | if (theNewSize <= OldSize) { |
682 | newStorage = theStorage; |
683 | } |
684 | // otherwise, allocate new block and copy the data to it |
685 | else { |
686 | newStorage = Allocate(theNewSize); |
687 | memcpy (newStorage, theStorage, OldSize); |
688 | Free( theStorage ); |
689 | // clear newly added part of the block |
690 | if ( myClear ) |
691 | memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize); |
692 | } |
7fd59977 |
693 | return newStorage; |
694 | } |
695 | |
696 | //======================================================================= |
697 | //function : AllocMemory |
698 | //purpose : Allocate a big block of memory using either malloc/calloc |
699 | // or memory mapped file |
700 | //======================================================================= |
701 | |
// Allocates a big chunk of memory, either via memory mapping (when the MMap
// option is on) or via malloc/calloc. On failure it tries Purge() once and
// retries; if that also fails, raises Standard_OutOfMemory.
// Size is an in/out argument: on return it holds the size actually usable by
// the caller (page-aligned; on WNT minus the stored mapping HANDLE).
Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as efficient method for a possibility to retry allocation
 retry:

  Standard_Size * aBlock = NULL;

  // if MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef WNT

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is file descriptor for /dev/null
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      // capture errno before Purge() can overwrite it
      // (errno/strerror are presumably pulled in via the included headers)
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise (strerror(errcode));
    }

    // save actually allocated size into argument
    Size = AlignedSize;

#else /* WNT */

    // align size to page size, taking into account additional space needed to
    // store handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file backed by the system paging file
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
                        (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
    // check for error and try allocating address space
    if ( ! aMBlock )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make error message and raise exception
      const int BUFSIZE=1024;
      char message[BUFSIZE];
      if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
        strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
      anOutOfMemError->Reraise (message);
    }

    // record map handle in the beginning (needed by FreeMemory to unmap)
    aMBlock[0] = hMap;

    // and shift to the beginning of usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save actually allocated size into argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear whole block if clearing option is set
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}
792 | |
793 | //======================================================================= |
794 | //function : FreeMemory |
795 | //purpose : |
796 | //======================================================================= |
797 | |
// Releases a chunk obtained from AllocMemory(): munmap on UNIX (where the
// size must be re-aligned exactly as at allocation), UnmapViewOfFile +
// CloseHandle on WNT (where the mapping HANDLE is stored just before the
// block), or plain free() when mmap mode is off.
// Note: aSize is unused on WNT, hence the conditional parameter name.
void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
                                   const Standard_Size
#ifndef WNT
                                   aSize
#endif
                                  )
{
  // release memory (either free or unmap)
  if ( myMMap ) {
#ifndef WNT
    // align size to page size, just the same as in AllocMemory()
    const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
    munmap((char*)aBlock, AlignedSize);
#else
    // recover handle to the memory mapping stored just before the block
    const HANDLE * aMBlock = (const HANDLE *)aBlock;
    HANDLE hMap = *(--aMBlock);
    UnmapViewOfFile((LPCVOID)aMBlock);
    CloseHandle (hMap);
#endif
  }
  else
    free(aBlock);
}