b311480e |
1 | // Created on: 2005-03-15 |
2 | // Created by: Peter KURNEV |
973c2be1 |
3 | // Copyright (c) 2005-2014 OPEN CASCADE SAS |
b311480e |
4 | // |
973c2be1 |
5 | // This file is part of Open CASCADE Technology software library. |
b311480e |
6 | // |
d5f74e42 |
7 | // This library is free software; you can redistribute it and/or modify it under |
8 | // the terms of the GNU Lesser General Public License version 2.1 as published |
973c2be1 |
9 | // by the Free Software Foundation, with special exception defined in the file |
10 | // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT |
11 | // distribution for complete text of the license and disclaimer of any warranty. |
b311480e |
12 | // |
973c2be1 |
13 | // Alternatively, this file may be used under the terms of Open CASCADE |
14 | // commercial license or contractual agreement. |
7fd59977 |
15 | |
16 | #include <Standard_MMgrOpt.hxx> |
17 | #include <Standard_OutOfMemory.hxx> |
8b381bc3 |
18 | #include <Standard_Assert.hxx> |
7fd59977 |
19 | #ifdef HAVE_CONFIG_H |
20 | # include <config.h> |
21 | #endif |
22 | |
23 | #include <stdio.h> |
24 | |
25 | #ifdef HAVE_STRING_H |
26 | # include <string.h> |
27 | #endif |
28 | |
29 | #ifndef WNT |
30 | # include <stdlib.h> |
31 | # include <errno.h> |
32 | #endif |
33 | |
34 | #ifdef WNT |
35 | #include <windows.h> |
36 | #else |
37 | # ifdef HAVE_UNISTD_H |
38 | # include <unistd.h> |
39 | # endif |
40 | # ifdef HAVE_SYS_MMAN_H |
41 | # include <sys/mman.h> /* mmap() */ |
42 | # endif |
43 | #endif |
44 | #ifdef HAVE_MALLOC_H |
45 | # include <malloc.h> |
46 | #endif |
47 | #include <stdlib.h> |
48 | #include <sys/types.h> |
49 | #include <sys/stat.h> |
50 | #include <fcntl.h> |
51 | // |
52 | #if defined (__sun) || defined(SOLARIS) |
53 | extern "C" int getpagesize() ; |
54 | #endif |
55 | |
56 | //====================================================================== |
57 | // Assumptions |
58 | //====================================================================== |
59 | |
60 | // This implementation makes a number of assumptions regarding size of |
61 | // types: |
62 | // |
63 | // sizeof(Standard_Size) == sizeof(Standard_Address==void*) |
64 | // |
65 | // On WNT, sizeof(HANDLE) is equal of multiple of sizeof(Standard_Size) |
66 | |
67 | //====================================================================== |
68 | // Naming conventions |
69 | //====================================================================== |
70 | |
71 | // For clarity of implementation, the following conventions are used |
72 | // for naming variables: |
73 | // |
74 | // ...Size: size in bytes |
75 | // |
76 | // RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity |
77 | // |
78 | // ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each |
79 | // |
80 | // ...Storage: address of the user area of the memory block (Standard_Address) |
81 | // |
82 | //   ...Block: address of the whole memory block (header) (Standard_Size*)
83 | |
84 | //====================================================================== |
85 | // Macro definitions |
86 | //====================================================================== |
87 | |
88 | // |
89 | // MMAP_BASE_ADDRESS, MMAP_FLAGS |
90 | #if defined (__hpux) || defined(HPUX) |
91 | #define MMAP_BASE_ADDRESS 0x80000000 |
92 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
93 | #elif defined (__osf__) || defined(DECOSF1) |
94 | #define MMAP_BASE_ADDRESS 0x1000000000 |
95 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
96 | #elif defined(_AIX) |
97 | #define MMAP_BASE_ADDRESS 0x80000000 |
98 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
99 | #elif defined(__APPLE__) |
100 | #define MMAP_BASE_ADDRESS 0x80000000 |
101 | #define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE) |
102 | #elif defined(LIN) |
103 | #define MMAP_BASE_ADDRESS 0x20000000 |
104 | #define MMAP_FLAGS (MAP_PRIVATE) |
105 | #elif defined(WNT) |
106 | //static HANDLE myhMap; |
107 | #else |
108 | #define MMAP_BASE_ADDRESS 0x60000000 |
109 | #define MMAP_FLAGS (MAP_PRIVATE) |
110 | #endif |
111 | |
112 | // Round size up to the specified page size |
113 | #define PAGE_ALIGN(size,thePageSize) \ |
114 | (((size) + (thePageSize) - 1) & ~((thePageSize) - 1)) |
115 | |
116 | // Round size up to 4, 8, or 16 bytes |
117 | // Note that 0 yields 0 |
118 | #define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf) |
119 | #define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7) |
120 | #define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3) |
121 | #define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7) |
122 | |
123 | // The following two macros define granularity of memory allocation, |
124 | // by rounding size to the size of the allocation cell, |
125 | // and obtaining cell index from rounded size. |
126 | // Note that granularity shall be not less than sizeof(Standard_Size) |
127 | |
128 | // Traditional implementation: granularity 16 bytes |
129 | //#define ROUNDUP_CELL(size) ROUNDUP16(size) |
130 | //#define INDEX_CELL(rsize) ((rsize) >> 4) |
131 | |
132 | // Reduced granularity: 8 bytes |
133 | #define ROUNDUP_CELL(size) ROUNDUP8(size) |
134 | #define ROUNDDOWN_CELL(size) ROUNDDOWN8(size) |
135 | #define INDEX_CELL(rsize) ((rsize) >> 3) |
136 | |
7fd59977 |
137 | /* In the allocated block, the first bytes are used for storing the memory manager's
138 |    data (the size of the block). The minimal size of these data is sizeof(int).
139 |    The memory allocated by the system is usually aligned by 16 bytes. The alignment
140 |    of the data area in the memory block is shifted by BLOCK_SHIFT*sizeof(Standard_Size)
141 |    bytes.
142 |    It is OK for WNT, SUN and Linux systems, but on SGI the alignment should be 8 bytes.
143 |    So, BLOCK_SHIFT is defined as a macro to support other possible platforms.
144 | */
145 | |
146 | #if defined(IRIX) || defined(SOLARIS) |
147 | #define BLOCK_SHIFT 2 |
148 | #else |
149 | #define BLOCK_SHIFT 1 |
150 | #endif |
151 | |
152 | // Get address of user area from block address, and vice-versa |
153 | #define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT) |
154 | #define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT) |
155 | |
156 | // create static instance of out-of-memory exception to protect |
157 | // against possible lack of memory for its raising |
158 | static Handle(Standard_OutOfMemory) anOutOfMemError = new Standard_OutOfMemory; |
159 | |
160 | //======================================================================= |
161 | //function : Standard_MMgr |
162 | //purpose : |
163 | //======================================================================= |
164 | |
165 | Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear, |
166 | const Standard_Boolean aMMap, |
167 | const Standard_Size aCellSize, |
168 | const Standard_Integer aNbPages, |
bd0c22ce |
169 | const Standard_Size aThreshold) |
7fd59977 |
170 | { |
171 | // check basic assumption |
8b381bc3 |
172 | Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address)); |
7fd59977 |
173 | |
174 | // clear buffer fields |
175 | myFreeListMax = 0; |
176 | myFreeList = NULL; |
177 | myPageSize = 0; |
178 | myAllocList = NULL; |
179 | myNextAddr = NULL; |
180 | myEndBlock = NULL; |
181 | |
182 | // initialize parameters |
183 | myClear = aClear; |
184 | myMMap = (Standard_Integer)aMMap; |
185 | myCellSize = aCellSize; |
186 | myNbPages = aNbPages; |
187 | myThreshold = aThreshold; |
7fd59977 |
188 | |
189 | // initialize |
190 | Initialize(); |
191 | } |
192 | |
193 | //======================================================================= |
194 | //function : ~Standard_MMgrOpt |
195 | //purpose : |
196 | //======================================================================= |
197 | |
198 | Standard_MMgrOpt::~Standard_MMgrOpt() |
199 | { |
200 | Purge(Standard_True); |
201 | free(myFreeList); |
202 | |
203 | // NOTE: freeing pools may be dangerous if not all memory taken by |
204 | // this instance of the memory manager has been freed |
205 | FreePools(); |
206 | } |
207 | |
208 | // interface level |
209 | |
//=======================================================================
//function : Initialize
//purpose  : Completes construction: validates myNbPages, queries the
//           system page size, opens the file descriptor used for memory
//           mapping (on UNIX), and allocates the free-list table.
//           Called from the constructor only.
//=======================================================================

void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools;
  // too small a pool would defeat its purpose, so enforce a floor
  if ( myNbPages < 100 )
    myNbPages = 1000;

  // get system-dependent page size;
  // if it cannot be obtained, memory mapping is disabled
#ifndef WNT
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0;
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    /* Workaround for a conflict between the malloc zone and the mmap zone
       on SGI, observed on IRIX 5.3 through 6.2 (it does not seem to occur
       on IRIX 6.4). Successive mallocs return increasing addresses starting
       from 0x10000000 (the BREAK pointer). The first mmap is forced to
       MMAP_BASE_ADDRESS (0x60000000 on SGI), but subsequent mmaps (flag
       MAP_VARIABLE) are placed by the system at the lowest possible address
       just above the BREAK (around 0x18640000), which leaves only about
       140 Mb of allocation space for malloc. On big models malloc usage
       could peak at 680 Mb.
       The workaround: on entry here, before any mmap has been done, malloc
       a large block (about 700 Mb) and free it immediately. This pushes the
       BREAK very high, and the BREAK never comes back down, even after the
       free. The following mmaps then land about 100 Mb above the BREAK,
       splitting the address space as roughly 700 Mb for malloc and 500 Mb
       for mmap. The split can be tuned through the environment variable
       CLD_HIGH_SBRK. */
    char *var;
    Standard_Size high_sbrk;

    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif

#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(LIN) || defined(linux) || defined(__FreeBSD__)
    // on these systems mmap needs a file descriptor; prefer /dev/zero,
    // fall back to /dev/null; myMMap then holds the descriptor (non-zero)
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    // other platforms (incl. WNT, __APPLE__) need no descriptor;
    // any non-zero value keeps the mmap mode enabled
    myMMap = -1;
#endif
  }

  // initialize free lists
  // NOTE(review): BLOCK_SHIFT (a count of Standard_Size words) is subtracted
  // from myThreshold (a size in bytes) — presumably intentional slack so that
  // blocks of exactly myThreshold bytes are still recycled; confirm.
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
290 | |
291 | //======================================================================= |
292 | //function : SetMMgrOptCallBack |
293 | //purpose : Sets a callback function to be called on each alloc/free |
294 | //======================================================================= |
295 | |
296 | static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL; |
297 | |
298 | Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc) |
299 | { |
300 | MyPCallBackFunc = pFunc; |
301 | } |
302 | |
303 | inline void callBack(const Standard_Boolean isAlloc, |
304 | const Standard_Address aStorage, |
305 | const Standard_Size aRoundSize, |
306 | const Standard_Size aSize) |
307 | { |
308 | if (MyPCallBackFunc) |
309 | (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize); |
310 | } |
311 | |
//=======================================================================
//function : Allocate
//purpose  : Allocates aSize bytes and returns the user storage pointer.
//           Three regimes by rounded size:
//           - recyclable sizes (index <= myFreeListMax): taken from the
//             free list if available, else
//             - small (<= myCellSize): carved from the small-block pools,
//             - medium: malloc/calloc'ed directly;
//           - big (index > myFreeListMax): allocated via AllocMemory()
//             (mmap / mapped file when enabled).
//           Raises Standard_OutOfMemory if all attempts fail.
//=======================================================================

Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  Standard_Size * aStorage = NULL;

  // round up size according to allocation granularity
  // The keyword 'volatile' is only used here for GCC 64-bit compilations
  // otherwise this method would crash in runtime in optimized build.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    // size expressed in Standard_Size words (pointers fit in one word)
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // The unlock is called as soon as possible, for every treatment case.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // if free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update list pointer
      // to point to next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested (recycled blocks hold stale data)
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by sentry class so as to ensure unlocking in case of
      // possible exception that may be thrown from AllocMemory()
      Standard_Mutex::Sentry aSentry (myMutexPools);

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece of the exhausted pool to the free lists,
          // so that its tail is not wasted
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                        * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            // re-take the free-list mutex only for the list manipulation
            myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make new pool current (last)
        // and get pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize header of the new block by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance pool pointer to the next free piece of pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we do not need further to access any field
      myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
                                                          malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize new block header by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute size of the block to be allocated, including header,
    // Note that we use rounded size, even if this block will not be stored in
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and calculation of index in the free list
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory (raises Standard_OutOfMemory on failure)
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize new block header by its size
    // and get the pointer to the user part of block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  // notify the tracing hook, if installed
  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
454 | |
//=======================================================================
//function : Free
//purpose  : Releases a block previously returned by Allocate()/Reallocate().
//           Recyclable blocks (rounded size within free-list range) are
//           pushed onto the corresponding free list for reuse; bigger
//           blocks are returned to the system via FreeMemory().
//           NULL is accepted and ignored.
//=======================================================================

void Standard_MMgrOpt::Free(Standard_Address theStorage)
{
  // safely return if attempt to free null pointer
  if ( ! theStorage )
    return;

  // get the pointer to the memory block header
  Standard_Size* aBlock = GET_BLOCK(theStorage);

  // and get the allocated (rounded) size of the block from its header
  Standard_Size RoundSize = aBlock[0];

  // notify the tracing hook, if installed (size 0 marks deallocation)
  callBack(Standard_False, theStorage, RoundSize, 0);

  // check whether blocks with that size are recyclable
  const Standard_Size Index = INDEX_CELL(RoundSize);
  if ( Index <= myFreeListMax ) {
    // Lock access to critical data (myFreeList and other) by mutex
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // in the memory block header, record address of the next free block
    *(Standard_Size**)aBlock = myFreeList[Index];
    // add new block to be first in the list
    myFreeList[Index] = aBlock;

    myMutex.Unlock();
  }
  // otherwise, we have block of big size which shall be simply released
  else
    FreeMemory (aBlock, RoundSize);
}
496 | |
//=======================================================================
//function : Purge
//purpose  : Frees all free lists except small blocks (less than CellSize);
//           additionally releases every small-block pool whose blocks are
//           all sitting in the free lists (i.e. fully unused pools), and
//           relinks the remaining pools in the myAllocList chain.
//           Returns the number of freed blocks.
//=======================================================================

Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
{
  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry (myMutex);

  // TODO: implement support for isDeleted = True

  // free memory blocks contained in free lists
  // whose sizes are greater than cellsize
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];
    while(aFree) {
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree;
      free(anOther);
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry1 (myMutexPools);

  // release memory pools containing no busy memory;
  // for that for each pool count the summary size of blocks
  // got from the free lists allocated from this pool
#ifndef WNT
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  // on Windows a pool is prefixed with the mapping HANDLE (see AllocMemory)
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  // NOTE(review): these work tables are function-static, hence shared by all
  // manager instances; presumably safe only because each instance guards the
  // whole pass with its own myMutexPools and instances are not purged
  // concurrently — confirm if multiple managers may coexist.
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  // process the pool chain in windows of at most NB_POOLS_WIN pools
  while (aNextPool) {
    // fill the table of pools
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      // total footprint of one block of this list: header + payload
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of free pools
    // (a pool is free when the free blocks inside it add up to its full size)
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan free blocks again, and remove those of them
    // that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove this block from the free list (its pool is going away)
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release free pools, and reconnect remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
659 | |
660 | //======================================================================= |
661 | //function : FreePools |
662 | //purpose : Frees all memory pools allocated for small blocks |
663 | //======================================================================= |
664 | |
665 | void Standard_MMgrOpt::FreePools() |
666 | { |
667 | // Lock access to critical data by mutex |
bd0c22ce |
668 | Standard_Mutex::Sentry aSentry (myMutexPools); |
7fd59977 |
669 | |
670 | // last pool is remembered in myAllocList |
671 | Standard_Size * aFree = myAllocList; |
672 | myAllocList = 0; |
673 | while (aFree) { |
674 | Standard_Size * aBlock = aFree; |
675 | // next pool address is stored in first 8 bytes of each pool |
676 | aFree = * (Standard_Size **) aFree; |
677 | // free pool (note that its size is calculated rather than stored) |
678 | FreeMemory ( aBlock, myPageSize * myNbPages ); |
679 | } |
680 | } |
681 | |
682 | //======================================================================= |
683 | //function : Reallocate |
684 | //purpose : |
685 | //======================================================================= |
686 | |
547702a1 |
687 | Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage, |
7fd59977 |
688 | const Standard_Size theNewSize) |
689 | { |
cf9a910a |
690 | // if theStorage == NULL, just allocate new memory block |
691 | if (!theStorage) |
692 | { |
693 | return Allocate(theNewSize); |
694 | } |
695 | |
7fd59977 |
696 | Standard_Size * aBlock = GET_BLOCK(theStorage); |
697 | Standard_Address newStorage = NULL; |
698 | |
699 | // get current size of the memory block from its header |
700 | Standard_Size OldSize = aBlock[0]; |
701 | |
702 | // if new size is less than old one, just do nothing |
703 | if (theNewSize <= OldSize) { |
704 | newStorage = theStorage; |
705 | } |
706 | // otherwise, allocate new block and copy the data to it |
707 | else { |
708 | newStorage = Allocate(theNewSize); |
709 | memcpy (newStorage, theStorage, OldSize); |
710 | Free( theStorage ); |
711 | // clear newly added part of the block |
712 | if ( myClear ) |
713 | memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize); |
714 | } |
7fd59977 |
715 | return newStorage; |
716 | } |
717 | |
//=======================================================================
//function : AllocMemory
//purpose  : Allocate a big block of memory using either malloc/calloc
//           or memory mapped file. Size is in-out: on return it holds
//           the size actually allocated (page-aligned in mmap mode).
//           On failure tries Purge() once per attempt and retries;
//           raises Standard_OutOfMemory when nothing helps.
//=======================================================================

Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as efficient method for a possibility to retry allocation
retry:

  Standard_Size * aBlock = NULL;

  // if MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef WNT

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is file descriptor for /dev/null
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      // save errno before Purge() can overwrite it
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise (strerror(errcode));
    }

    // save actually allocated size into argument
    Size = AlignedSize;

#else /* WNT */

    // align size to page size, taking into account additional space needed to
    // store handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file
    // NOTE(review): the high/low size split divides by 0x80000000 (2^31),
    // not 2^32 as the dwMaximumSizeHigh/Low convention suggests; this only
    // matters for single allocations of 2 Gb and more — confirm intent.
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
                        (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
    // check for error and try allocating address space
    if ( ! aMBlock )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make error message and raise exception
      const int BUFSIZE=1024;
      char message[BUFSIZE];
      if ( FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0, message, BUFSIZE-1, 0) <=0 )
        strcpy (message, "Standard_MMgrOpt::AllocMemory() failed to mmap");
      anOutOfMemError->Reraise (message);
    }

    // record map handle in the beginning (needed by FreeMemory to unmap)
    aMBlock[0] = hMap;

    // and shift to the beginning of usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save actually allocated size into argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      anOutOfMemError->Reraise ("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear whole block if clearing option is set
  // (mmap'ed and calloc'ed memory is already zeroed; this also covers malloc)
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}
814 | |
//=======================================================================
//function : FreeMemory
//purpose  : Releases a big block obtained from AllocMemory(): munmap on
//           UNIX (aSize re-aligned exactly as at allocation), unmap +
//           close of the stored mapping handle on Windows, or plain
//           free() when mmap mode is off. aSize is unused on Windows,
//           hence the conditional parameter name.
//=======================================================================

void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
                                   const Standard_Size
#ifndef WNT
                                   aSize
#endif
                                  )
{
  // release memory (either free or unmap)
  if ( myMMap ) {
#ifndef WNT
    // align size to page size, just the same as in AllocMemory()
    const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
    munmap((char*)aBlock, AlignedSize);
#else
    // recover handle to the memory mapping stored just before the block
    const HANDLE * aMBlock = (const HANDLE *)aBlock;
    HANDLE hMap = *(--aMBlock);
    UnmapViewOfFile((LPCVOID)aMBlock);
    CloseHandle (hMap);
#endif
  }
  else
    free(aBlock);
}