b311480e |
1 | // Created on: 2005-03-15 |
2 | // Created by: Peter KURNEV |
973c2be1 |
3 | // Copyright (c) 2005-2014 OPEN CASCADE SAS |
b311480e |
4 | // |
973c2be1 |
5 | // This file is part of Open CASCADE Technology software library. |
b311480e |
6 | // |
d5f74e42 |
7 | // This library is free software; you can redistribute it and/or modify it under |
8 | // the terms of the GNU Lesser General Public License version 2.1 as published |
973c2be1 |
9 | // by the Free Software Foundation, with special exception defined in the file |
10 | // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT |
11 | // distribution for complete text of the license and disclaimer of any warranty. |
b311480e |
12 | // |
973c2be1 |
13 | // Alternatively, this file may be used under the terms of Open CASCADE |
14 | // commercial license or contractual agreement. |
7fd59977 |
15 | |
742cc8b0 |
16 | #ifdef _WIN32 |
17 | #include <windows.h> |
18 | #endif |
19 | |
7fd59977 |
20 | #include <Standard_MMgrOpt.hxx> |
21 | #include <Standard_OutOfMemory.hxx> |
8b381bc3 |
22 | #include <Standard_Assert.hxx> |
7fd59977 |
23 | |
24 | #include <stdio.h> |
d8d01f6e |
25 | #include <errno.h> |
7fd59977 |
26 | |
742cc8b0 |
27 | #ifndef _WIN32 |
03155c18 |
28 | # include <sys/mman.h> /* mmap() */ |
7fd59977 |
29 | #endif |
03155c18 |
30 | |
7fd59977 |
31 | #include <fcntl.h> |
32 | // |
33 | #if defined (__sun) || defined(SOLARIS) |
34 | extern "C" int getpagesize() ; |
35 | #endif |
36 | |
742cc8b0 |
37 | #ifdef _WIN32 |
5fecc495 |
38 | #include <strsafe.h> |
742cc8b0 |
39 | #endif |
7fd59977 |
40 | //====================================================================== |
41 | // Assumptions |
42 | //====================================================================== |
43 | |
44 | // This implementation makes a number of assumptions regarding size of |
45 | // types: |
46 | // |
47 | // sizeof(Standard_Size) == sizeof(Standard_Address==void*) |
48 | // |
// On WNT, sizeof(HANDLE) is a multiple of sizeof(Standard_Size)
50 | |
51 | //====================================================================== |
52 | // Naming conventions |
53 | //====================================================================== |
54 | |
55 | // For clarity of implementation, the following conventions are used |
56 | // for naming variables: |
57 | // |
58 | // ...Size: size in bytes |
59 | // |
60 | // RoundSize, RSize etc.: size in bytes, rounded according to allocation granularity |
61 | // |
62 | // ...SizeN: size counted in number of items of sizeof(Standard_Size) bytes each |
63 | // |
64 | // ...Storage: address of the user area of the memory block (Standard_Address) |
65 | // |
// ...Block: address of the whole memory block (header) (Standard_Size*)
67 | |
68 | //====================================================================== |
69 | // Macro definitions |
70 | //====================================================================== |
71 | |
72 | // |
73 | // MMAP_BASE_ADDRESS, MMAP_FLAGS |
74 | #if defined (__hpux) || defined(HPUX) |
75 | #define MMAP_BASE_ADDRESS 0x80000000 |
76 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
77 | #elif defined (__osf__) || defined(DECOSF1) |
78 | #define MMAP_BASE_ADDRESS 0x1000000000 |
79 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
80 | #elif defined(_AIX) |
81 | #define MMAP_BASE_ADDRESS 0x80000000 |
82 | #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE) |
83 | #elif defined(__APPLE__) |
84 | #define MMAP_BASE_ADDRESS 0x80000000 |
85 | #define MMAP_FLAGS (MAP_ANON | MAP_PRIVATE) |
57c28b61 |
86 | #elif defined(__linux__) |
7fd59977 |
87 | #define MMAP_BASE_ADDRESS 0x20000000 |
88 | #define MMAP_FLAGS (MAP_PRIVATE) |
57c28b61 |
89 | #elif defined(_WIN32) |
7fd59977 |
90 | //static HANDLE myhMap; |
91 | #else |
92 | #define MMAP_BASE_ADDRESS 0x60000000 |
93 | #define MMAP_FLAGS (MAP_PRIVATE) |
94 | #endif |
95 | |
96 | // Round size up to the specified page size |
97 | #define PAGE_ALIGN(size,thePageSize) \ |
98 | (((size) + (thePageSize) - 1) & ~((thePageSize) - 1)) |
99 | |
100 | // Round size up to 4, 8, or 16 bytes |
101 | // Note that 0 yields 0 |
102 | #define ROUNDUP16(size) (((size) + 0xf) & ~(Standard_Size)0xf) |
103 | #define ROUNDUP8(size) (((size) + 0x7) & ~(Standard_Size)0x7) |
104 | #define ROUNDUP4(size) (((size) + 0x3) & ~(Standard_Size)0x3) |
105 | #define ROUNDDOWN8(size) ((size) & ~(Standard_Size)0x7) |
106 | |
107 | // The following two macros define granularity of memory allocation, |
108 | // by rounding size to the size of the allocation cell, |
109 | // and obtaining cell index from rounded size. |
110 | // Note that granularity shall be not less than sizeof(Standard_Size) |
111 | |
112 | // Traditional implementation: granularity 16 bytes |
113 | //#define ROUNDUP_CELL(size) ROUNDUP16(size) |
114 | //#define INDEX_CELL(rsize) ((rsize) >> 4) |
115 | |
116 | // Reduced granularity: 8 bytes |
117 | #define ROUNDUP_CELL(size) ROUNDUP8(size) |
118 | #define ROUNDDOWN_CELL(size) ROUNDDOWN8(size) |
119 | #define INDEX_CELL(rsize) ((rsize) >> 3) |
120 | |
7fd59977 |
/* In the allocated block, the first bytes are used for storing the memory manager's
   data (the size of the block).  The minimal size of these data is sizeof(int).
   The memory allocated by the system is usually aligned by 16 bytes.  The alignment
   of the data area in the memory block is shifted by BLOCK_SHIFT*sizeof(Standard_Size)
   bytes.
   It is OK for WNT, SUN and Linux systems, but on SGI the alignment should be 8 bytes.
   So, BLOCK_SHIFT is defined as a macro to support other possible platforms.
*/
129 | |
130 | #if defined(IRIX) || defined(SOLARIS) |
131 | #define BLOCK_SHIFT 2 |
132 | #else |
133 | #define BLOCK_SHIFT 1 |
134 | #endif |
135 | |
136 | // Get address of user area from block address, and vice-versa |
137 | #define GET_USER(block) (((Standard_Size*)(block)) + BLOCK_SHIFT) |
138 | #define GET_BLOCK(storage) (((Standard_Size*)(storage))-BLOCK_SHIFT) |
139 | |
7fd59977 |
140 | //======================================================================= |
141 | //function : Standard_MMgr |
142 | //purpose : |
143 | //======================================================================= |
144 | |
145 | Standard_MMgrOpt::Standard_MMgrOpt(const Standard_Boolean aClear, |
146 | const Standard_Boolean aMMap, |
147 | const Standard_Size aCellSize, |
148 | const Standard_Integer aNbPages, |
bd0c22ce |
149 | const Standard_Size aThreshold) |
7fd59977 |
150 | { |
151 | // check basic assumption |
8b381bc3 |
152 | Standard_STATIC_ASSERT(sizeof(Standard_Size) == sizeof(Standard_Address)); |
7fd59977 |
153 | |
154 | // clear buffer fields |
155 | myFreeListMax = 0; |
156 | myFreeList = NULL; |
157 | myPageSize = 0; |
158 | myAllocList = NULL; |
159 | myNextAddr = NULL; |
160 | myEndBlock = NULL; |
161 | |
162 | // initialize parameters |
163 | myClear = aClear; |
164 | myMMap = (Standard_Integer)aMMap; |
165 | myCellSize = aCellSize; |
166 | myNbPages = aNbPages; |
167 | myThreshold = aThreshold; |
7fd59977 |
168 | |
169 | // initialize |
170 | Initialize(); |
171 | } |
172 | |
173 | //======================================================================= |
174 | //function : ~Standard_MMgrOpt |
175 | //purpose : |
176 | //======================================================================= |
177 | |
//! Destructor: returns all cached memory to the system.
Standard_MMgrOpt::~Standard_MMgrOpt()
{
  // return the blocks held in the free lists to the system
  Purge(Standard_True);
  // release the table of free-list heads itself
  free(myFreeList);

  // NOTE: freeing pools may be dangerous if not all memory taken by
  // this instance of the memory manager has been freed
  FreePools();
}
187 | |
188 | // interface level |
189 | |
190 | //======================================================================= |
191 | //function : Initialize |
192 | //purpose : |
193 | //======================================================================= |
194 | |
//! Initializes the memory manager: validates the pool size parameter,
//! queries the system page size, prepares memory mapping (on most UNIX
//! systems myMMap becomes the descriptor of /dev/zero or /dev/null;
//! elsewhere it is just a non-zero flag), and allocates the table of
//! free lists sized according to myThreshold.
void Standard_MMgrOpt::Initialize()
{
  // check number of pages in small blocks pools
  if ( myNbPages < 100 )
    myNbPages = 1000;

  // get system-dependent page size
#ifndef _WIN32
  myPageSize = getpagesize();
  if ( ! myPageSize )
    myMMap = 0;   // no usable page size => disable memory mapping
#else
  SYSTEM_INFO SystemInfo;
  GetSystemInfo (&SystemInfo);
  myPageSize = SystemInfo.dwPageSize;
#endif

  // initialize memory mapped files
  if(myMMap) {
#if defined (__sgi) || defined(IRIX)
    // (translated from French)
    // Conflict between the malloc area and the mmap area on SGI.
    // The problem was identified on IRIX 5.3 and up to IRIX 6.2; it does not
    // seem to appear on IRIX 6.4.
    // Successive mallocs return increasing addresses (starting from 0x10000000),
    // the so-called BREAK pointer.
    // The first mmap is forced to the address MMAP_BASE_ADDRESS (0x60000000 on
    // SGI), but the following mmaps are placed by the system (flag MAP_VARIABLE).
    // Unfortunately it returns the lowest possible address in the malloc area,
    // just above the BREAK, i.e. 0x18640000, which leaves an allocation space
    // of about 140 Mb for the mallocs. On big models, peaks of 680 Mb were
    // observed in Rev6 for a test model of 2 000 000 points; in Rev7 the same
    // model does not exceed 286 Mb (see vision.for).
    // To work around this behaviour, the adopted solution is the following:
    // when entering alloc_startup (here), no mmap has been done yet. A malloc
    // (of about 700 Mb) is performed and freed immediately; this moves the
    // BREAK very high, and the BREAK never goes back down, even on free.
    // Since mmap then returns an address about 100 Mb above the BREAK, the
    // memory zones end up shared as follows:
    // 700 Mb for the mallocs, 500 Mb (1.2 Gb - 700 Mb) for the mmaps. With a
    // CLD_SD_SIZE of 2 000 000 the 500 Mb of mmap is never reached, even when
    // loading applications that use the mmap zone.
    // This sharing of the memory zones can be tuned by the environment
    // variable CLD_HIGH_SBRK.
    char *var;
    Standard_Size high_sbrk;

    high_sbrk = 700*1024*1024;
    if ( (var=getenv("CLD_HIGH_SBRK")) != NULL ) {
      high_sbrk = atoi(var);
    }

    var = (char*)malloc(high_sbrk); // 700 Mb
    if ( var )
      free(var);
    else
      perror("ERR_MEMRY_FAIL");
#endif

#if defined(IRIX) || defined(__sgi) || defined(SOLARIS) || defined(__sun) || defined(__linux__) || defined(__FreeBSD__) || defined(__ANDROID__)
    // on these systems mmap() needs a file descriptor;
    // keep it in myMMap (0 means mapping is disabled)
    if ((myMMap = open ("/dev/zero", O_RDWR)) < 0) {
      if ((myMMap = open ("/dev/null", O_RDWR)) < 0){
        myMMap = 0;
      }
    }
    if (!myMMap)
      perror("ERR_MMAP_FAIL");
#else
    myMMap = -1;  // anonymous mapping: no file descriptor is needed
#endif
  }

  // initialize free lists
  myFreeListMax = INDEX_CELL(ROUNDUP_CELL(myThreshold-BLOCK_SHIFT)); // all blocks less than myThreshold are to be recycled
  myFreeList = (Standard_Size **) calloc (myFreeListMax+1, sizeof(Standard_Size *));
  myCellSize = ROUNDUP16(myCellSize);
}
270 | |
271 | //======================================================================= |
272 | //function : SetMMgrOptCallBack |
273 | //purpose : Sets a callback function to be called on each alloc/free |
274 | //======================================================================= |
275 | |
// Pointer to the user callback invoked on every allocation/deallocation;
// NULL when no callback is installed (see SetCallBackFunction).
// NOTE(review): access is not synchronized; presumably the callback is
// installed once at startup — confirm before relying on runtime changes.
static Standard_MMgrOpt::TPCallBackFunc MyPCallBackFunc = NULL;

//! Installs the callback invoked on each Allocate()/Free();
//! pass NULL to remove a previously installed callback.
Standard_EXPORT void Standard_MMgrOpt::SetCallBackFunction(TPCallBackFunc pFunc)
{
  MyPCallBackFunc = pFunc;
}
282 | |
283 | inline void callBack(const Standard_Boolean isAlloc, |
284 | const Standard_Address aStorage, |
285 | const Standard_Size aRoundSize, |
286 | const Standard_Size aSize) |
287 | { |
288 | if (MyPCallBackFunc) |
289 | (*MyPCallBackFunc)(isAlloc, aStorage, aRoundSize, aSize); |
290 | } |
291 | |
292 | //======================================================================= |
293 | //function : Allocate |
294 | //purpose : |
295 | //======================================================================= |
296 | |
//! Allocates aSize bytes and returns the pointer to the user area.
//! Strategy, by rounded size:
//!  - if a block of that size is cached in a free list, reuse it;
//!  - else sizes up to myCellSize are carved from pooled pages,
//!    and the remaining recyclable sizes come from malloc/calloc;
//!  - sizes above the recycling threshold are obtained from
//!    AllocMemory() (possibly as memory mapped files).
//! Throws Standard_OutOfMemory if the system allocation fails even
//! after purging the free lists.
Standard_Address Standard_MMgrOpt::Allocate(const Standard_Size aSize)
{
  Standard_Size * aStorage = NULL;

  // round up size according to allocation granularity
  // The keyword 'volatile' is only used here for GCC 64-bit compilations
  // otherwise this method would crash in runtime in optimized build.
  volatile Standard_Size RoundSize = ROUNDUP_CELL(aSize);
  const Standard_Size Index = INDEX_CELL(RoundSize);

  // blocks of small and medium size are recyclable
  if ( Index <= myFreeListMax ) {
    // size expressed in Standard_Size-sized words
    const Standard_Size RoundSizeN = RoundSize / sizeof(Standard_Size);

    // Lock access to critical data (myFreeList and other fields) by mutex.
    // Note that we do not lock fields that do not change during the
    // object life (such as myThreshold), and assume that calls to functions
    // of standard library are already protected by their implementation.
    // The unlock is called as soon as possible, for every treatment case.
    // We also do not use Sentry, since in case if OCC signal or exception is
    // caused by this block we will have deadlock anyway...
    myMutex.Lock();

    // if free block of the requested size is available, return it
    if ( myFreeList[Index] ) {
      // the address of the next free block is stored in the header
      // of the memory block; use it to update list pointer
      // to point to next free block
      Standard_Size* aBlock = myFreeList[Index];
      myFreeList[Index] = *(Standard_Size**)aBlock;

      // unlock the mutex
      myMutex.Unlock();

      // record size of the allocated block in the block header and
      // shift the pointer to the beginning of the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // clear block if requested
      if (myClear)
        memset (aStorage, 0, RoundSize);
    }
    // else if block size is small allocate it in pools
    else if ( RoundSize <= myCellSize ) {
      // unlock the mutex for free lists
      myMutex.Unlock();

      // and lock the specific mutex used to protect access to small blocks pools;
      // note that this is done by sentry class so as to ensure unlocking in case of
      // possible exception that may be thrown from AllocMemory()
      Standard_Mutex::Sentry aSentry (myMutexPools);

      // check for availability of requested space in the current pool
      Standard_Size *aBlock = myNextAddr;
      if ( &aBlock[ BLOCK_SHIFT+RoundSizeN] > myEndBlock ) {
        // otherwise, allocate new memory pool with page-aligned size
        Standard_Size Size = myPageSize * myNbPages;
        aBlock = AllocMemory(Size); // note that size may be aligned by this call

        if (myEndBlock > myNextAddr) {
          // put the remaining piece (tail of the exhausted pool)
          // to the free lists so it is not wasted
          const Standard_Size aPSize = (myEndBlock - GET_USER(myNextAddr))
                                       * sizeof(Standard_Size);
          const Standard_Size aRPSize = ROUNDDOWN_CELL(aPSize);
          const Standard_Size aPIndex = INDEX_CELL(aRPSize);
          if ( aPIndex > 0 && aPIndex <= myFreeListMax ) {
            // re-acquire the free-list mutex only for the list update
            myMutex.Lock();
            *(Standard_Size**)myNextAddr = myFreeList[aPIndex];
            myFreeList[aPIndex] = myNextAddr;
            myMutex.Unlock();
          }
        }

        // set end pointer to the end of the new pool
        myEndBlock = aBlock + Size / sizeof(Standard_Size);
        // record in the first bytes of the pool the address of the previous one
        *(Standard_Size**)aBlock = myAllocList;
        // and make new pool current (last)
        // and get pointer to the first memory block in the pool
        myAllocList = aBlock;
        aBlock+=BLOCK_SHIFT;
      }

      // initialize header of the new block by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);

      // and advance pool pointer to the next free piece of pool
      myNextAddr = &aStorage[RoundSizeN];
    }
    // blocks of medium size are allocated directly
    else {
      // unlock the mutex immediately, as we do not need further to access any field
      myMutex.Unlock();

      // we use operator ?: instead of if() since it is faster
      Standard_Size *aBlock = (Standard_Size*) (myClear ? calloc( RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size)) :
                                                          malloc((RoundSizeN+BLOCK_SHIFT) * sizeof(Standard_Size)) );

      // if allocation failed, try to free some memory by purging free lists, and retry
      if ( ! aBlock ) {
        if ( Purge (Standard_False) )
          aBlock = (Standard_Size*)calloc(RoundSizeN+BLOCK_SHIFT, sizeof(Standard_Size));
        // if still not succeeded, raise exception
        if ( ! aBlock )
          throw Standard_OutOfMemory("Standard_MMgrOpt::Allocate(): malloc failed");
      }

      // initialize new block header by its size
      // and get the pointer to the user part of block
      aBlock[0] = RoundSize;
      aStorage = GET_USER(aBlock);
    }
  }
  // blocks of big size may be allocated as memory mapped files
  else {
    // Compute size of the block to be allocated, including header,
    // Note that we use rounded size, even if this block will not be stored in
    // the free list, for consistency of calls to AllocMemory() / FreeMemory()
    // and calculation of index in the free list
    Standard_Size AllocSize = RoundSize + sizeof(Standard_Size);

    // allocate memory
    Standard_Size* aBlock = AllocMemory(AllocSize);

    // initialize new block header by its size
    // and get the pointer to the user part of block.
    aBlock[0] = RoundSize;
    aStorage = GET_USER(aBlock);
  }

  // notify the optional monitoring callback about the allocation
  callBack(Standard_True, aStorage, RoundSize, aSize);

  return aStorage;
}
434 | |
435 | //======================================================================= |
436 | //function : Free |
437 | //purpose : |
438 | //======================================================================= |
439 | |
547702a1 |
440 | void Standard_MMgrOpt::Free(Standard_Address theStorage) |
7fd59977 |
441 | { |
442 | // safely return if attempt to free null pointer |
443 | if ( ! theStorage ) |
444 | return; |
445 | |
446 | // get the pointer to the memory block header |
447 | Standard_Size* aBlock = GET_BLOCK(theStorage); |
448 | |
449 | // and get the allocated size of the block |
450 | Standard_Size RoundSize = aBlock[0]; |
451 | |
452 | callBack(Standard_False, theStorage, RoundSize, 0); |
453 | |
454 | // check whether blocks with that size are recyclable |
455 | const Standard_Size Index = INDEX_CELL(RoundSize); |
456 | if ( Index <= myFreeListMax ) { |
457 | // Lock access to critical data (myFreeList and other) by mutex |
458 | // Note that we do not lock fields that do not change during the |
459 | // object life (such as myThreshold), and assume that calls to functions |
460 | // of standard library are already protected by their implementation. |
461 | // We also do not use Sentry, since in case if OCC signal or exception is |
462 | // caused by this block we will have deadlock anyway... |
bd0c22ce |
463 | myMutex.Lock(); |
7fd59977 |
464 | |
465 | // in the memory block header, record address of the next free block |
466 | *(Standard_Size**)aBlock = myFreeList[Index]; |
467 | // add new block to be first in the list |
468 | myFreeList[Index] = aBlock; |
469 | |
bd0c22ce |
470 | myMutex.Unlock(); |
7fd59977 |
471 | } |
472 | // otherwise, we have block of big size which shall be simply released |
473 | else |
474 | FreeMemory (aBlock, RoundSize); |
7fd59977 |
475 | } |
476 | |
477 | //======================================================================= |
478 | //function : Purge |
479 | //purpose : Frees all free lists except small blocks (less than CellSize) |
480 | //======================================================================= |
481 | |
bd0c22ce |
//! Releases cached memory back to the system:
//!  1) frees every block sitting in the free lists whose size exceeds
//!     the pool cell size (they came from malloc);
//!  2) scans the small-block pools and releases those whose content is
//!     entirely present in the free lists, unlinking the corresponding
//!     free-list entries and repairing the pool chain.
//! Returns the number of blocks freed. The Standard_Boolean argument
//! (isDeleted) is currently ignored (see TODO below).
Standard_Integer Standard_MMgrOpt::Purge(Standard_Boolean )
{
  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry (myMutex);

  // TODO: implement support for isDeleted = True

  // free memory blocks contained in free lists
  // whose sizes are greater than cellsize
  Standard_Integer nbFreed = 0;
  Standard_Size i = INDEX_CELL(ROUNDUP_CELL(myCellSize+BLOCK_SHIFT));
  for (; i <= myFreeListMax; i++ ) {
    Standard_Size * aFree = myFreeList[i];
    while(aFree) {
      Standard_Size * anOther = aFree;
      aFree = * (Standard_Size **) aFree;
      free(anOther);
      nbFreed++;
    }
    myFreeList[i] = NULL;
  }

  // Lock access to critical data by mutex
  Standard_Mutex::Sentry aSentry1 (myMutexPools);

  // release memory pools containing no busy memory;
  // for that for each pool count the summary size of blocks
  // got from the free lists allocated from this pool
#ifndef _WIN32
  const Standard_Size PoolSize = myPageSize * myNbPages;
#else
  // on Windows the usable pool size excludes the hidden mapping HANDLE
  // stored by AllocMemory() in front of the pool
  const Standard_Size PoolSize =
    PAGE_ALIGN(myPageSize * myNbPages + sizeof(HANDLE), myPageSize) -
    sizeof(HANDLE);
#endif
  const Standard_Size RPoolSize = ROUNDDOWN_CELL(PoolSize);
  const Standard_Size PoolSizeN = RPoolSize / sizeof(Standard_Size);

  // declare the table of pools;
  // (we map free blocks onto a number of pools simultaneously)
  // NOTE(review): these static tables make Purge non-reentrant; access
  // appears serialized by the myMutex/myMutexPools sentries held above —
  // confirm all callers go through the locked path.
  static const Standard_Integer NB_POOLS_WIN = 512;
  static Standard_Size* aPools[NB_POOLS_WIN];
  static Standard_Size aFreeSize[NB_POOLS_WIN];
  static Standard_Integer aFreePools[NB_POOLS_WIN];

  Standard_Size * aNextPool = myAllocList;
  Standard_Size * aPrevPool = NULL;
  const Standard_Size nCells = INDEX_CELL(myCellSize);
  Standard_Integer nPool = 0, nPoolFreed = 0;

  // process the pool chain in windows of up to NB_POOLS_WIN pools
  while (aNextPool) {
    // fill the table of pools
    Standard_Integer iPool;
    for (iPool = 0; aNextPool && iPool < NB_POOLS_WIN; iPool++) {
      aPools[iPool] = aNextPool;
      aFreeSize[iPool] = 0;
      aNextPool = * (Standard_Size **) aNextPool; // get next pool
    }
    const Standard_Integer iLast = iPool - 1;
    nPool += iPool;

    // scan free blocks, find corresponding pools and increment
    // counters
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      // full footprint of one block of this list: header + payload
      Standard_Size aSize = BLOCK_SHIFT * sizeof(Standard_Size) +
                            ROUNDUP_CELL(1) * i;
      while(aFree) {
        for (iPool = 0; iPool <= iLast; iPool++) {
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN) {
            aFreeSize[iPool] += aSize;
            break;
          }
        }
        aFree = * (Standard_Size **) aFree; // get next free block
      }
    }

    // scan the table and make the list of free pools
    // (a pool is free when its counted free size covers the whole pool)
    Standard_Integer iLastFree = -1;
    for (iPool = 0; iPool <= iLast; iPool++) {
      aFreeSize[iPool] = ROUNDUP_CELL(aFreeSize[iPool]);
      if (aFreeSize[iPool] == RPoolSize)
        aFreePools[++iLastFree] = iPool;
    }
    if (iLastFree == -1) {
      // no free pools found in this table
      aPrevPool = aPools[iLast];
      continue;
    }

    // scan free blocks again, and remove those of them
    // that belong to free pools
    Standard_Integer j;
    for (i = 0; i <= nCells; i++ ) {
      Standard_Size * aFree = myFreeList[i];
      Standard_Size * aPrevFree = NULL;
      while(aFree) {
        for (j = 0; j <= iLastFree; j++) {
          iPool = aFreePools[j];
          if (aFree >= aPools[iPool] && aFree < aPools[iPool] + PoolSizeN)
            break;
        }
        if (j <= iLastFree)
        {
          // remove
          aFree = * (Standard_Size **) aFree;
          if (aPrevFree)
            * (Standard_Size **) aPrevFree = aFree; // link to previous
          else
            myFreeList[i] = aFree;
          nbFreed++;
        }
        else {
          // skip
          aPrevFree = aFree;
          aFree = * (Standard_Size **) aFree;
        }
      }
    }

    // release free pools, and reconnect remaining pools
    // in the linked list
    Standard_Size * aPrev = (aFreePools[0] == 0
                             ? aPrevPool
                             : aPools[aFreePools[0] - 1]);
    for (j = 0; j <= iLastFree; j++) {
      iPool = aFreePools[j];
      if (j > 0) {
        // update the pointer to the previous non-free pool
        if (iPool - aFreePools[j - 1] > 1)
          aPrev = aPools[iPool - 1];
      }
      if (j == iLastFree || aFreePools[j + 1] - iPool > 1) {
        // get next non-free pool
        Standard_Size * aNext =
          (j == iLastFree && aFreePools[j] == iLast)
          ? aNextPool
          : aPools[iPool + 1];
        // and connect it to the list of pools that have been processed
        // and remain non-free
        if (aPrev)
          * (Standard_Size **) aPrev = aNext;
        else
          myAllocList = aNext;
      }
      FreeMemory(aPools[iPool], PoolSize);
    }
    // update the pointer to the previous non-free pool
    aPrevPool = (aFreePools[iLastFree] == iLast
                 ? aPrev
                 : aPools[iLast]);
    nPoolFreed += iLastFree + 1;
  }

  return nbFreed;
}
639 | |
640 | //======================================================================= |
641 | //function : FreePools |
642 | //purpose : Frees all memory pools allocated for small blocks |
643 | //======================================================================= |
644 | |
645 | void Standard_MMgrOpt::FreePools() |
646 | { |
647 | // Lock access to critical data by mutex |
bd0c22ce |
648 | Standard_Mutex::Sentry aSentry (myMutexPools); |
7fd59977 |
649 | |
650 | // last pool is remembered in myAllocList |
651 | Standard_Size * aFree = myAllocList; |
652 | myAllocList = 0; |
653 | while (aFree) { |
654 | Standard_Size * aBlock = aFree; |
655 | // next pool address is stored in first 8 bytes of each pool |
656 | aFree = * (Standard_Size **) aFree; |
657 | // free pool (note that its size is calculated rather than stored) |
658 | FreeMemory ( aBlock, myPageSize * myNbPages ); |
659 | } |
660 | } |
661 | |
662 | //======================================================================= |
663 | //function : Reallocate |
664 | //purpose : |
665 | //======================================================================= |
666 | |
547702a1 |
667 | Standard_Address Standard_MMgrOpt::Reallocate(Standard_Address theStorage, |
7fd59977 |
668 | const Standard_Size theNewSize) |
669 | { |
cf9a910a |
670 | // if theStorage == NULL, just allocate new memory block |
671 | if (!theStorage) |
672 | { |
673 | return Allocate(theNewSize); |
674 | } |
675 | |
7fd59977 |
676 | Standard_Size * aBlock = GET_BLOCK(theStorage); |
677 | Standard_Address newStorage = NULL; |
678 | |
679 | // get current size of the memory block from its header |
680 | Standard_Size OldSize = aBlock[0]; |
681 | |
682 | // if new size is less than old one, just do nothing |
683 | if (theNewSize <= OldSize) { |
684 | newStorage = theStorage; |
685 | } |
686 | // otherwise, allocate new block and copy the data to it |
687 | else { |
688 | newStorage = Allocate(theNewSize); |
689 | memcpy (newStorage, theStorage, OldSize); |
690 | Free( theStorage ); |
691 | // clear newly added part of the block |
692 | if ( myClear ) |
693 | memset(((char*)newStorage) + OldSize, 0, theNewSize-OldSize); |
694 | } |
7fd59977 |
695 | return newStorage; |
696 | } |
697 | |
698 | //======================================================================= |
699 | //function : AllocMemory |
700 | //purpose : Allocate a big block of memory using either malloc/calloc |
701 | // or memory mapped file |
702 | //======================================================================= |
703 | |
//! Allocates a big, page-aligned chunk of memory (used both for the
//! small-block pools and for blocks above the recycling threshold),
//! either through memory mapped files (when myMMap is set) or through
//! malloc/calloc. On success the actually allocated size is written
//! back into Size. If the system refuses the allocation, the free
//! lists are purged and the allocation retried before throwing
//! Standard_OutOfMemory.
Standard_Size * Standard_MMgrOpt::AllocMemory(Standard_Size &Size)
{
  // goto is used as efficient method for a possibility to retry allocation
retry:

  Standard_Size * aBlock = NULL;

  // if MMap option is ON, allocate using memory mapped files
  if (myMMap) {
#ifndef _WIN32

    // align size to page size
    const Standard_Size AlignedSize = PAGE_ALIGN(Size, myPageSize);

    // allocate memory
    // note that on UNIX myMMap is file descriptor for /dev/null
    aBlock = (Standard_Size * )mmap((char*)MMAP_BASE_ADDRESS, AlignedSize,
                                    PROT_READ | PROT_WRITE, MMAP_FLAGS,
                                    myMMap, 0);
    if (aBlock == MAP_FAILED /* -1 */) {
      // save errno before Purge() can disturb it
      int errcode = errno;
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      throw Standard_OutOfMemory(strerror(errcode));
    }

    // save actually allocated size into argument
    Size = AlignedSize;

#else /* _WIN32 */

    // align size to page size, taking into account additional space needed to
    // store handle to the memory map
    const Standard_Size AlignedSize = PAGE_ALIGN(Size+sizeof(HANDLE), myPageSize);

    // allocate mapped file (backed by the system paging file)
    HANDLE hMap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                                    PAGE_READWRITE,
                                    DWORD(AlignedSize / 0x80000000),
                                    DWORD(AlignedSize % 0x80000000), NULL);
    HANDLE * aMBlock = (hMap && GetLastError() != ERROR_ALREADY_EXISTS ?
                        (HANDLE*)MapViewOfFile(hMap,FILE_MAP_WRITE,0,0,0) : NULL);
    // check for error and try allocating address space
    if ( ! aMBlock )
    {
      // close handle if allocated
      if ( hMap )
        CloseHandle(hMap);
      hMap = 0;
      // as a last resort, try freeing some memory by calling Purge() and retry
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, make error message and raise exception
      const int BUFSIZE=1024;

      wchar_t message[BUFSIZE];

      if ( FormatMessageW (FORMAT_MESSAGE_FROM_SYSTEM, 0, GetLastError(), 0,
                           message, BUFSIZE-1, 0) <=0 )
        StringCchCopyW(message, _countof(message), L"Standard_MMgrOpt::AllocMemory() failed to mmap");

      // convert to UTF-8 for the narrow-string exception constructor
      char messageA[BUFSIZE];
      WideCharToMultiByte(CP_UTF8, 0, message, -1, messageA, sizeof(messageA), NULL, NULL);
      throw Standard_OutOfMemory(messageA);
    }

    // record map handle in the beginning (recovered later by FreeMemory)
    aMBlock[0] = hMap;

    // and shift to the beginning of usable area
    aBlock = (Standard_Size*)(aMBlock+1);

    // save actually allocated size into argument
    Size = AlignedSize - sizeof(HANDLE);
#endif
  }
  // else just allocate by malloc or calloc
  else {
    aBlock = (Standard_Size *) (myClear ? calloc(Size,sizeof(char)) : malloc(Size));
    // check the result
    if ( ! aBlock )
    {
      // as a last resort, try freeing some memory by calling Purge()
      if ( Purge(Standard_False) )
        goto retry;
      // if nothing helps, raise exception
      throw Standard_OutOfMemory("Standard_MMgrOpt::Allocate(): malloc failed");
    }
  }
  // clear whole block if clearing option is set
  if (myClear)
    memset (aBlock, 0, Size);
  return aBlock;
}
800 | |
801 | //======================================================================= |
802 | //function : FreeMemory |
803 | //purpose : |
804 | //======================================================================= |
805 | |
//! Releases a chunk obtained from AllocMemory(): munmap() or
//! UnmapViewOfFile()/CloseHandle() when memory mapping is enabled,
//! plain free() otherwise. On Windows the size parameter is unused
//! (hence unnamed there): the mapping handle is recovered from the
//! hidden header stored just before the block.
void Standard_MMgrOpt::FreeMemory (Standard_Address aBlock,
                                   const Standard_Size
#ifndef _WIN32
                                   aSize
#endif
                                  )
{
  // release memory (either free or unmap)
  if ( myMMap ) {
#ifndef _WIN32
    // align size to page size, just the same as in AllocMemory()
    const Standard_Size AlignedSize = PAGE_ALIGN(aSize, myPageSize);
    munmap((char*)aBlock, AlignedSize);
#else
    // recover handle to the memory mapping stored just before the block
    const HANDLE * aMBlock = (const HANDLE *)aBlock;
    HANDLE hMap = *(--aMBlock);
    UnmapViewOfFile((LPCVOID)aMBlock);
    CloseHandle (hMap);
#endif
  }
  else
    free(aBlock);
}
829 | } |