40 #if defined(_SC_PAGE_SIZE)
41 #define MHD_SC_PAGESIZE _SC_PAGE_SIZE
42 #elif defined(_SC_PAGESIZE)
43 #define MHD_SC_PAGESIZE _SC_PAGESIZE
48 #if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
49 #define MAP_ANONYMOUS MAP_ANON
52 #define MAP_FAILED NULL
53 #elif ! defined(MAP_FAILED)
54 #define MAP_FAILED ((void*) -1)
/* Alignment granularity for pool allocations: twice the size of a
 * pointer, which is enough for any scalar type on common ABIs.
 * NOTE(review): the leading "60"/"65"/"66" tokens on these lines look
 * like line numbers leaked in from an extraction of the original file
 * and would not compile as-is — confirm against the upstream source. */
60 #define ALIGN_SIZE (2 * sizeof(void*))
/* Round 'n' up to the next multiple of ALIGN_SIZE.
 * The argument is evaluated exactly once; ROUND_TO_ALIGN(0) is 0.
 * NOTE(review): no overflow guard — presumably callers bound 'n' so
 * that adding (ALIGN_SIZE - 1) cannot wrap; verify at call sites. */
65 #define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
66 / (ALIGN_SIZE) *(ALIGN_SIZE))
68 #if defined(PAGE_SIZE) && (0 < (PAGE_SIZE + 0))
69 #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
70 #elif defined(PAGESIZE) && (0 < (PAGESIZE + 0))
71 #define MHD_DEF_PAGE_SIZE_ PAGESIZE
73 #define MHD_DEF_PAGE_SIZE_ (4096)
87 #ifdef MHD_SC_PAGESIZE
89 result = sysconf (MHD_SC_PAGESIZE);
148 struct MemoryPool *pool;
153 pool = malloc (
sizeof (
struct MemoryPool));
156 #if defined(MAP_ANONYMOUS) || defined(_WIN32)
157 if ( (max <= 32 * 1024) ||
167 #if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
168 pool->memory = mmap (
NULL,
170 PROT_READ | PROT_WRITE,
171 MAP_PRIVATE | MAP_ANONYMOUS,
174 #elif defined(_WIN32)
175 pool->memory = VirtualAlloc (
NULL,
177 MEM_COMMIT | MEM_RESERVE,
187 pool->memory = malloc (alloc_size);
188 if (
NULL == pool->memory)
193 pool->is_mmap =
false;
195 #if defined(MAP_ANONYMOUS) || defined(_WIN32)
198 pool->is_mmap =
true;
203 pool->end = alloc_size;
204 pool->size = alloc_size;
222 mhd_assert (pool->size >= pool->end - pool->pos);
226 #if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
227 munmap (pool->memory,
229 #elif defined(_WIN32)
230 VirtualFree (pool->memory,
250 mhd_assert (pool->size >= pool->end - pool->pos);
251 return (pool->end - pool->pos);
275 mhd_assert (pool->size >= pool->end - pool->pos);
277 if ( (0 == asize) && (0 != size) )
279 if ( (pool->pos + asize > pool->end) ||
280 (pool->pos + asize < pool->pos))
284 ret = &pool->memory[pool->end - asize];
289 ret = &pool->memory[pool->pos];
323 mhd_assert (pool->size >= pool->end - pool->pos);
326 mhd_assert (old ==
NULL || pool->memory + pool->size >= (uint8_t*) old
330 pool->memory + pool->pos > (uint8_t*) old);
334 const size_t old_offset = (uint8_t*) old - pool->memory;
335 const bool shrinking = (old_size > new_size);
339 memset ((uint8_t*) old + new_size, 0, old_size - new_size);
346 if ( (new_apos > pool->end) ||
347 (new_apos < pool->pos) )
351 pool->pos = new_apos;
359 if ( ( (0 == asize) &&
361 (asize > pool->end - pool->pos) )
364 new_blc = pool->memory + pool->pos;
370 memcpy (new_blc, old, old_size);
372 memset (old, 0, old_size);
398 mhd_assert (pool->size >= pool->end - pool->pos);
402 mhd_assert (keep ==
NULL || pool->memory + pool->size >= (uint8_t*) keep
404 if ( (
NULL != keep) &&
405 (keep != pool->memory) )
408 memmove (pool->memory,
413 if (pool->size > copy_bytes)
417 to_zero = pool->size - copy_bytes;
422 uint8_t *recommit_addr;
425 recommit_addr = pool->memory + pool->size - to_recommit;
429 if (VirtualFree (recommit_addr,
433 to_zero -= to_recommit;
435 if (recommit_addr != VirtualAlloc (recommit_addr,
443 memset (&pool->memory[copy_bytes],
448 pool->end = pool->size;