1 /*************************************************
2 * Exim - an Internet mail transport agent *
3 *************************************************/
5 /* Copyright (c) University of Cambridge 1995 - 2018 */
6 /* Copyright (c) The Exim maintainers 2019 - 2020 */
7 /* See the file NOTICE for conditions of use and distribution. */
9 /* Exim gets and frees all its store through these functions. In the original
10 implementation there was a lot of mallocing and freeing of small bits of store.
11 The philosophy has now changed to a scheme which includes the concept of
12 "stacking pools" of store. For the short-lived processes, there isn't any real
13 need to do any garbage collection, but the stack concept allows quick resetting
14 in places where this seems sensible.
16 Obviously the long-running processes (the daemon, the queue runner, and eximon)
17 must take care not to eat store.
19 The following different types of store are recognized:
21 . Long-lived, large blocks: This is implemented by retaining the original
22 malloc/free functions, and is used for permanent working buffers and for
23 getting blocks to cut up for the other types.
25 . Long-lived, small blocks: This is used for blocks that have to survive until
26 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
27 functionally the same as store_malloc(), except that the store can't be
28 freed, but I expect it to be more efficient for handling small blocks.
30 . Short-lived, short blocks: Most of the dynamic store falls into this
31 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
32 after accepting a message when multiple messages are received by a single
33 process. Resetting happens at some other times as well, usually fairly
34 locally after some specific processing that needs working store.
36 . There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
37 This means it can be freed when search_tidyup() is called to close down all the lookup caching.
40 . There is another pool (POOL_MESSAGE) used for medium-lifetime objects; within
41 a single message transaction but needed for longer than the use of the main
42 pool permits. Currently this means only receive-time DKIM information.
44 . Orthogonal to the four pool types, there are two classes of memory: untainted
45 and tainted. The latter is used for values derived from untrusted input, and
46 the string-expansion mechanism refuses to operate on such values (obviously,
47 it can expand an untainted value to return a tainted result). The classes
48 are implemented by duplicating the four pool types. Pool resets are requested
49 against the untainted sibling and apply to both siblings.
51 Only memory blocks requested for tainted use are regarded as tainted; anything
52 else (including stack auto variables) is untainted. Care is needed when coding
53 to not copy untrusted data into untainted memory, as downstream taint-checks would then be bypassed.
56 Intermediate layers (eg. the string functions) can test for taint, and use this
57 for ensuring that results have proper state. For example the
58 string_vformat_trc() routine supporting the string_sprintf() interface will
59 recopy a string being built into a tainted allocation if it meets a %s for a
60 tainted argument. Any intermediate-layer function that (can) return a new
61 allocation should behave this way; returning a tainted result if any tainted
62 content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
63 existing allocations fail if tainted data is written into an untainted area.
64 Users of functions that modify existing allocations should check if a tainted
65 source and an untainted destination is used, and fail instead (sprintf() being a notable example of such a function). */
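/* Illustrative sketch (added; not part of the original source): how caller code
typically uses the pools and taint classes described above.  It assumes the
store_get()/store_mark()/store_reset() macro wrappers and POOL_* constants from
store.h, plus Ustrlen(); the function name and the STORE_DOC_EXAMPLE guard are
hypothetical, and the guard is never defined, so this is documentation only. */

#ifdef STORE_DOC_EXAMPLE
static void
example_pool_usage(const uschar * smtp_arg)
{
rmark reset_point = store_mark();	/* remember both pool twins */

/* Data copied from untrusted (SMTP) input must go into tainted store */
int len = Ustrlen(smtp_arg) + 1;
uschar * copy = store_get(len, TRUE);
memcpy(copy, smtp_arg, len);

/* Scratch space derived only from program constants is untainted */
uschar * scratch = store_get(1024, FALSE);
scratch[0] = 0;

/* Data that must survive until process exit goes in POOL_PERM instead */
int old_pool = store_pool;
store_pool = POOL_PERM;
uschar * forever = store_get(64, FALSE);
forever[0] = 0;
store_pool = old_pool;

/* Release everything allocated since the mark, in both taint classes;
the POOL_PERM allocation is unaffected. */
store_reset(reset_point);
}
#endif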
71 /* keep config.h before memcheck.h, for NVALGRIND */
78 /* We need to know how to align blocks of data for general use. I'm not sure
79 how to get an alignment factor in general. In the current world, a value of 8
80 is probably right, and this is sizeof(double) on some systems and sizeof(void
81 *) on others, so take the larger of those. Since everything in this expression
82 is a constant, the compiler should optimize it to a simple constant wherever it
83 appears (I checked that gcc does do this). */
85 #define alignment \
86 (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
88 /* store_reset() will not free the following block if the last used block has
89 less than this much left in it. */
91 #define STOREPOOL_MIN_SIZE 256
93 /* Structure describing the beginning of each big block. */
95 typedef struct storeblock {
96 struct storeblock *next;
97 size_t length;
98 } storeblock;
100 /* Just in case we find ourselves on a system where the structure above has a
101 length that is not a multiple of the alignment, set up a macro for the padded length. */
104 #define ALIGNED_SIZEOF_STOREBLOCK \
105 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
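/* Worked example (added for illustration): with alignment == 8, a hypothetical
sizeof(storeblock) of 12 would be padded to ((12 + 8 - 1) / 8) * 8 == 16, so the
usable data area would start 16 bytes into each malloc'd chunk; if the structure
size is already a multiple of the alignment, the value is unchanged. */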
107 /* Size of block to get from malloc to carve up into smaller ones. This
108 must be a multiple of the alignment. We assume that 4096 is going to be
109 suitably aligned. Double the size per-pool for every malloc, to mitigate
110 certain denial-of-service attacks. Don't bother to decrease on block frees.
111 We waste on average half the current alloc size per pool. This could be several
112 hundred kB now, vs. 4kB with a constant-size block size. But the search time
113 for is_tainted(), linear in the number of blocks for the pool, is O(n log n)
114 rather than O(n^2) over a run that makes n allocations.
115 A test of 2000 RCPTs and just accept ACL had 370kB in 21 blocks before,
116 504kB in 6 blocks now, for the untainted-main (largest) pool.
117 Builds for restricted-memory systems can disable the expansion by
118 defining RESTRICTED_MEMORY */
119 /*XXX should we allow any for malloc's own overhead? But how much? */
121 /* #define RESTRICTED_MEMORY */
122 #define STORE_BLOCK_SIZE(order) ((1U << (order)) - ALIGNED_SIZEOF_STOREBLOCK)
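/* Worked example (added for illustration): the initialisation loop below sets
store_block_order[] to 12, so a pool's first malloc is 1 << 12 == 4096 bytes, of
which STORE_BLOCK_SIZE(12) == 4096 - ALIGNED_SIZEOF_STOREBLOCK is carvable; with
the order bumped on each new block, the next mallocs for that pool are 8kB
(order 13), 16kB (order 14), and so on. */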
124 /* Variables holding data for the local pools of store. The current pool number
125 is held in store_pool, which is global so that it can be changed from outside.
126 Setting the initial length values to -1 forces a malloc for the first call,
127 even if the length is zero (which is used for getting a point to reset to). */
129 int store_pool = POOL_MAIN;
131 static storeblock *chainbase[NPOOLS];
132 static storeblock *current_block[NPOOLS];
133 static void *next_yield[NPOOLS];
134 static int yield_length[NPOOLS];
135 static unsigned store_block_order[NPOOLS];
137 /* pool_malloc holds the amount of memory used by the store pools; this goes up
138 and down as store is reset or released. nonpool_malloc is the total got by
139 malloc from other calls; this doesn't go down because it is just freed by pointer.
142 static int pool_malloc;
143 static int nonpool_malloc;
145 /* This variable is set by store_get() to its yield, and by store_reset() to
146 NULL. This enables string_cat() to optimize its store handling for very long
147 strings. That's why the variable is global. */
149 void *store_last_get[NPOOLS];
151 /* These are purely for stats-gathering */
153 static int nbytes[NPOOLS]; /* current bytes allocated */
154 static int maxbytes[NPOOLS]; /* max number reached */
155 static int nblocks[NPOOLS]; /* current number of blocks allocated */
156 static int maxblocks[NPOOLS];
157 static unsigned maxorder[NPOOLS];
158 static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
159 static int max_nonpool_blocks;
160 static int max_pool_malloc; /* max value for pool_malloc */
161 static int max_nonpool_malloc; /* max value for nonpool_malloc */
164 #ifndef COMPILE_UTILITY
165 static const uschar * pooluse[NPOOLS] = {
166 [POOL_MAIN] = US"main",
167 [POOL_PERM] = US"perm",
168 [POOL_SEARCH] = US"search",
169 [POOL_MESSAGE] = US"message",
170 [POOL_TAINT_MAIN] = US"main",
171 [POOL_TAINT_PERM] = US"perm",
172 [POOL_TAINT_SEARCH] = US"search",
174 [POOL_TAINT_MESSAGE] = US"message",
176 static const uschar * poolclass[NPOOLS] = {
177 [POOL_MAIN] = US"untainted",
178 [POOL_PERM] = US"untainted",
179 [POOL_SEARCH] = US"untainted",
180 [POOL_MESSAGE] = US"untainted",
181 [POOL_TAINT_MAIN] = US"tainted",
182 [POOL_TAINT_PERM] = US"tainted",
183 [POOL_TAINT_SEARCH] = US"tainted",
184 [POOL_TAINT_MESSAGE] = US"tainted",
189 static void * internal_store_malloc(int, const char *, int);
190 static void internal_store_free(void *, const char *, int linenumber);
192 /******************************************************************************/
193 /* Initialisation, for things fragile with parameter changes when using
194 static initialisers. */
199 for (int i = 0; i < NPOOLS; i++)
201 yield_length[i] = -1;
202 store_block_order[i] = 12; /* log2(allocation_size) ie. 4kB */
206 /******************************************************************************/
208 /* Test if a pointer refers to tainted memory.
210 Slower version of the check, for use when the platform intermixes malloc and mmap area
211 addresses. Test against the current-block of all tainted pools first, then all
212 blocks of all tainted pools.
214 Return: TRUE iff tainted
218 is_tainted_fn(const void * p)
222 for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
223 if ((b = current_block[pool]))
225 uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
226 if (US p >= bc && US p < bc + b->length) return TRUE;
229 for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
230 for (b = chainbase[pool]; b; b = b->next)
232 uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
233 if (US p >= bc && US p < bc + b->length) return TRUE;
240 die_tainted(const uschar * msg, const uschar * func, int line)
242 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
248 /*************************************************
249 * Get a block from the current pool *
250 *************************************************/
252 /* Running out of store is a total disaster. This function is called via the
253 macro store_get(). It passes back a block of store within the current big
254 block, getting a new one if necessary. The address is saved in store_last_get[] for the pool.
258 size amount wanted, bytes
259 tainted class: set to true for untrusted data (eg. from smtp input)
260 func function from which called
261 linenumber line number in source file
263 Returns: pointer to store (panic on malloc failure)
267 store_get_3(int size, BOOL tainted, const char *func, int linenumber)
269 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
271 /* Ensure we've been asked to allocate memory.
272 A negative size is a sign of a security problem.
273 A zero size is also suspect (but we might have to allow it if we find our API
274 expects it in some places). */
277 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
278 "bad memory allocation requested (%d bytes) at %s %d",
279 size, func, linenumber);
282 /* Round up the size to a multiple of the alignment. Although this looks a
283 messy statement, because "alignment" is a constant expression, the compiler can
284 do a reasonable job of optimizing, especially if the value of "alignment" is a
285 power of two. I checked this with -O2, and gcc did very well, compiling it to 4
286 instructions on a Sparc (alignment = 8). */
288 if (size % alignment != 0) size += alignment - (size % alignment);
290 /* If there isn't room in the current block, get a new one. The minimum
291 size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
292 these functions are mostly called for small amounts of store. */
294 if (size > yield_length[pool])
297 STORE_BLOCK_SIZE(store_block_order[pool]) - ALIGNED_SIZEOF_STOREBLOCK,
299 int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
300 storeblock * newblock;
302 /* Sometimes store_reset() may leave a block for us; check if we can use it */
304 if ( (newblock = current_block[pool])
305 && (newblock = newblock->next)
306 && newblock->length < length
309 /* Give up on this block, because it's too small */
311 internal_store_free(newblock, func, linenumber);
315 /* If there was no free block, get a new one */
319 if ((nbytes[pool] += mlength) > maxbytes[pool])
320 maxbytes[pool] = nbytes[pool];
321 if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */
322 max_pool_malloc = pool_malloc;
323 nonpool_malloc -= mlength; /* Exclude from overall total */
324 if (++nblocks[pool] > maxblocks[pool])
325 maxblocks[pool] = nblocks[pool];
327 newblock = internal_store_malloc(mlength, func, linenumber);
328 newblock->next = NULL;
329 newblock->length = length;
330 #ifndef RESTRICTED_MEMORY
331 if (++store_block_order[pool] > maxorder[pool])
332   maxorder[pool] = store_block_order[pool];
335 if (!chainbase[pool])
336 chainbase[pool] = newblock;
337 else
338   current_block[pool]->next = newblock;
341 current_block[pool] = newblock;
342 yield_length[pool] = newblock->length;
343 next_yield[pool] =
344   (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
345 (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
348 /* There's (now) enough room in the current block; the yield is the next unallocated space. */
351 store_last_get[pool] = next_yield[pool];
353 /* Cut out the debugging stuff for utilities, but stop picky compilers from giving warnings. */
356 #ifndef COMPILE_UTILITY
358 debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
359 store_last_get[pool], size, func, linenumber);
360 #endif /* COMPILE_UTILITY */
362 (void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
363 /* Update next pointer and number of bytes left in the current block. */
365 next_yield[pool] = (void *)(CS next_yield[pool] + size);
366 yield_length[pool] -= size;
367 return store_last_get[pool];
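/* Illustrative sketch (added): a minimal string_copy()-style helper built on
store_get(), showing the rule from the file header that a copy takes the taint
class of its source.  Assumes the store_get() macro wrapper and Ustrlen(); the
real Exim helpers differ in detail, and the names used here are hypothetical. */

#ifdef STORE_DOC_EXAMPLE
static uschar *
example_string_copy(const uschar * s)
{
int len = Ustrlen(s) + 1;
uschar * copy = store_get(len, is_tainted(s));	/* taint follows the source */
memcpy(copy, s, len);
return copy;
}
#endif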
372 /*************************************************
373 * Get a block from the PERM pool *
374 *************************************************/
376 /* This is just a convenience function, useful when just a single block is to be obtained.
381 func function from which called
382 linenumber line number in source file
384 Returns: pointer to store (panic on malloc failure)
388 store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
391 int old_pool = store_pool;
392 store_pool = POOL_PERM;
393 yield = store_get_3(size, tainted, func, linenumber);
394 store_pool = old_pool;
400 /*************************************************
401 * Extend a block if it is at the top *
402 *************************************************/
404 /* While reading strings of unknown length, it is often the case that the
405 string is being read into the block at the top of the stack. If it needs to be
406 extended, it is more efficient just to extend within the top block rather than
407 allocate a new block and then have to copy the data. This function is provided
408 for the use of string_cat(), but of course can be used elsewhere too.
409 The block itself is not expanded; only the top allocation from it.
412 ptr pointer to store block
413 oldsize current size of the block, as requested by user
414 newsize new size required
415 func function from which called
416 linenumber line number in source file
418 Returns: TRUE if the block is at the top of the stack and has been
419 extended; FALSE if it isn't at the top of the stack, or cannot be extended. */
424 store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
425 const char *func, int linenumber)
427 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
428 int inc = newsize - oldsize;
429 int rounded_oldsize = oldsize;
433 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
434 "bad memory extension requested (%d -> %d bytes) at %s %d",
435 oldsize, newsize, func, linenumber);
438 /* Check that the block being extended was already of the required taint status;
439 refuse to extend if not. */
441 if (is_tainted(ptr) != tainted)
442   return FALSE;
444 if (rounded_oldsize % alignment != 0)
445 rounded_oldsize += alignment - (rounded_oldsize % alignment);
447 if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
448     inc > yield_length[pool] + rounded_oldsize - oldsize)
449   return FALSE;
451 /* Cut out the debugging stuff for utilities, but stop picky compilers from giving warnings. */
454 #ifndef COMPILE_UTILITY
456 debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
458 #endif /* COMPILE_UTILITY */
460 if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
461 next_yield[pool] = CS ptr + newsize;
462 yield_length[pool] -= newsize - rounded_oldsize;
463 (void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
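/* Illustrative sketch (added): the grow-in-place pattern this function enables
for string_cat()-style code -- try to extend the top allocation, else copy to a
larger one.  Assumes the store_extend() and store_newblock() macro wrappers from
store.h; the function name and STORE_DOC_EXAMPLE guard are hypothetical. */

#ifdef STORE_DOC_EXAMPLE
static uschar *
example_grow(uschar * buf, BOOL tainted, int oldsize, int newsize)
{
/* Cheap path: the buffer is the top allocation and can grow in place */
if (store_extend(buf, tainted, oldsize, newsize))
  return buf;

/* Otherwise copy the existing content into a fresh, larger allocation */
return store_newblock(buf, tainted, newsize, oldsize);
}
#endif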
471 is_pwr2_size(int len)
474 return (x & (x - 1)) == 0;
478 /*************************************************
479 * Back up to a previous point on the stack *
480 *************************************************/
482 /* This function resets the next pointer, freeing any subsequent whole blocks
483 that are now unused. Call with a cookie obtained from store_mark() only; do
484 not call with a pointer returned by store_get(). Both the untainted and tainted
485 pools corresponding to store_pool are reset.
488 r place to back up to
489 func function from which called
490 linenumber line number in source file
496 internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
499 storeblock * b = current_block[pool];
500 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
501 int newlength, count;
502 #ifndef COMPILE_UTILITY
503 int oldmalloc = pool_malloc;
506 /* Last store operation was not a get */
508 store_last_get[pool] = NULL;
510 /* See if the place is in the current block - as it often will be. Otherwise,
511 search for the block in which it lies. */
513 if (CS ptr < bc || CS ptr > bc + b->length)
515 for (b = chainbase[pool]; b; b = b->next)
517 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
518 if (CS ptr >= bc && CS ptr <= bc + b->length) break;
521 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
522 "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
525 /* Back up, rounding to the alignment if necessary. When testing, flatten
526 the released memory. */
528 newlength = bc + b->length - CS ptr;
529 #ifndef COMPILE_UTILITY
532 assert_no_variables(ptr, newlength, func, linenumber);
533 if (f.running_in_test_harness)
535 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
536 memset(ptr, 0xF0, newlength);
540 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
541 next_yield[pool] = CS ptr + (newlength % alignment);
542 count = yield_length[pool];
543 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
544 current_block[pool] = b;
546 /* Free any subsequent block. Do NOT free the first
547 successor, if our current block has less than 256 bytes left. This should
548 prevent us from flapping memory. However, keep this block only when it has
549 a power-of-two size, so it is probably not a custom inflated one. */
551 if ( yield_length[pool] < STOREPOOL_MIN_SIZE
552    && b->next
553    && is_pwr2_size(b->next->length + ALIGNED_SIZEOF_STOREBLOCK))
556 #ifndef COMPILE_UTILITY
558 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
561 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
562 b->length - ALIGNED_SIZEOF_STOREBLOCK);
570 int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
572 #ifndef COMPILE_UTILITY
574 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
581 internal_store_free(b, func, linenumber);
583 #ifndef RESTRICTED_MEMORY
584 if (store_block_order[pool] > 13) store_block_order[pool]--;
588 /* Cut out the debugging stuff for utilities, but stop picky compilers from giving warnings. */
591 #ifndef COMPILE_UTILITY
593 debug_printf("---%d Rst %6p %5d %-14s %4d\tpool %d\n", pool, ptr,
594 count + oldmalloc - pool_malloc,
595 func, linenumber, pool_malloc);
596 #endif /* COMPILE_UTILITY */
601 store_reset_3(rmark r, const char *func, int linenumber)
605 if (store_pool >= POOL_TAINT_BASE)
606 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
607 "store_reset called for pool %d: %s %d\n", store_pool, func, linenumber);
608 if (!r)
609   log_write(0, LOG_MAIN|LOG_PANIC_DIE,
610 "store_reset called with bad mark: %s %d\n", func, linenumber);
612 internal_store_reset(*ptr, store_pool + POOL_TAINT_BASE, func, linenumber);
613 internal_store_reset(ptr, store_pool, func, linenumber);
619 /* Free tail-end unused allocation. This lets us allocate a big chunk
620 early, for cases when we only discover later how much was really needed.
622 Can be called with a value from store_get(), or an offset after such. Only
623 the tainted or untainted pool that serviced the store_get() will be affected.
625 This is mostly a cut-down version of internal_store_reset().
626 XXX needs rationalising
630 store_release_above_3(void *ptr, const char *func, int linenumber)
632 /* Search all pools' "current" blocks. If it isn't one of those,
633 ignore it (it usually will be). */
635 for (int pool = 0; pool < nelem(current_block); pool++)
637 storeblock * b = current_block[pool];
639 int count, newlength;
644 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
645 if (CS ptr < bc || CS ptr > bc + b->length)
646   continue;
648 /* Last store operation was not a get */
650 store_last_get[pool] = NULL;
652 /* Back up, rounding to the alignment if necessary. When testing, flatten
653 the released memory. */
655 newlength = bc + b->length - CS ptr;
656 #ifndef COMPILE_UTILITY
659 assert_no_variables(ptr, newlength, func, linenumber);
660 if (f.running_in_test_harness)
662 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
663 memset(ptr, 0xF0, newlength);
667 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
668 next_yield[pool] = CS ptr + (newlength % alignment);
669 count = yield_length[pool];
670 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
672 /* Cut out the debugging stuff for utilities, but stop picky compilers from giving warnings. */
675 #ifndef COMPILE_UTILITY
677 debug_printf("---%d Rel %6p %5d %-14s %4d\tpool %d\n", pool, ptr, count,
678 func, linenumber, pool_malloc);
682 #ifndef COMPILE_UTILITY
684 debug_printf("non-last memory release try: %s %d\n", func, linenumber);
691 store_mark_3(const char *func, int linenumber)
695 #ifndef COMPILE_UTILITY
697 debug_printf("---%d Mrk %-14s %4d\tpool %d\n",
698 store_pool, func, linenumber, pool_malloc);
699 #endif /* COMPILE_UTILITY */
701 if (store_pool >= POOL_TAINT_BASE)
702 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
703 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
705 /* Stash a mark for the tainted-twin release, in the untainted twin. Return
706 a cookie (actually the address in the untainted pool) to the caller.
707 Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
708 and winds back the untainted pool with the cookie. */
710 p = store_get_3(sizeof(void *), FALSE, func, linenumber);
711 *p = store_get_3(0, TRUE, func, linenumber);
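/* Illustrative walk-through (added): the cookie returned above is a pointer P
into the untainted twin pool; *P holds the then-current allocation point of the
tainted twin.  store_reset(P) therefore rewinds the tainted pool to *P first,
then rewinds the untainted pool to P itself, releasing the cookie together with
everything allocated after it in either class. */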
718 /************************************************
720 ************************************************/
722 /* This function checks that the pointer it is given is the first thing in a
723 block, and if so, releases that block.
726 block block of store to consider
727 func function from which called
728 linenumber line number in source file
734 store_release_3(void * block, int pool, const char * func, int linenumber)
736 /* It will never be the first block, so no need to check that. */
738 for (storeblock * b = chainbase[pool]; b; b = b->next)
740 storeblock * bb = b->next;
741 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
743 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
749 /* Cut out the debugging stuff for utilities, but stop picky compilers
750 from giving warnings. */
752 #ifndef COMPILE_UTILITY
754 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
755 linenumber, pool_malloc);
757 if (f.running_in_test_harness)
758 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
759 #endif /* COMPILE_UTILITY */
761 internal_store_free(bb, func, linenumber);
768 /************************************************
770 ************************************************/
772 /* Allocate a new block big enough to expand to the given size and
773 copy the current data into it. Free the old one if possible.
775 This function is specifically provided for use when reading very
776 long strings, e.g. header lines. When the string gets longer than a
777 complete block, it gets copied to a new block. It is helpful to free
778 the old block iff the previous copy of the string is at its start,
779 and therefore the only thing in it. Otherwise, for very long strings,
780 dead store can pile up somewhat disastrously. This function checks that
781 the pointer it is given is the first thing in a block, and that nothing
782 has been allocated since. If so, releases that block.
789 Returns: new location of data
793 store_newblock_3(void * block, BOOL tainted, int newsize, int len,
794 const char * func, int linenumber)
796 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
797 BOOL release_ok = !tainted && store_last_get[pool] == block;
800 #if !defined(MACRO_PREDEF) && !defined(COMPILE_UTILITY)
801 if (is_tainted(block) != tainted)
802 die_tainted(US"store_newblock", CUS func, linenumber);
805 newtext = store_get(newsize, tainted);
806 memcpy(newtext, block, len);
807 if (release_ok) store_release_3(block, pool, func, linenumber);
808 return (void *)newtext;
814 /*************************************************
816 *************************************************/
818 /* Running out of store is a total disaster for exim. Some malloc functions
819 do not run happily on very small sizes, nor do they document this fact. This
820 function is called via the macro store_malloc().
823 size amount of store wanted
824 func function from which called
825 line line number in source file
827 Returns: pointer to gotten store (panic on failure)
831 internal_store_malloc(int size, const char *func, int line)
835 size += sizeof(int); /* space to store the size, used under debug */
836 if (size < 16) size = 16;
838 if (!(yield = malloc((size_t)size)))
839 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
840 "called from line %d in %s", size, line, func);
842 #ifndef COMPILE_UTILITY
843 DEBUG(D_any) *(int *)yield = size;
844 #endif
845 yield = US yield + sizeof(int);
847 if ((nonpool_malloc += size) > max_nonpool_malloc)
848 max_nonpool_malloc = nonpool_malloc;
850 /* Cut out the debugging stuff for utilities, but stop picky compilers from giving warnings. */
853 #ifndef COMPILE_UTILITY
854 /* If running in test harness, spend time making sure all the new store
855 is not filled with zeros so as to catch problems. */
857 if (f.running_in_test_harness)
858 memset(yield, 0xF0, (size_t)size - sizeof(int));
859 DEBUG(D_memory) debug_printf("--Malloc %6p %5d bytes\t%-20s %4d\tpool %5d nonpool %5d\n",
860 yield, size, func, line, pool_malloc, nonpool_malloc);
861 #endif /* COMPILE_UTILITY */
867 store_malloc_3(int size, const char *func, int linenumber)
869 if (n_nonpool_blocks++ > max_nonpool_blocks)
870 max_nonpool_blocks = n_nonpool_blocks;
871 return internal_store_malloc(size, func, linenumber);
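/* Illustrative sketch (added): store_malloc()/store_free() are for long-lived
buffers outside the pools and, unlike pool store, must be freed explicitly.
Assumes the store_malloc()/store_free() macro wrappers; the variable and
function names and the STORE_DOC_EXAMPLE guard are hypothetical. */

#ifdef STORE_DOC_EXAMPLE
static uschar * example_buffer = NULL;
static int example_buffer_size = 0;

/* Grow a long-lived working buffer, as daemon-style code might do */
static void
example_grow_buffer(int newsize)
{
if (newsize <= example_buffer_size) return;
if (example_buffer) store_free(example_buffer);
example_buffer = store_malloc(newsize);
example_buffer_size = newsize;
}
#endif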
875 /************************************************
877 ************************************************/
879 /* This function is called by the macro store_free().
882 block block of store to free
883 func function from which called
884 linenumber line number in source file
890 internal_store_free(void * block, const char * func, int linenumber)
892 uschar * p = US block - sizeof(int);
893 #ifndef COMPILE_UTILITY
894 DEBUG(D_any) nonpool_malloc -= *(int *)p;
895 DEBUG(D_memory) debug_printf("----Free %6p %5d bytes\t%-20s %4d\n", block, *(int *)p, func, linenumber);
901 store_free_3(void * block, const char * func, int linenumber)
904 internal_store_free(block, func, linenumber);
907 /******************************************************************************/
908 /* Stats output on process exit */
912 #ifndef COMPILE_UTILITY
915 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
916 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
917 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
918 for (int i = 0; i < NPOOLS; i++)
919 debug_printf("----Exit pool %d max: %3d kB in %d blocks at order %u\t%s %s\n",
920 i, (maxbytes[i]+1023)/1024, maxblocks[i], maxorder[i],
921 poolclass[i], pooluse[i]);
927 /******************************************************************************/
928 /* Per-message pool management */
930 static rmark message_reset_point = NULL;
935 int oldpool = store_pool;
936 store_pool = POOL_MESSAGE;
937 if (!message_reset_point) message_reset_point = store_mark();
938 store_pool = oldpool;
941 void message_tidyup(void)
944 if (!message_reset_point) return;
945 oldpool = store_pool;
946 store_pool = POOL_MESSAGE;
947 message_reset_point = store_reset(message_reset_point);
948 store_pool = oldpool;
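/* Illustrative sketch (added): how receive-time code might place per-message
data in POOL_MESSAGE so that it survives until message_tidyup() runs at the end
of the transaction.  Assumes the start-of-message helper above has already
recorded message_reset_point, and assumes the string_copy() helper; the names
used here and the STORE_DOC_EXAMPLE guard are hypothetical. */

#ifdef STORE_DOC_EXAMPLE
static const uschar * example_dkim_result = NULL;

static void
example_stash_dkim_result(const uschar * value)
{
int oldpool = store_pool;
store_pool = POOL_MESSAGE;
example_dkim_result = string_copy(value);	/* freed by message_tidyup() */
store_pool = oldpool;
}
#endif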