/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#define INCLUDE_MALLOC_H
#include "system.h"
#include "coretypes.h"
#include "timevar.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
#include "hosthooks.h"
#include "plugin.h"
#include "options.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */
/* This extra vector of dynamically registered root tables is used by
   ggc_mark_roots and makes it possible to add new GGC root tables at
   run time, for instance from plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
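
/* For illustration only (not from the original file): a plugin that keeps
   a single GC-protected tree alive could register it roughly like this,
   relying on the usual gengtype-generated walkers for trees;
   "my_plugin_decl" and "my_root_tab" are made-up names.

     static GTY(()) tree my_plugin_decl;

     static const struct ggc_root_tab my_root_tab[] =
       {
	 { &my_plugin_decl, 1, sizeof (my_plugin_decl),
	   &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
	 LAST_GGC_ROOT_TAB
       };

   and then call ggc_register_root_tab (my_root_tab) from plugin_init.  */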

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

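  /* The deletable-root tables list roots whose referents can simply be
     dropped at collection time (caches and the like); clearing the root
     pointers here is what makes those objects unreachable.  */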
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
			    MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as accessible, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
\f
/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

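/* Most GC pointers are aligned to at least 2**3 == 8 bytes, so the low
   three bits of an address carry little information; shifting them away
   gives a better-spread hash value.  */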
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;

/* Register an object in the hash table.  Return 1 if the object was
   newly registered, 0 if it was already known or needs no noting.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

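  /* NULL never needs noting, and (void *) 1 is not a real object either:
     it appears as a special marker value, e.g. the deleted-entry marker
     of libiberty hash tables that live in GC memory.  */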
  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
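
/* A sketch, for orientation (not from the original file): the
   gengtype-generated PCH walkers drive the machinery above in two
   phases.  A gt_pch_nx_* routine notes every reachable object and
   recurses only on the first visit, roughly:

     void
     gt_pch_nx_my_struct (void *x_p)	// "my_struct" is a made-up name
     {
       struct my_struct *x = (struct my_struct *) x_p;
       if (gt_pch_note_object (x, x, gt_pch_p_9my_struct))
	 gt_pch_nx_my_struct (x->next);	// walk pointed-to objects

     }

   while the matching gt_pch_p_* routine later presents the address of
   each pointer field to a callback (relocate_ptrs below) so that it can
   be rewritten to the object's new location.  */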

/* Register the reorder function REORDER_FN for OBJ, which must already
   have been registered with gt_pch_note_object.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
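  /* Sort by assigned address so the objects are written to the PCH file
     in exactly the order they will occupy in the restored image.  */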
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o = ftell (state.f);
    if (o == -1)
      fatal_error (input_location, "cannot get position in PCH file: %m");
    o += sizeof (mmi);
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
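  /* Worked example: with a 4096-byte granularity and o == 10000, the
     padding is 4096 - 10000 % 4096 == 2288 bytes, so the mmapped area
     starts at offset 12288, the next multiple of 4096.  */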
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "cannot write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size, true);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that the first part of obj is addressable and
		 the rest is unaddressable; find the boundary by binary
		 search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error (input_location, "cannot read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error (input_location, "cannot read PCH file: %m");

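  /* The use_address hook returns a tristate: negative if the PCH data
     cannot be placed at the preferred base (relocation would be needed),
     zero if memory was reserved there but the data still has to be read
     from the file, positive if the hook already mapped the file contents
     at that address.  */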
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error (input_location, "had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error (input_location, "cannot read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error (input_location, "cannot read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
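  /* (addr == base) is 1 exactly when malloc happened to return the
     preferred base, so this yields 0 on success and -1 otherwise.  */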
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as the page size.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the BASE operand of mmap
   even without MAP_FIXED if BASE through BASE+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * ONE_M)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= ONE_G;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
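
/* Worked example: with 256MB of physical memory and no tighter rlimit,
   the formula above gives 30 + 70 * 0.25 = 47.5, so GGC_MIN_EXPAND
   defaults to 47%; at 1GB of RAM or more it saturates at 100%.  */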

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= ONE_K; /* Convert to Kbytes.  */
  limit_kbytes /= ONE_K;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / ONE_K);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * ONE_K));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * ONE_K);
  phys_kbytes = MIN (phys_kbytes, 128 * ONE_K);

  return phys_kbytes;
}
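
/* Worked example: with 2GB of RAM and no rlimits in force, RAM/8 is
   262144 kB, which the clamps above reduce to the 131072 kB ceiling,
   so GGC_MIN_HEAPSIZE defaults to 128MB.  */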
#endif

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  param_ggc_min_expand = ggc_min_expand_heuristic ();
  param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
#endif
}

/* GGC memory usage.  */
class ggc_usage: public mem_usage
{
 public:
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
	     size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
      m_freed (freed), m_collected (collected), m_overhead (overhead) {}

  /* Equality operator.  */
  inline bool
  operator== (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance ()
	    && m_peak == second.m_peak
	    && m_times == second.m_times);
  }

  /* Comparison operator.  */
  inline bool
  operator< (const ggc_usage &second) const
  {
    if (*this == second)
      return false;

    return (get_balance () == second.get_balance () ?
	    (m_peak == second.m_peak ? m_times < second.m_times
	     : m_peak < second.m_peak)
	    : get_balance () < second.get_balance ());
  }

  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void
  register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void
  release_overhead (size_t size)
  {
    m_freed += size;
  }

  /* Sum the usage with SECOND usage.  */
  ggc_usage
  operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
		      m_times + second.m_times,
		      m_peak + second.m_peak,
		      m_freed + second.m_freed,
		      m_collected + second.m_collected,
		      m_overhead + second.m_overhead);
  }

  /* Dump usage with PREFIX, where TOTAL is sum of all rows.  */
  inline void
  dump (const char *prefix, ggc_usage &total) const
  {
    size_t balance = get_balance ();
    fprintf (stderr,
	     "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
	     PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
	     prefix,
	     SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
	     SIZE_AMOUNT (m_collected),
	     get_percent (m_collected, total.m_collected),
	     SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
	     SIZE_AMOUNT (m_overhead),
	     get_percent (m_overhead, total.m_overhead),
	     SIZE_AMOUNT (m_times));
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void
  dump (mem_location *loc, ggc_usage &total) const
  {
    char *location_string = loc->to_string ();

    dump (location_string, total);

    free (location_string);
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    dump ("Total", *this);
  }

  /* Get the balance: bytes that are still live, i.e. allocated plus
     overhead minus everything collected or freed.  */
  inline size_t
  get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }

  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Comparison wrapper used by qsort.  */
  static int
  compare (const void *first, const void *second)
  {
    const mem_pair_t mem1 = *(const mem_pair_t *) first;
    const mem_pair_t mem2 = *(const mem_pair_t *) second;

    size_t balance1 = mem1.second->get_balance ();
    size_t balance2 = mem2.second->get_balance ();

    return balance1 == balance2 ? 0 : (balance1 < balance2 ? 1 : -1);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Leak", "Garbage",
	     "Freed", "Overhead", "Times");
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};

/* GCC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics ()
{
  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  ggc_mem_desc.dump (GGC_ORIGIN);

  ggc_force_collect = false;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
						       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      {
	(*it).second.first->m_collected += (*it).second.second;
	ggc_mem_desc.m_reverse_object_map->remove ((*it).first);
      }
}

/* Print the memory used by the heap, if this information is available.  */

void
report_heap_memory_use ()
{
#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
#ifdef HAVE_MALLINFO2
#define MALLINFO_FN mallinfo2
#else
#define MALLINFO_FN mallinfo
#endif
  if (!quiet_flag)
    fprintf (stderr, " {heap " PRsa (0) "}",
	     SIZE_AMOUNT (MALLINFO_FN ().arena));
#endif
}