/* Emit RTL for the GCC expander.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Middle-to-low level generation of rtx code and insns.

   This file contains support functions for creating rtl expressions
   and manipulating them in the doubly-linked chain of insns.

   The patterns of the insns are created by machine-dependent
   routines in insn-emit.c, which is generated automatically from
   the machine description.  These routines make the individual rtx's
   of the pattern with `gen_rtx_fmt_ee' and others in genrtl.[ch],
   which are automatically generated from rtl.def; what is machine
   dependent is the kind of rtx's they make and what arguments they
   use.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "varasm.h"
#include "cfgrtl.h"
#include "tree-eh.h"
#include "explow.h"
#include "expr.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "stor-layout.h"
#include "opts.h"
#include "predict.h"
#include "rtx-vector-builder.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "gimplify.h"

struct target_rtl default_target_rtl;
#if SWITCHABLE_TARGET
struct target_rtl *this_target_rtl = &default_target_rtl;
#endif

#define initial_regno_reg_rtx (this_target_rtl->x_initial_regno_reg_rtx)

/* Commonly used modes.  */

scalar_int_mode byte_mode;	/* Mode whose width is BITS_PER_UNIT.  */
scalar_int_mode word_mode;	/* Mode whose width is BITS_PER_WORD.  */
scalar_int_mode ptr_mode;	/* Mode whose width is POINTER_SIZE.  */

/* Datastructures maintained for currently processed function in RTL form.  */

struct rtl_data x_rtl;

/* Indexed by pseudo register number, gives the rtx for that pseudo.
   Allocated in parallel with regno_pointer_align.
   FIXME: We could put it into emit_status struct, but gengtype is not
   able to deal with length attribute nested in top level structures.  */

rtx * regno_reg_rtx;

/* This is *not* reset after each function.  It gives each CODE_LABEL
   in the entire compilation a unique label number.  */

static GTY(()) int label_num = 1;

/* We record floating-point CONST_DOUBLEs in each floating-point mode for
   the values of 0, 1, and 2.  For the integer entries and VOIDmode, we
   record a copy of const[012]_rtx and constm1_rtx.  CONSTM1_RTX
   is set only for MODE_INT and MODE_VECTOR_INT modes.  */

rtx const_tiny_rtx[4][(int) MAX_MACHINE_MODE];

rtx const_true_rtx;

REAL_VALUE_TYPE dconst0;
REAL_VALUE_TYPE dconst1;
REAL_VALUE_TYPE dconst2;
REAL_VALUE_TYPE dconstm1;
REAL_VALUE_TYPE dconsthalf;

/* Record fixed-point constant 0 and 1.  */
FIXED_VALUE_TYPE fconst0[MAX_FCONST0];
FIXED_VALUE_TYPE fconst1[MAX_FCONST1];

/* We make one copy of (const_int C) where C is in
   [- MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT]
   to save space during the compilation and simplify comparisons of
   integers.  */

rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];

/* Standard pieces of rtx, to be substituted directly into things.  */
rtx pc_rtx;
rtx ret_rtx;
rtx simple_return_rtx;
rtx cc0_rtx;

/* Marker used for denoting an INSN, which should never be accessed (i.e.,
   this pointer should normally never be dereferenced), but is required to be
   distinct from NULL_RTX.  Currently used by peephole2 pass.  */
rtx_insn *invalid_insn_rtx;

/* A hash table storing CONST_INTs whose absolute value is greater
   than MAX_SAVED_CONST_INT.  */

struct const_int_hasher : ggc_cache_ptr_hash<rtx_def>
{
  typedef HOST_WIDE_INT compare_type;

  static hashval_t hash (rtx i);
  static bool equal (rtx i, HOST_WIDE_INT h);
};

static GTY ((cache)) hash_table<const_int_hasher> *const_int_htab;

struct const_wide_int_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x);
  static bool equal (rtx x, rtx y);
};

static GTY ((cache)) hash_table<const_wide_int_hasher> *const_wide_int_htab;

struct const_poly_int_hasher : ggc_cache_ptr_hash<rtx_def>
{
  typedef std::pair<machine_mode, poly_wide_int_ref> compare_type;

  static hashval_t hash (rtx x);
  static bool equal (rtx x, const compare_type &y);
};

static GTY ((cache)) hash_table<const_poly_int_hasher> *const_poly_int_htab;

/* A hash table storing register attribute structures.  */
struct reg_attr_hasher : ggc_cache_ptr_hash<reg_attrs>
{
  static hashval_t hash (reg_attrs *x);
  static bool equal (reg_attrs *a, reg_attrs *b);
};

static GTY ((cache)) hash_table<reg_attr_hasher> *reg_attrs_htab;

/* A hash table storing all CONST_DOUBLEs.  */
struct const_double_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x);
  static bool equal (rtx x, rtx y);
};

static GTY ((cache)) hash_table<const_double_hasher> *const_double_htab;

/* A hash table storing all CONST_FIXEDs.  */
struct const_fixed_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x);
  static bool equal (rtx x, rtx y);
};

static GTY ((cache)) hash_table<const_fixed_hasher> *const_fixed_htab;

#define cur_insn_uid (crtl->emit.x_cur_insn_uid)
#define cur_debug_insn_uid (crtl->emit.x_cur_debug_insn_uid)
#define first_label_num (crtl->emit.x_first_label_num)

static void set_used_decls (tree);
static void mark_label_nuses (rtx);
#if TARGET_SUPPORTS_WIDE_INT
static rtx lookup_const_wide_int (rtx);
#endif
static rtx lookup_const_double (rtx);
static rtx lookup_const_fixed (rtx);
static rtx gen_const_vector (machine_mode, int);
static void copy_rtx_if_shared_1 (rtx *orig);
/* Probability of the conditional branch currently processed by try_split.  */
profile_probability split_branch_probability;
\f
/* Returns a hash code for X (which is really a CONST_INT).  */

hashval_t
const_int_hasher::hash (rtx x)
{
  return (hashval_t) INTVAL (x);
}

/* Returns nonzero if the value represented by X (which is really a
   CONST_INT) is the same as that given by Y (which is really a
   HOST_WIDE_INT *).  */

bool
const_int_hasher::equal (rtx x, HOST_WIDE_INT y)
{
  return (INTVAL (x) == y);
}

#if TARGET_SUPPORTS_WIDE_INT
/* Returns a hash code for X (which is really a CONST_WIDE_INT).  */

hashval_t
const_wide_int_hasher::hash (rtx x)
{
  int i;
  unsigned HOST_WIDE_INT hash = 0;
  const_rtx xr = x;

  for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
    hash += CONST_WIDE_INT_ELT (xr, i);

  return (hashval_t) hash;
}

/* Returns nonzero if the value represented by X (which is really a
   CONST_WIDE_INT) is the same as that given by Y (which is really a
   CONST_WIDE_INT).  */

bool
const_wide_int_hasher::equal (rtx x, rtx y)
{
  int i;
  const_rtx xr = x;
  const_rtx yr = y;
  if (CONST_WIDE_INT_NUNITS (xr) != CONST_WIDE_INT_NUNITS (yr))
    return false;

  for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
    if (CONST_WIDE_INT_ELT (xr, i) != CONST_WIDE_INT_ELT (yr, i))
      return false;

  return true;
}
#endif

/* Returns a hash code for CONST_POLY_INT X.  */

hashval_t
const_poly_int_hasher::hash (rtx x)
{
  inchash::hash h;
  h.add_int (GET_MODE (x));
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
  return h.end ();
}

/* Returns nonzero if CONST_POLY_INT X is an rtx representation of Y.  */

bool
const_poly_int_hasher::equal (rtx x, const compare_type &y)
{
  if (GET_MODE (x) != y.first)
    return false;
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    if (CONST_POLY_INT_COEFFS (x)[i] != y.second.coeffs[i])
      return false;
  return true;
}

/* Returns a hash code for X (which is really a CONST_DOUBLE).  */
hashval_t
const_double_hasher::hash (rtx x)
{
  const_rtx const value = x;
  hashval_t h;

  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (value) == VOIDmode)
    h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value);
  else
    {
      h = real_hash (CONST_DOUBLE_REAL_VALUE (value));
      /* MODE is used in the comparison, so it should be in the hash.  */
      h ^= GET_MODE (value);
    }
  return h;
}

/* Returns nonzero if the value represented by X (really a CONST_DOUBLE)
   is the same as that represented by Y (really a CONST_DOUBLE).  */
bool
const_double_hasher::equal (rtx x, rtx y)
{
  const_rtx const a = x, b = y;

  if (GET_MODE (a) != GET_MODE (b))
    return 0;
  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (a) == VOIDmode)
    return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b)
	    && CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b));
  else
    return real_identical (CONST_DOUBLE_REAL_VALUE (a),
			   CONST_DOUBLE_REAL_VALUE (b));
}

/* Returns a hash code for X (which is really a CONST_FIXED).  */

hashval_t
const_fixed_hasher::hash (rtx x)
{
  const_rtx const value = x;
  hashval_t h;

  h = fixed_hash (CONST_FIXED_VALUE (value));
  /* MODE is used in the comparison, so it should be in the hash.  */
  h ^= GET_MODE (value);
  return h;
}

/* Returns nonzero if the value represented by X is the same as that
   represented by Y.  */

bool
const_fixed_hasher::equal (rtx x, rtx y)
{
  const_rtx const a = x, b = y;

  if (GET_MODE (a) != GET_MODE (b))
    return 0;
  return fixed_identical (CONST_FIXED_VALUE (a), CONST_FIXED_VALUE (b));
}

/* Return true if the given memory attributes are equal.  */

bool
mem_attrs_eq_p (const class mem_attrs *p, const class mem_attrs *q)
{
  if (p == q)
    return true;
  if (!p || !q)
    return false;
  return (p->alias == q->alias
	  && p->offset_known_p == q->offset_known_p
	  && (!p->offset_known_p || known_eq (p->offset, q->offset))
	  && p->size_known_p == q->size_known_p
	  && (!p->size_known_p || known_eq (p->size, q->size))
	  && p->align == q->align
	  && p->addrspace == q->addrspace
	  && (p->expr == q->expr
	      || (p->expr != NULL_TREE && q->expr != NULL_TREE
		  && operand_equal_p (p->expr, q->expr, 0))));
}

/* Set MEM's memory attributes so that they are the same as ATTRS.  */

static void
set_mem_attrs (rtx mem, mem_attrs *attrs)
{
  /* If everything is the default, we can just clear the attributes.  */
  if (mem_attrs_eq_p (attrs, mode_mem_attrs[(int) GET_MODE (mem)]))
    {
      MEM_ATTRS (mem) = 0;
      return;
    }

  if (!MEM_ATTRS (mem)
      || !mem_attrs_eq_p (attrs, MEM_ATTRS (mem)))
    {
      MEM_ATTRS (mem) = ggc_alloc<mem_attrs> ();
      memcpy (MEM_ATTRS (mem), attrs, sizeof (mem_attrs));
    }
}

/* Returns a hash code for X (which is really a reg_attrs *).  */

hashval_t
reg_attr_hasher::hash (reg_attrs *x)
{
  const reg_attrs *const p = x;

  inchash::hash h;
  h.add_ptr (p->decl);
  h.add_poly_hwi (p->offset);
  return h.end ();
}

/* Returns nonzero if the value represented by X is the same as that given by
   Y.  */

bool
reg_attr_hasher::equal (reg_attrs *x, reg_attrs *y)
{
  const reg_attrs *const p = x;
  const reg_attrs *const q = y;

  return (p->decl == q->decl && known_eq (p->offset, q->offset));
}
/* Allocate a new reg_attrs structure and insert it into the hash table if
   one identical to it is not already in the table.  The structure describes
   a register holding DECL at byte offset OFFSET.  */

static reg_attrs *
get_reg_attrs (tree decl, poly_int64 offset)
{
  reg_attrs attrs;

  /* If everything is the default, we can just return zero.  */
  if (decl == 0 && known_eq (offset, 0))
    return 0;

  attrs.decl = decl;
  attrs.offset = offset;

  reg_attrs **slot = reg_attrs_htab->find_slot (&attrs, INSERT);
  if (*slot == 0)
    {
      *slot = ggc_alloc<reg_attrs> ();
      memcpy (*slot, &attrs, sizeof (reg_attrs));
    }

  return *slot;
}
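
/* Illustrative sketch (not part of the original source): because reg_attrs
   structures are hash-consed above, equal (DECL, OFFSET) pairs share one
   object, so pointer comparison is sufficient afterwards:

     reg_attrs *a = get_reg_attrs (decl, 4);
     reg_attrs *b = get_reg_attrs (decl, 4);
     gcc_checking_assert (a == b);

   where `decl' stands for any non-null tree declaration.  */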


#if !HAVE_blockage
/* Generate an empty ASM_INPUT, which is used to block attempts to schedule,
   and to prevent register equivalences from being used across this insn.  */

rtx
gen_blockage (void)
{
  rtx x = gen_rtx_ASM_INPUT (VOIDmode, "");
  MEM_VOLATILE_P (x) = true;
  return x;
}
#endif
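
/* Usage sketch (an illustration, not part of the original source): a port
   typically emits the blockage as a standalone insn to create a scheduling
   barrier, e.g.

     emit_insn (gen_blockage ());

   On targets that define a "blockage" pattern, HAVE_blockage is set and
   gen_blockage comes from the generated insn-emit.c instead of the
   fallback above.  */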


/* Set the mode and register number of X to MODE and REGNO.  */

void
set_mode_and_regno (rtx x, machine_mode mode, unsigned int regno)
{
  unsigned int nregs = (HARD_REGISTER_NUM_P (regno)
			? hard_regno_nregs (regno, mode)
			: 1);
  PUT_MODE_RAW (x, mode);
  set_regno_raw (x, regno, nregs);
}

/* Initialize a fresh REG rtx with mode MODE and register REGNO.  */

rtx
init_raw_REG (rtx x, machine_mode mode, unsigned int regno)
{
  set_mode_and_regno (x, mode, regno);
  REG_ATTRS (x) = NULL;
  ORIGINAL_REGNO (x) = regno;
  return x;
}

/* Generate a new REG rtx.  Make sure ORIGINAL_REGNO is set properly, and
   don't attempt to share with the various global pieces of rtl (such as
   frame_pointer_rtx).  */

rtx
gen_raw_REG (machine_mode mode, unsigned int regno)
{
  rtx x = rtx_alloc (REG MEM_STAT_INFO);
  init_raw_REG (x, mode, regno);
  return x;
}

/* There are some RTL codes that require special attention; the generation
   functions do the raw handling.  If you add to this list, modify
   special_rtx in gengenrtl.c as well.  */

rtx_expr_list *
gen_rtx_EXPR_LIST (machine_mode mode, rtx expr, rtx expr_list)
{
  return as_a <rtx_expr_list *> (gen_rtx_fmt_ee (EXPR_LIST, mode, expr,
						 expr_list));
}

rtx_insn_list *
gen_rtx_INSN_LIST (machine_mode mode, rtx insn, rtx insn_list)
{
  return as_a <rtx_insn_list *> (gen_rtx_fmt_ue (INSN_LIST, mode, insn,
						 insn_list));
}

rtx_insn *
gen_rtx_INSN (machine_mode mode, rtx_insn *prev_insn, rtx_insn *next_insn,
	      basic_block bb, rtx pattern, int location, int code,
	      rtx reg_notes)
{
  return as_a <rtx_insn *> (gen_rtx_fmt_uuBeiie (INSN, mode,
						 prev_insn, next_insn,
						 bb, pattern, location, code,
						 reg_notes));
}

rtx
gen_rtx_CONST_INT (machine_mode mode ATTRIBUTE_UNUSED, HOST_WIDE_INT arg)
{
  if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT)
    return const_int_rtx[arg + MAX_SAVED_CONST_INT];

#if STORE_FLAG_VALUE != 1 && STORE_FLAG_VALUE != -1
  if (const_true_rtx && arg == STORE_FLAG_VALUE)
    return const_true_rtx;
#endif

  /* Look up the CONST_INT in the hash table.  */
  rtx *slot = const_int_htab->find_slot_with_hash (arg, (hashval_t) arg,
						   INSERT);
  if (*slot == 0)
    *slot = gen_rtx_raw_CONST_INT (VOIDmode, arg);

  return *slot;
}
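
/* Illustrative note (not part of the original source): because CONST_INTs
   are shared, pointer equality is value equality for them.  For example,

     rtx x = gen_rtx_CONST_INT (VOIDmode, 0);
     gcc_checking_assert (x == const0_rtx);

   holds, since 0 lies within [-MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT]
   and const0_rtx is the cached copy.  Values outside the cached range are
   shared through const_int_htab instead.  */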

rtx
gen_int_mode (poly_int64 c, machine_mode mode)
{
  c = trunc_int_for_mode (c, mode);
  if (c.is_constant ())
    return GEN_INT (c.coeffs[0]);
  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
  return immed_wide_int_const (poly_wide_int::from (c, prec, SIGNED), mode);
}
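
/* Worked example (an illustration, not part of the original source):
   gen_int_mode first sign-truncates C to MODE, so a value that only fits
   unsigned comes back as its signed equivalent.  On a target whose QImode
   is 8 bits wide,

     rtx x = gen_int_mode (0xff, QImode);
     gcc_checking_assert (x == constm1_rtx);

   since 0xff truncated to 8 bits is -1.  */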

/* CONST_DOUBLEs might be created from pairs of integers, or from
   REAL_VALUE_TYPEs.  Also, their length is known only at run time,
   so we cannot use gen_rtx_raw_CONST_DOUBLE.  */

/* Determine whether REAL, a CONST_DOUBLE, already exists in the
   hash table.  If so, return its counterpart; otherwise add it
   to the hash table and return it.  */
static rtx
lookup_const_double (rtx real)
{
  rtx *slot = const_double_htab->find_slot (real, INSERT);
  if (*slot == 0)
    *slot = real;

  return *slot;
}

/* Return a CONST_DOUBLE rtx for a floating-point value specified by
   VALUE in mode MODE.  */
rtx
const_double_from_real_value (REAL_VALUE_TYPE value, machine_mode mode)
{
  rtx real = rtx_alloc (CONST_DOUBLE);
  PUT_MODE (real, mode);

  real->u.rv = value;

  return lookup_const_double (real);
}

/* Determine whether FIXED, a CONST_FIXED, already exists in the
   hash table.  If so, return its counterpart; otherwise add it
   to the hash table and return it.  */

static rtx
lookup_const_fixed (rtx fixed)
{
  rtx *slot = const_fixed_htab->find_slot (fixed, INSERT);
  if (*slot == 0)
    *slot = fixed;

  return *slot;
}

/* Return a CONST_FIXED rtx for a fixed-point value specified by
   VALUE in mode MODE.  */

rtx
const_fixed_from_fixed_value (FIXED_VALUE_TYPE value, machine_mode mode)
{
  rtx fixed = rtx_alloc (CONST_FIXED);
  PUT_MODE (fixed, mode);

  fixed->u.fv = value;

  return lookup_const_fixed (fixed);
}

#if TARGET_SUPPORTS_WIDE_INT == 0
/* Constructs double_int from rtx CST.  */

double_int
rtx_to_double_int (const_rtx cst)
{
  double_int r;

  if (CONST_INT_P (cst))
    r = double_int::from_shwi (INTVAL (cst));
  else if (CONST_DOUBLE_AS_INT_P (cst))
    {
      r.low = CONST_DOUBLE_LOW (cst);
      r.high = CONST_DOUBLE_HIGH (cst);
    }
  else
    gcc_unreachable ();

  return r;
}
#endif

#if TARGET_SUPPORTS_WIDE_INT
/* Determine whether CONST_WIDE_INT WINT already exists in the hash table.
   If so, return its counterpart; otherwise add it to the hash table and
   return it.  */

static rtx
lookup_const_wide_int (rtx wint)
{
  rtx *slot = const_wide_int_htab->find_slot (wint, INSERT);
  if (*slot == 0)
    *slot = wint;

  return *slot;
}
#endif

/* Return an rtx constant for V, given that the constant has mode MODE.
   The returned rtx will be a CONST_INT if V fits, otherwise it will be
   a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
   (if TARGET_SUPPORTS_WIDE_INT).  */

static rtx
immed_wide_int_const_1 (const wide_int_ref &v, machine_mode mode)
{
  unsigned int len = v.get_len ();
  /* Not scalar_int_mode because we also allow pointer bound modes.  */
  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));

  /* Allow truncation but not extension since we do not know if the
     number is signed or unsigned.  */
  gcc_assert (prec <= v.get_precision ());

  if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT)
    return gen_int_mode (v.elt (0), mode);

#if TARGET_SUPPORTS_WIDE_INT
  {
    unsigned int i;
    rtx value;
    unsigned int blocks_needed
      = (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;

    if (len > blocks_needed)
      len = blocks_needed;

    value = const_wide_int_alloc (len);

    /* It is so tempting to just put the mode in here.  Must control
       myself ... */
    PUT_MODE (value, VOIDmode);
    CWI_PUT_NUM_ELEM (value, len);

    for (i = 0; i < len; i++)
      CONST_WIDE_INT_ELT (value, i) = v.elt (i);

    return lookup_const_wide_int (value);
  }
#else
  return immed_double_const (v.elt (0), v.elt (1), mode);
#endif
}

#if TARGET_SUPPORTS_WIDE_INT == 0
/* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair
   of ints: I0 is the low-order word and I1 is the high-order word.
   For values that are larger than HOST_BITS_PER_DOUBLE_INT, the
   implied upper bits are copies of the high bit of i1.  The value
   itself is neither signed nor unsigned.  Do not use this routine for
   non-integer modes; convert to REAL_VALUE_TYPE and use
   const_double_from_real_value.  */

rtx
immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, machine_mode mode)
{
  rtx value;
  unsigned int i;

  /* There are the following cases (note that there are no modes with
     HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode) < HOST_BITS_PER_DOUBLE_INT):

     1) If GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT, then we use
	gen_int_mode.
     2) If the value of the integer fits into HOST_WIDE_INT anyway
	(i.e., i1 consists only from copies of the sign bit, and sign
	of i0 and i1 are the same), then we return a CONST_INT for i0.
     3) Otherwise, we create a CONST_DOUBLE for i0 and i1.  */
  scalar_mode smode;
  if (is_a <scalar_mode> (mode, &smode)
      && GET_MODE_BITSIZE (smode) <= HOST_BITS_PER_WIDE_INT)
    return gen_int_mode (i0, mode);

  /* If this integer fits in one word, return a CONST_INT.  */
  if ((i1 == 0 && i0 >= 0) || (i1 == ~0 && i0 < 0))
    return GEN_INT (i0);

  /* We use VOIDmode for integers.  */
  value = rtx_alloc (CONST_DOUBLE);
  PUT_MODE (value, VOIDmode);

  CONST_DOUBLE_LOW (value) = i0;
  CONST_DOUBLE_HIGH (value) = i1;

  for (i = 2; i < (sizeof CONST_DOUBLE_FORMAT - 1); i++)
    XWINT (value, i) = 0;

  return lookup_const_double (value);
}
#endif
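
/* Worked example (an illustration, not part of the original source) of the
   three immed_double_const cases, on a !TARGET_SUPPORTS_WIDE_INT target
   with 64-bit HOST_WIDE_INT and a 128-bit TImode (an assumption):

     immed_double_const (5, 0, SImode);    case 1: mode fits a word, so
					   gen_int_mode yields CONST_INT 5
     immed_double_const (-2, -1, TImode);  case 2: i1 is all sign-bit
					   copies, so CONST_INT -2 suffices
     immed_double_const (0, 1, TImode);    case 3: needs a CONST_DOUBLE  */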

/* Return an rtx representation of C in mode MODE.  */

rtx
immed_wide_int_const (const poly_wide_int_ref &c, machine_mode mode)
{
  if (c.is_constant ())
    return immed_wide_int_const_1 (c.coeffs[0], mode);

  /* Not scalar_int_mode because we also allow pointer bound modes.  */
  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));

  /* Allow truncation but not extension since we do not know if the
     number is signed or unsigned.  */
  gcc_assert (prec <= c.coeffs[0].get_precision ());
  poly_wide_int newc = poly_wide_int::from (c, prec, SIGNED);

  /* See whether we already have an rtx for this constant.  */
  inchash::hash h;
  h.add_int (mode);
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    h.add_wide_int (newc.coeffs[i]);
  const_poly_int_hasher::compare_type typed_value (mode, newc);
  rtx *slot = const_poly_int_htab->find_slot_with_hash (typed_value,
							h.end (), INSERT);
  rtx x = *slot;
  if (x)
    return x;

  /* Create a new rtx.  There's a choice to be made here between installing
     the actual mode of the rtx or leaving it as VOIDmode (for consistency
     with CONST_INT).  In practice the handling of the codes is different
     enough that we get no benefit from using VOIDmode, and various places
     assume that VOIDmode implies CONST_INT.  Using the real mode seems like
     the right long-term direction anyway.  */
  typedef trailing_wide_ints<NUM_POLY_INT_COEFFS> twi;
  size_t extra_size = twi::extra_size (prec);
  x = rtx_alloc_v (CONST_POLY_INT,
		   sizeof (struct const_poly_int_def) + extra_size);
  PUT_MODE (x, mode);
  CONST_POLY_INT_COEFFS (x).set_precision (prec);
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    CONST_POLY_INT_COEFFS (x)[i] = newc.coeffs[i];

  *slot = x;
  return x;
}

rtx
gen_rtx_REG (machine_mode mode, unsigned int regno)
{
  /* In case the MD file explicitly references the frame pointer, have
     all such references point to the same frame pointer.  This is
     used during frame pointer elimination to distinguish the explicit
     references to these registers from pseudos that happened to be
     assigned to them.

     If we have eliminated the frame pointer or arg pointer, we will
     be using it as a normal register, for example as a spill
     register.  In such cases, we might be accessing it in a mode that
     is not Pmode and therefore cannot use the pre-allocated rtx.

     Also don't do this when we are making new REGs in reload, since
     we don't want to get confused with the real pointers.  */

  if (mode == Pmode && !reload_in_progress && !lra_in_progress)
    {
      if (regno == FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
	return frame_pointer_rtx;

      if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
	return hard_frame_pointer_rtx;
#if !HARD_FRAME_POINTER_IS_ARG_POINTER
      if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM)
	return arg_pointer_rtx;
#endif
#ifdef RETURN_ADDRESS_POINTER_REGNUM
      if (regno == RETURN_ADDRESS_POINTER_REGNUM)
	return return_address_pointer_rtx;
#endif
      if (regno == (unsigned) PIC_OFFSET_TABLE_REGNUM
	  && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
	  && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
	return pic_offset_table_rtx;
      if (regno == STACK_POINTER_REGNUM)
	return stack_pointer_rtx;
    }

#if 0
  /* If the per-function register table has been set up, try to re-use
     an existing entry in that table to avoid useless generation of RTL.

     This code is disabled for now until we can fix the various backends
     which depend on having non-shared hard registers in some cases.  Long
     term we want to re-enable this code as it can significantly cut down
     on the amount of useless RTL that gets generated.

     We'll also need to fix some code that runs after reload that wants to
     set ORIGINAL_REGNO.  */

  if (cfun
      && cfun->emit
      && regno_reg_rtx
      && regno < FIRST_PSEUDO_REGISTER
      && reg_raw_mode[regno] == mode)
    return regno_reg_rtx[regno];
#endif

  return gen_raw_REG (mode, regno);
}
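
/* Illustrative note (not part of the original source): the sharing above
   means that, before reload, asking for the frame pointer in Pmode hands
   back the unique global rtx rather than a fresh REG:

     rtx fp = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
     gcc_checking_assert (fp == frame_pointer_rtx);

   Requests in a non-Pmode mode, or made during reload/LRA, fall through
   to gen_raw_REG and produce a distinct object.  */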

rtx
gen_rtx_MEM (machine_mode mode, rtx addr)
{
  rtx rt = gen_rtx_raw_MEM (mode, addr);

  /* This field is not cleared by the mere allocation of the rtx, so
     we clear it here.  */
  MEM_ATTRS (rt) = 0;

  return rt;
}

/* Generate a memory referring to non-trapping constant memory.  */

rtx
gen_const_mem (machine_mode mode, rtx addr)
{
  rtx mem = gen_rtx_MEM (mode, addr);
  MEM_READONLY_P (mem) = 1;
  MEM_NOTRAP_P (mem) = 1;
  return mem;
}

/* Generate a MEM referring to fixed portions of the frame, e.g., register
   save areas.  */

rtx
gen_frame_mem (machine_mode mode, rtx addr)
{
  rtx mem = gen_rtx_MEM (mode, addr);
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());
  return mem;
}

/* Generate a MEM referring to a temporary use of the stack, not part
   of the fixed stack frame.  For example, something which is pushed
   by a target splitter.  */
rtx
gen_tmp_stack_mem (machine_mode mode, rtx addr)
{
  rtx mem = gen_rtx_MEM (mode, addr);
  MEM_NOTRAP_P (mem) = 1;
  if (!cfun->calls_alloca)
    set_mem_alias_set (mem, get_frame_alias_set ());
  return mem;
}

/* We want to create (subreg:OMODE (obj:IMODE) OFFSET).  Return true if
   this construct would be valid, and false otherwise.  */

bool
validate_subreg (machine_mode omode, machine_mode imode,
		 const_rtx reg, poly_uint64 offset)
{
  poly_uint64 isize = GET_MODE_SIZE (imode);
  poly_uint64 osize = GET_MODE_SIZE (omode);

  /* The sizes must be ordered, so that we know whether the subreg
     is partial, paradoxical or complete.  */
  if (!ordered_p (isize, osize))
    return false;

  /* All subregs must be aligned.  */
  if (!multiple_p (offset, osize))
    return false;

  /* The subreg offset cannot be outside the inner object.  */
  if (maybe_ge (offset, isize))
    return false;

  poly_uint64 regsize = REGMODE_NATURAL_SIZE (imode);

  /* ??? This should not be here.  Temporarily continue to allow word_mode
     subregs of anything.  The most common offender is (subreg:SI (reg:DF)).
     Generally, backends are doing something sketchy but it'll take time to
     fix them all.  */
  if (omode == word_mode)
    ;
  /* ??? Similarly, e.g. with (subreg:DF (reg:TI)).  Though store_bit_field
     is the culprit here, and not the backends.  */
  else if (known_ge (osize, regsize) && known_ge (isize, osize))
    ;
  /* Allow component subregs of complex and vector.  Though given the below
     extraction rules, it's not always clear what that means.  */
  else if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode))
	   && GET_MODE_INNER (imode) == omode)
    ;
  /* ??? x86 sse code makes heavy use of *paradoxical* vector subregs,
     i.e. (subreg:V4SF (reg:SF) 0) or (subreg:V4SF (reg:V2SF) 0).  This
     surely isn't the cleanest way to represent this.  It's questionable
     whether this ought to be represented at all -- why can't this all be
     hidden in post-reload splitters that make arbitrary mode changes to
     the registers themselves.  */
  else if (VECTOR_MODE_P (omode)
	   && GET_MODE_INNER (omode) == GET_MODE_INNER (imode))
    ;
  /* Subregs involving floating point modes are not allowed to
     change size.  Therefore (subreg:DI (reg:DF) 0) is fine, but
     (subreg:SI (reg:DF) 0) isn't.  */
  else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
    {
      if (! (known_eq (isize, osize)
	     /* LRA can use subreg to store a floating point value in
		an integer mode.  Although the floating point and the
		integer modes need the same number of hard registers,
		the size of the floating point mode can be less than
		that of the integer mode.  LRA also uses subregs when a
		register must be used in a different mode in one insn.  */
	     || lra_in_progress))
	return false;
    }

  /* Paradoxical subregs must have offset zero.  */
  if (maybe_gt (osize, isize))
    return known_eq (offset, 0U);

  /* This is a normal subreg.  Verify that the offset is representable.  */

  /* For hard registers, we already have most of these rules collected in
     subreg_offset_representable_p.  */
  if (reg && REG_P (reg) && HARD_REGISTER_P (reg))
    {
      unsigned int regno = REGNO (reg);

      if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode))
	  && GET_MODE_INNER (imode) == omode)
	;
      else if (!REG_CAN_CHANGE_MODE_P (regno, imode, omode))
	return false;

      return subreg_offset_representable_p (regno, imode, offset, omode);
    }

  /* The outer size must be ordered wrt the register size, otherwise
     we wouldn't know at compile time how many registers the outer
     mode occupies.  */
  if (!ordered_p (osize, regsize))
    return false;

  /* For pseudo registers, we want most of the same checks.  Namely:

     Assume that the pseudo register will be allocated to hard registers
     that can hold REGSIZE bytes each.  If OSIZE is not a multiple of REGSIZE,
     the remainder must correspond to the lowpart of the containing hard
     register.  If BYTES_BIG_ENDIAN, the lowpart is at the highest offset,
     otherwise it is at the lowest offset.

     Given that we've already checked the mode and offset alignment,
     we only have to check subblock subregs here.  */
  if (maybe_lt (osize, regsize)
      && ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
    {
      /* It is invalid for the target to pick a register size for a mode
	 that isn't ordered wrt the size of that mode.  */
      poly_uint64 block_size = ordered_min (isize, regsize);
      unsigned int start_reg;
      poly_uint64 offset_within_reg;
      if (!can_div_trunc_p (offset, block_size, &start_reg, &offset_within_reg)
	  || (BYTES_BIG_ENDIAN
	      ? maybe_ne (offset_within_reg, block_size - osize)
	      : maybe_ne (offset_within_reg, 0U)))
	return false;
    }
  return true;
}
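
/* Worked examples (illustrative, not part of the original source), taking
   a little-endian target where REGMODE_NATURAL_SIZE is 8 for DImode:

     validate_subreg (SImode, DImode, reg, 0)  -> true: an aligned lowpart
     validate_subreg (SImode, DImode, reg, 2)  -> false: offset 2 is not a
				multiple of the 4-byte outer size
     validate_subreg (SImode, DFmode, reg, 0)  -> false in general, since
				float subregs may not change size, unless
				one of the word_mode or LRA escape hatches
				above applies.  */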

rtx
gen_rtx_SUBREG (machine_mode mode, rtx reg, poly_uint64 offset)
{
  gcc_assert (validate_subreg (mode, GET_MODE (reg), reg, offset));
  return gen_rtx_raw_SUBREG (mode, reg, offset);
}

/* Generate a SUBREG representing the least-significant part of REG if MODE
   is smaller than mode of REG, otherwise paradoxical SUBREG.  */

rtx
gen_lowpart_SUBREG (machine_mode mode, rtx reg)
{
  machine_mode inmode;

  inmode = GET_MODE (reg);
  if (inmode == VOIDmode)
    inmode = mode;
  return gen_rtx_SUBREG (mode, reg,
			 subreg_lowpart_offset (mode, inmode));
}

rtx
gen_rtx_VAR_LOCATION (machine_mode mode, tree decl, rtx loc,
		      enum var_init_status status)
{
  rtx x = gen_rtx_fmt_te (VAR_LOCATION, mode, decl, loc);
  PAT_VAR_LOCATION_STATUS (x) = status;
  return x;
}
\f

/* Create an rtvec and store within it the RTXen passed in the arguments.  */

rtvec
gen_rtvec (int n, ...)
{
  int i;
  rtvec rt_val;
  va_list p;

  va_start (p, n);

  /* Don't allocate an empty rtvec...  */
  if (n == 0)
    {
      va_end (p);
      return NULL_RTVEC;
    }

  rt_val = rtvec_alloc (n);

  for (i = 0; i < n; i++)
    rt_val->elem[i] = va_arg (p, rtx);

  va_end (p);
  return rt_val;
}

rtvec
gen_rtvec_v (int n, rtx *argp)
{
  int i;
  rtvec rt_val;

  /* Don't allocate an empty rtvec...  */
  if (n == 0)
    return NULL_RTVEC;

  rt_val = rtvec_alloc (n);

  for (i = 0; i < n; i++)
    rt_val->elem[i] = *argp++;

  return rt_val;
}

rtvec
gen_rtvec_v (int n, rtx_insn **argp)
{
  int i;
  rtvec rt_val;

  /* Don't allocate an empty rtvec...  */
  if (n == 0)
    return NULL_RTVEC;

  rt_val = rtvec_alloc (n);

  for (i = 0; i < n; i++)
    rt_val->elem[i] = *argp++;

  return rt_val;
}

\f
/* Return the number of bytes between the start of an OUTER_MODE
   in-memory value and the start of an INNER_MODE in-memory value,
   given that the former is a lowpart of the latter.  It may be a
   paradoxical lowpart, in which case the offset will be negative
   on big-endian targets.  */

poly_int64
byte_lowpart_offset (machine_mode outer_mode,
		     machine_mode inner_mode)
{
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    return -subreg_lowpart_offset (inner_mode, outer_mode);
  else
    return subreg_lowpart_offset (outer_mode, inner_mode);
}
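
/* Worked example (illustrative, not part of the original source): for the
   lowpart of a DImode value viewed in SImode,

     byte_lowpart_offset (SImode, DImode)

   is 0 on a little-endian target and 4 on a big-endian one.  In the
   paradoxical direction, byte_lowpart_offset (DImode, SImode) is 0 on
   little-endian and -4 on big-endian, matching the comment above.  */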

/* Return the offset of (subreg:OUTER_MODE (mem:INNER_MODE X) OFFSET)
   from address X.  For paradoxical big-endian subregs this is a
   negative value, otherwise it's the same as OFFSET.  */

poly_int64
subreg_memory_offset (machine_mode outer_mode, machine_mode inner_mode,
		      poly_uint64 offset)
{
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    {
      gcc_assert (known_eq (offset, 0U));
      return -subreg_lowpart_offset (inner_mode, outer_mode);
    }
  return offset;
}

/* As above, but return the offset that existing subreg X would have
   if SUBREG_REG (X) were stored in memory.  The only significant thing
   about the current SUBREG_REG is its mode.  */

poly_int64
subreg_memory_offset (const_rtx x)
{
  return subreg_memory_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
}
\f
/* Generate a REG rtx for a new pseudo register of mode MODE.
   This pseudo is assigned the next sequential register number.  */

rtx
gen_reg_rtx (machine_mode mode)
{
  rtx val;
  unsigned int align = GET_MODE_ALIGNMENT (mode);

  gcc_assert (can_create_pseudo_p ());

  /* If a virtual register with bigger mode alignment is generated,
     increase stack alignment estimation because it might be spilled
     to stack later.  */
  if (SUPPORTS_STACK_ALIGNMENT
      && crtl->stack_alignment_estimated < align
      && !crtl->stack_realign_processed)
    {
      unsigned int min_align = MINIMUM_ALIGNMENT (NULL, mode, align);
      if (crtl->stack_alignment_estimated < min_align)
	crtl->stack_alignment_estimated = min_align;
    }

  if (generating_concat_p
      && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
	  || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT))
    {
      /* For complex modes, don't make a single pseudo.
	 Instead, make a CONCAT of two pseudos.
	 This allows noncontiguous allocation of the real and imaginary parts,
	 which makes much better code.  Besides, allocating DCmode
	 pseudos overstrains reload on some machines like the 386.  */
      rtx realpart, imagpart;
      machine_mode partmode = GET_MODE_INNER (mode);

      realpart = gen_reg_rtx (partmode);
      imagpart = gen_reg_rtx (partmode);
      return gen_rtx_CONCAT (mode, realpart, imagpart);
    }

  /* Do not call gen_reg_rtx with uninitialized crtl.  */
  gcc_assert (crtl->emit.regno_pointer_align_length);

  crtl->emit.ensure_regno_capacity ();
  gcc_assert (reg_rtx_no < crtl->emit.regno_pointer_align_length);

  val = gen_raw_REG (mode, reg_rtx_no);
  regno_reg_rtx[reg_rtx_no++] = val;
  return val;
}

/* Make sure m_regno_pointer_align and regno_reg_rtx are large
   enough to have elements in the range 0 <= idx <= reg_rtx_no.  */

void
emit_status::ensure_regno_capacity ()
{
  int old_size = regno_pointer_align_length;

  if (reg_rtx_no < old_size)
    return;

  int new_size = old_size * 2;
  while (reg_rtx_no >= new_size)
    new_size *= 2;

  char *tmp = XRESIZEVEC (char, regno_pointer_align, new_size);
  memset (tmp + old_size, 0, new_size - old_size);
  regno_pointer_align = (unsigned char *) tmp;

  rtx *new1 = GGC_RESIZEVEC (rtx, regno_reg_rtx, new_size);
  memset (new1 + old_size, 0, (new_size - old_size) * sizeof (rtx));
  regno_reg_rtx = new1;

  crtl->emit.regno_pointer_align_length = new_size;
}

/* Return TRUE if REG's REG_EXPR is a PARM_DECL, FALSE otherwise.  */

bool
reg_is_parm_p (rtx reg)
{
  tree decl;

  gcc_assert (REG_P (reg));
  decl = REG_EXPR (reg);
  return (decl && TREE_CODE (decl) == PARM_DECL);
}

/* Update NEW_RTX with the same attributes as REG, but with OFFSET added
   to the REG_OFFSET.  */

static void
update_reg_offset (rtx new_rtx, rtx reg, poly_int64 offset)
{
  REG_ATTRS (new_rtx) = get_reg_attrs (REG_EXPR (reg),
				       REG_OFFSET (reg) + offset);
}

/* Generate a register with same attributes as REG, but with OFFSET
   added to the REG_OFFSET.  */

rtx
gen_rtx_REG_offset (rtx reg, machine_mode mode, unsigned int regno,
		    poly_int64 offset)
{
  rtx new_rtx = gen_rtx_REG (mode, regno);

  update_reg_offset (new_rtx, reg, offset);
  return new_rtx;
}

/* Generate a new pseudo-register with the same attributes as REG, but
   with OFFSET added to the REG_OFFSET.  */

rtx
gen_reg_rtx_offset (rtx reg, machine_mode mode, int offset)
{
  rtx new_rtx = gen_reg_rtx (mode);

  update_reg_offset (new_rtx, reg, offset);
  return new_rtx;
}

/* Adjust REG in-place so that it has mode MODE.  It is assumed that the
   new register is a (possibly paradoxical) lowpart of the old one.  */

void
adjust_reg_mode (rtx reg, machine_mode mode)
{
  update_reg_offset (reg, reg, byte_lowpart_offset (mode, GET_MODE (reg)));
  PUT_MODE (reg, mode);
}

/* Copy REG's attributes from X, if X has any attributes.  If REG and X
   have different modes, REG is a (possibly paradoxical) lowpart of X.  */

void
set_reg_attrs_from_value (rtx reg, rtx x)
{
  poly_int64 offset;
  bool can_be_reg_pointer = true;

  /* Don't call mark_reg_pointer for incompatible pointer sign
     extension.  */
  while (GET_CODE (x) == SIGN_EXTEND
	 || GET_CODE (x) == ZERO_EXTEND
	 || GET_CODE (x) == TRUNCATE
	 || (GET_CODE (x) == SUBREG && subreg_lowpart_p (x)))
    {
#if defined(POINTERS_EXTEND_UNSIGNED)
      if (((GET_CODE (x) == SIGN_EXTEND && POINTERS_EXTEND_UNSIGNED)
	   || (GET_CODE (x) == ZERO_EXTEND && ! POINTERS_EXTEND_UNSIGNED)
	   || (paradoxical_subreg_p (x)
	       && ! (SUBREG_PROMOTED_VAR_P (x)
		     && SUBREG_CHECK_PROMOTED_SIGN (x,
						    POINTERS_EXTEND_UNSIGNED))))
	  && !targetm.have_ptr_extend ())
	can_be_reg_pointer = false;
#endif
      x = XEXP (x, 0);
    }

  /* Hard registers can be reused for multiple purposes within the same
     function, so setting REG_ATTRS, REG_POINTER and REG_POINTER_ALIGN
     on them is wrong.  */
  if (HARD_REGISTER_P (reg))
    return;

  offset = byte_lowpart_offset (GET_MODE (reg), GET_MODE (x));
  if (MEM_P (x))
    {
      if (MEM_OFFSET_KNOWN_P (x))
	REG_ATTRS (reg) = get_reg_attrs (MEM_EXPR (x),
					 MEM_OFFSET (x) + offset);
      if (can_be_reg_pointer && MEM_POINTER (x))
	mark_reg_pointer (reg, 0);
    }
  else if (REG_P (x))
    {
      if (REG_ATTRS (x))
	update_reg_offset (reg, x, offset);
      if (can_be_reg_pointer && REG_POINTER (x))
	mark_reg_pointer (reg, REGNO_POINTER_ALIGN (REGNO (x)));
    }
}

/* Generate a REG rtx for a new pseudo register, copying the mode
   and attributes from X.  */

rtx
gen_reg_rtx_and_attrs (rtx x)
{
  rtx reg = gen_reg_rtx (GET_MODE (x));
  set_reg_attrs_from_value (reg, x);
  return reg;
}

/* Set the register attributes for registers contained in PARM_RTX.
   Use needed values from memory attributes of MEM.  */

void
set_reg_attrs_for_parm (rtx parm_rtx, rtx mem)
{
  if (REG_P (parm_rtx))
    set_reg_attrs_from_value (parm_rtx, mem);
  else if (GET_CODE (parm_rtx) == PARALLEL)
    {
      /* Check for a NULL entry in the first slot, used to indicate that the
	 parameter goes both on the stack and in registers.  */
      int i = XEXP (XVECEXP (parm_rtx, 0, 0), 0) ? 0 : 1;
      for (; i < XVECLEN (parm_rtx, 0); i++)
	{
	  rtx x = XVECEXP (parm_rtx, 0, i);
	  if (REG_P (XEXP (x, 0)))
	    REG_ATTRS (XEXP (x, 0))
	      = get_reg_attrs (MEM_EXPR (mem),
			       INTVAL (XEXP (x, 1)));
	}
    }
}

/* Set the REG_ATTRS for registers in value X, given that X represents
   decl T.  */

void
set_reg_attrs_for_decl_rtl (tree t, rtx x)
{
  if (!t)
    return;
  tree tdecl = t;
  if (GET_CODE (x) == SUBREG)
    {
      gcc_assert (subreg_lowpart_p (x));
      x = SUBREG_REG (x);
    }
  if (REG_P (x))
    REG_ATTRS (x)
      = get_reg_attrs (t, byte_lowpart_offset (GET_MODE (x),
					       DECL_P (tdecl)
					       ? DECL_MODE (tdecl)
					       : TYPE_MODE (TREE_TYPE (tdecl))));
  if (GET_CODE (x) == CONCAT)
    {
      if (REG_P (XEXP (x, 0)))
	REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (t, 0);
      if (REG_P (XEXP (x, 1)))
	REG_ATTRS (XEXP (x, 1))
	  = get_reg_attrs (t, GET_MODE_UNIT_SIZE (GET_MODE (XEXP (x, 0))));
    }
  if (GET_CODE (x) == PARALLEL)
    {
      int i, start;

      /* Check for a NULL entry, used to indicate that the parameter goes
	 both on the stack and in registers.  */
      if (XEXP (XVECEXP (x, 0, 0), 0))
	start = 0;
      else
	start = 1;

      for (i = start; i < XVECLEN (x, 0); i++)
	{
	  rtx y = XVECEXP (x, 0, i);
	  if (REG_P (XEXP (y, 0)))
	    REG_ATTRS (XEXP (y, 0)) = get_reg_attrs (t, INTVAL (XEXP (y, 1)));
	}
    }
}

/* Assign the RTX X to declaration T.  */

void
set_decl_rtl (tree t, rtx x)
{
  DECL_WRTL_CHECK (t)->decl_with_rtl.rtl = x;
  if (x)
    set_reg_attrs_for_decl_rtl (t, x);
}

/* Assign the RTX X to parameter declaration T.  BY_REFERENCE_P is true
   if the ABI requires the parameter to be passed by reference.  */

void
set_decl_incoming_rtl (tree t, rtx x, bool by_reference_p)
{
  DECL_INCOMING_RTL (t) = x;
  if (x && !by_reference_p)
    set_reg_attrs_for_decl_rtl (t, x);
}

/* Identify REG (which may be a CONCAT) as a user register.  */

void
mark_user_reg (rtx reg)
{
  if (GET_CODE (reg) == CONCAT)
    {
      REG_USERVAR_P (XEXP (reg, 0)) = 1;
      REG_USERVAR_P (XEXP (reg, 1)) = 1;
    }
  else
    {
      gcc_assert (REG_P (reg));
      REG_USERVAR_P (reg) = 1;
    }
}

/* Identify REG as a probable pointer register and show its alignment
   as ALIGN, if nonzero.  */

void
mark_reg_pointer (rtx reg, int align)
{
  if (! REG_POINTER (reg))
    {
      REG_POINTER (reg) = 1;

      if (align)
	REGNO_POINTER_ALIGN (REGNO (reg)) = align;
    }
  else if (align && align < REGNO_POINTER_ALIGN (REGNO (reg)))
    /* We can no longer be sure just how aligned this pointer is.  */
    REGNO_POINTER_ALIGN (REGNO (reg)) = align;
}

/* Return 1 plus largest pseudo reg number used in the current function.  */

int
max_reg_num (void)
{
  return reg_rtx_no;
}

/* Return 1 + the largest label number used so far in the current function.  */

int
max_label_num (void)
{
  return label_num;
}

/* Return first label number used in this function (if any were used).  */

int
get_first_label_num (void)
{
  return first_label_num;
}

/* If the rtx for label was created during the expansion of a nested
   function, then first_label_num won't include this label number.
   Fix this now so that array indices work later.  */

void
maybe_set_first_label_num (rtx_code_label *x)
{
  if (CODE_LABEL_NUMBER (x) < first_label_num)
    first_label_num = CODE_LABEL_NUMBER (x);
}

/* For use by the RTL function loader, when mingling with normal
   functions.
   Ensure that label_num is greater than the label num of X, to avoid
   duplicate labels in the generated assembler.  */

void
maybe_set_max_label_num (rtx_code_label *x)
{
  if (CODE_LABEL_NUMBER (x) >= label_num)
    label_num = CODE_LABEL_NUMBER (x) + 1;
}

\f
/* Return a value representing some low-order bits of X, where the number
   of low-order bits is given by MODE.  Note that no conversion is done
   between floating-point and fixed-point values, rather, the bit
   representation is returned.

   This function handles the cases in common between gen_lowpart, below,
   and two variants in cse.c and combine.c.  These are the cases that can
   be safely handled at all points in the compilation.

   If this is not a case we can handle, return 0.  */

rtx
gen_lowpart_common (machine_mode mode, rtx x)
{
  poly_uint64 msize = GET_MODE_SIZE (mode);
  machine_mode innermode;

  /* Unfortunately, this routine doesn't take a parameter for the mode of X,
     so we have to make one up.  Yuk.  */
  innermode = GET_MODE (x);
  if (CONST_INT_P (x)
      && known_le (msize * BITS_PER_UNIT,
		   (unsigned HOST_WIDE_INT) HOST_BITS_PER_WIDE_INT))
    innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
  else if (innermode == VOIDmode)
    innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, 0).require ();

  gcc_assert (innermode != VOIDmode && innermode != BLKmode);

  if (innermode == mode)
    return x;

  /* The size of the outer and inner modes must be ordered.  */
  poly_uint64 xsize = GET_MODE_SIZE (innermode);
  if (!ordered_p (msize, xsize))
    return 0;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Don't allow paradoxical FLOAT_MODE subregs.  */
      if (maybe_gt (msize, xsize))
	return 0;
    }
  else
    {
      /* MODE must occupy no more of the underlying registers than X.  */
      poly_uint64 regsize = REGMODE_NATURAL_SIZE (innermode);
      unsigned int mregs, xregs;
      if (!can_div_away_from_zero_p (msize, regsize, &mregs)
	  || !can_div_away_from_zero_p (xsize, regsize, &xregs)
	  || mregs > xregs)
	return 0;
    }

  scalar_int_mode int_mode, int_innermode, from_mode;
  if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &from_mode))
    {
      /* If we are getting the low-order part of something that has been
	 sign- or zero-extended, we can either just use the object being
	 extended or make a narrower extension.  If we want an even smaller
	 piece than the size of the object being extended, call ourselves
	 recursively.

	 This case is used mostly by combine and cse.  */

      if (from_mode == int_mode)
	return XEXP (x, 0);
      else if (GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (from_mode))
	return gen_lowpart_common (int_mode, XEXP (x, 0));
      else if (GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (int_innermode))
	return gen_rtx_fmt_e (GET_CODE (x), int_mode, XEXP (x, 0));
    }
  else if (GET_CODE (x) == SUBREG || REG_P (x)
	   || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
	   || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x)
	   || CONST_POLY_INT_P (x))
    return lowpart_subreg (mode, x, innermode);

  /* Otherwise, we can't do this.  */
  return 0;
}
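
/* Illustrative example (not part of the original source) of the extension
   shortcut above: for x = (sign_extend:DI (reg:SI r)),

     gen_lowpart_common (SImode, x)

   returns (reg:SI r) directly, and requesting the still narrower HImode
   lowpart recurses into the extended operand instead of building a
   SUBREG of the extension.  */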
\f
rtx
gen_highpart (machine_mode mode, rtx x)
{
  poly_uint64 msize = GET_MODE_SIZE (mode);
  rtx result;

  /* This case loses if X is a subreg.  To catch bugs early,
     complain if an invalid MODE is used even in other cases.  */
  gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD)
	      || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x))));

  result = simplify_gen_subreg (mode, x, GET_MODE (x),
				subreg_highpart_offset (mode, GET_MODE (x)));
  gcc_assert (result);

  /* simplify_gen_subreg is not guaranteed to return a valid operand for
     the target if we have a MEM.  gen_highpart must return a valid operand,
     emitting code if necessary to do so.  */
  if (MEM_P (result))
    {
      result = validize_mem (result);
      gcc_assert (result);
    }

  return result;
}

/* Like gen_highpart, but accept mode of EXP operand in case EXP can be
   a VOIDmode constant.  */
rtx
gen_highpart_mode (machine_mode outermode, machine_mode innermode, rtx exp)
{
  if (GET_MODE (exp) != VOIDmode)
    {
      gcc_assert (GET_MODE (exp) == innermode);
      return gen_highpart (outermode, exp);
    }
  return simplify_gen_subreg (outermode, exp, innermode,
			      subreg_highpart_offset (outermode, innermode));
}

/* Return the SUBREG_BYTE for a lowpart subreg whose outer mode has
   OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes.  */

poly_uint64
subreg_size_lowpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
{
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    /* Paradoxical subregs always have a SUBREG_BYTE of 0.  */
    return 0;

  if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN)
    return inner_bytes - outer_bytes;
  else if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
    return 0;
  else
    return subreg_size_offset_from_lsb (outer_bytes, inner_bytes, 0);
}

/* Return the SUBREG_BYTE for a highpart subreg whose outer mode has
   OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes.  */

poly_uint64
subreg_size_highpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
{
  gcc_assert (known_ge (inner_bytes, outer_bytes));

  if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN)
    return 0;
  else if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
    return inner_bytes - outer_bytes;
  else
    return subreg_size_offset_from_lsb (outer_bytes, inner_bytes,
					(inner_bytes - outer_bytes)
					* BITS_PER_UNIT);
}
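
/* Worked example (illustrative, not part of the original source): taking a
   4-byte part of an 8-byte value,

     subreg_size_lowpart_offset (4, 8)   is 0 on little-endian targets
					 and 4 on big-endian ones;
     subreg_size_highpart_offset (4, 8)  is 4 on little-endian targets
					 and 0 on big-endian ones,

   assuming BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN agree.  */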

/* Return 1 iff X, assumed to be a SUBREG,
   refers to the least significant part of its containing reg.
   If X is not a SUBREG, always return 1 (it is its own low part!).  */

int
subreg_lowpart_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return 1;
  else if (GET_MODE (SUBREG_REG (x)) == VOIDmode)
    return 0;

  return known_eq (subreg_lowpart_offset (GET_MODE (x),
					  GET_MODE (SUBREG_REG (x))),
		   SUBREG_BYTE (x));
}
\f
/* Return subword OFFSET of operand OP.
   The word number, OFFSET, is interpreted as the word number starting
   at the low-order address.  OFFSET 0 is the low-order word if not
   WORDS_BIG_ENDIAN, otherwise it is the high-order word.

   If we cannot extract the required word, we return zero.  Otherwise,
   an rtx corresponding to the requested word will be returned.

   VALIDATE_ADDRESS is nonzero if the address should be validated.  Before
   reload has completed, a valid address will always be returned.  After
   reload, if a valid address cannot be returned, we return zero.

   If VALIDATE_ADDRESS is zero, we simply form the required address; validating
   it is the responsibility of the caller.

   MODE is the mode of OP in case it is a CONST_INT.

   ??? This is still rather broken for some cases.  The problem for the
   moment is that all callers of this thing provide no 'goal mode' to
   tell us to work with.  This exists because all callers were written
   in a word-based SUBREG world.
   Most uses of this function can now be replaced by simplify_subreg.  */

rtx
operand_subword (rtx op, poly_uint64 offset, int validate_address,
		 machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);

  gcc_assert (mode != VOIDmode);

  /* If OP is narrower than a word, fail.  */
  if (mode != BLKmode
      && maybe_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD))
    return 0;

  /* If we want a word outside OP, return zero.  */
  if (mode != BLKmode
      && maybe_gt ((offset + 1) * UNITS_PER_WORD, GET_MODE_SIZE (mode)))
    return const0_rtx;

  /* Form a new MEM at the requested address.  */
  if (MEM_P (op))
    {
      rtx new_rtx = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD);

      if (! validate_address)
	return new_rtx;

      else if (reload_completed)
	{
	  if (! strict_memory_address_addr_space_p (word_mode,
						    XEXP (new_rtx, 0),
						    MEM_ADDR_SPACE (op)))
	    return 0;
	}
      else
	return replace_equiv_address (new_rtx, XEXP (new_rtx, 0));
    }

  /* Rest can be handled by simplify_subreg.  */
  return simplify_gen_subreg (word_mode, op, mode, (offset * UNITS_PER_WORD));
}
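
/* Usage sketch (illustrative, not part of the original source): on a
   32-bit target, splitting a DImode operand into its two words,

     rtx lo = operand_subword (op, 0, 1, DImode);
     rtx hi = operand_subword (op, 1, 1, DImode);

   yields the low-order and high-order word_mode pieces when
   !WORDS_BIG_ENDIAN.  A null result means the word could not be
   extracted, which operand_subword_force below avoids.  */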

/* Similar to `operand_subword', but never return 0.  If we can't
   extract the required subword, put OP into a register and try again.
   The second attempt must succeed.  We always validate the address in
   this case.

   MODE is the mode of OP, in case it is CONST_INT.  */

rtx
operand_subword_force (rtx op, poly_uint64 offset, machine_mode mode)
{
  rtx result = operand_subword (op, offset, 1, mode);

  if (result)
    return result;

  if (mode != BLKmode && mode != VOIDmode)
    {
      /* If this is a register which cannot be accessed by words, copy it
	 to a pseudo register.  */
      if (REG_P (op))
	op = copy_to_reg (op);
      else
	op = force_reg (mode, op);
    }

  result = operand_subword (op, offset, 1, mode);
  gcc_assert (result);

  return result;
}
\f
mem_attrs::mem_attrs ()
  : expr (NULL_TREE),
    offset (0),
    size (0),
    alias (0),
    align (0),
    addrspace (ADDR_SPACE_GENERIC),
    offset_known_p (false),
    size_known_p (false)
{}

/* Returns 1 if the two MEM_EXPRs EXPR1 and EXPR2 can be considered equal,
   0 otherwise.  */
1824
1825 int
1826 mem_expr_equal_p (const_tree expr1, const_tree expr2)
1827 {
1828 if (expr1 == expr2)
1829 return 1;
1830
1831 if (! expr1 || ! expr2)
1832 return 0;
1833
1834 if (TREE_CODE (expr1) != TREE_CODE (expr2))
1835 return 0;
1836
1837 return operand_equal_p (expr1, expr2, 0);
1838 }
1839
1840 /* Return OFFSET if XEXP (MEM, 0) - OFFSET is known to be ALIGN
1841 bits aligned for 0 <= OFFSET < ALIGN / BITS_PER_UNIT, or
1842 -1 if not known. */
1843
1844 int
1845 get_mem_align_offset (rtx mem, unsigned int align)
1846 {
1847 tree expr;
1848 poly_uint64 offset;
1849
1850 /* This function can't use
1851 if (!MEM_EXPR (mem) || !MEM_OFFSET_KNOWN_P (mem)
1852 || (MAX (MEM_ALIGN (mem),
1853 MAX (align, get_object_alignment (MEM_EXPR (mem))))
1854 < align))
1855 return -1;
1856 else
1857 return (- MEM_OFFSET (mem)) & (align / BITS_PER_UNIT - 1);
1858 for two reasons:
1859 - COMPONENT_REFs in MEM_EXPR can have NULL first operand,
1860 for <variable>. get_inner_reference doesn't handle it and
1861 even if it did, the alignment in that case needs to be determined
1862 from DECL_FIELD_CONTEXT's TYPE_ALIGN.
1863      - it would do a suboptimal job for COMPONENT_REFs: even if MEM_EXPR
1864        isn't sufficiently aligned, the object it is in might be.  */
1865 gcc_assert (MEM_P (mem));
1866 expr = MEM_EXPR (mem);
1867 if (expr == NULL_TREE || !MEM_OFFSET_KNOWN_P (mem))
1868 return -1;
1869
1870 offset = MEM_OFFSET (mem);
1871 if (DECL_P (expr))
1872 {
1873 if (DECL_ALIGN (expr) < align)
1874 return -1;
1875 }
1876 else if (INDIRECT_REF_P (expr))
1877 {
1878 if (TYPE_ALIGN (TREE_TYPE (expr)) < (unsigned int) align)
1879 return -1;
1880 }
1881 else if (TREE_CODE (expr) == COMPONENT_REF)
1882 {
1883 while (1)
1884 {
1885 tree inner = TREE_OPERAND (expr, 0);
1886 tree field = TREE_OPERAND (expr, 1);
1887 tree byte_offset = component_ref_field_offset (expr);
1888 tree bit_offset = DECL_FIELD_BIT_OFFSET (field);
1889
1890 poly_uint64 suboffset;
1891 if (!byte_offset
1892 || !poly_int_tree_p (byte_offset, &suboffset)
1893 || !tree_fits_uhwi_p (bit_offset))
1894 return -1;
1895
1896 offset += suboffset;
1897 offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT;
1898
1899 if (inner == NULL_TREE)
1900 {
1901 if (TYPE_ALIGN (DECL_FIELD_CONTEXT (field))
1902 < (unsigned int) align)
1903 return -1;
1904 break;
1905 }
1906 else if (DECL_P (inner))
1907 {
1908 if (DECL_ALIGN (inner) < align)
1909 return -1;
1910 break;
1911 }
1912 else if (TREE_CODE (inner) != COMPONENT_REF)
1913 return -1;
1914 expr = inner;
1915 }
1916 }
1917 else
1918 return -1;
1919
1920 HOST_WIDE_INT misalign;
1921 if (!known_misalignment (offset, align / BITS_PER_UNIT, &misalign))
1922 return -1;
1923 return misalign;
1924 }
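
/* Worked example (illustrative only): for a MEM whose MEM_OFFSET is 6
   within a sufficiently aligned decl, and ALIGN of 32 bits, the
   known_misalignment call above yields 6 % 4 == 2, so 2 is returned:
   XEXP (mem, 0) minus 2 bytes is 32-bit aligned.  */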
1925
1926 /* Given REF (a MEM) and T, either the type of REF or the expression
1927    corresponding to REF, set the memory attributes.  OBJECTP is nonzero
1928    if we are making a new object of this type.  BITPOS is nonzero if
1929    there is an offset outstanding on T that will be applied later.  */
1930
1931 void
1932 set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
1933 poly_int64 bitpos)
1934 {
1935 poly_int64 apply_bitpos = 0;
1936 tree type;
1937 class mem_attrs attrs, *defattrs, *refattrs;
1938 addr_space_t as;
1939
1940 /* It can happen that type_for_mode was given a mode for which there
1941    is no language-level type.  In that case it returns NULL, which
1942    we can see here.  */
1943 if (t == NULL_TREE)
1944 return;
1945
1946 type = TYPE_P (t) ? t : TREE_TYPE (t);
1947 if (type == error_mark_node)
1948 return;
1949
1950 /* If we have already set DECL_RTL = ref, get_alias_set will get the
1951 wrong answer, as it assumes that DECL_RTL already has the right alias
1952 info. Callers should not set DECL_RTL until after the call to
1953 set_mem_attributes. */
1954 gcc_assert (!DECL_P (t) || ref != DECL_RTL_IF_SET (t));
1955
1956 /* Get the alias set from the expression or type (perhaps using a
1957 front-end routine) and use it. */
1958 attrs.alias = get_alias_set (t);
1959
1960 MEM_VOLATILE_P (ref) |= TYPE_VOLATILE (type);
1961 MEM_POINTER (ref) = POINTER_TYPE_P (type);
1962
1963 /* Default values from pre-existing memory attributes if present. */
1964 refattrs = MEM_ATTRS (ref);
1965 if (refattrs)
1966 {
1967 /* ??? Can this ever happen? Calling this routine on a MEM that
1968 already carries memory attributes should probably be invalid. */
1969 attrs.expr = refattrs->expr;
1970 attrs.offset_known_p = refattrs->offset_known_p;
1971 attrs.offset = refattrs->offset;
1972 attrs.size_known_p = refattrs->size_known_p;
1973 attrs.size = refattrs->size;
1974 attrs.align = refattrs->align;
1975 }
1976
1977 /* Otherwise, default values from the mode of the MEM reference. */
1978 else
1979 {
1980 defattrs = mode_mem_attrs[(int) GET_MODE (ref)];
1981 gcc_assert (!defattrs->expr);
1982 gcc_assert (!defattrs->offset_known_p);
1983
1984 /* Respect mode size. */
1985 attrs.size_known_p = defattrs->size_known_p;
1986 attrs.size = defattrs->size;
1987 /* ??? Is this really necessary? We probably should always get
1988 the size from the type below. */
1989
1990 /* Respect mode alignment for STRICT_ALIGNMENT targets if T is a type;
1991 if T is an object, always compute the object alignment below. */
1992 if (TYPE_P (t))
1993 attrs.align = defattrs->align;
1994 else
1995 attrs.align = BITS_PER_UNIT;
1996 /* ??? If T is a type, respecting mode alignment may *also* be wrong
1997 e.g. if the type carries an alignment attribute. Should we be
1998 able to simply always use TYPE_ALIGN? */
1999 }
2000
2001 /* We can set the alignment from the type if we are making an object or if
2002 this is an INDIRECT_REF. */
2003 if (objectp || TREE_CODE (t) == INDIRECT_REF)
2004 attrs.align = MAX (attrs.align, TYPE_ALIGN (type));
2005
2006 /* If the size is known, we can set that. */
2007 tree new_size = TYPE_SIZE_UNIT (type);
2008
2009 /* The address-space is that of the type. */
2010 as = TYPE_ADDR_SPACE (type);
2011
2012 /* If T is not a type, we may be able to deduce some more information about
2013 the expression. */
2014 if (! TYPE_P (t))
2015 {
2016 tree base;
2017
2018 if (TREE_THIS_VOLATILE (t))
2019 MEM_VOLATILE_P (ref) = 1;
2020
2021 /* Now remove any conversions: they don't change what the underlying
2022 object is. Likewise for SAVE_EXPR. */
2023 while (CONVERT_EXPR_P (t)
2024 || TREE_CODE (t) == VIEW_CONVERT_EXPR
2025 || TREE_CODE (t) == SAVE_EXPR)
2026 t = TREE_OPERAND (t, 0);
2027
2028 /* Note whether this expression can trap. */
2029 MEM_NOTRAP_P (ref) = !tree_could_trap_p (t);
2030
2031 base = get_base_address (t);
2032 if (base)
2033 {
2034 if (DECL_P (base)
2035 && TREE_READONLY (base)
2036 && (TREE_STATIC (base) || DECL_EXTERNAL (base))
2037 && !TREE_THIS_VOLATILE (base))
2038 MEM_READONLY_P (ref) = 1;
2039
2040 /* Mark static const strings readonly as well. */
2041 if (TREE_CODE (base) == STRING_CST
2042 && TREE_READONLY (base)
2043 && TREE_STATIC (base))
2044 MEM_READONLY_P (ref) = 1;
2045
2046 /* Address-space information is on the base object. */
2047 if (TREE_CODE (base) == MEM_REF
2048 || TREE_CODE (base) == TARGET_MEM_REF)
2049 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (base,
2050 0))));
2051 else
2052 as = TYPE_ADDR_SPACE (TREE_TYPE (base));
2053 }
2054
2055 /* If this expression uses its parent's alias set, mark it such
2056    that we won't change it.  */
2057 if (component_uses_parent_alias_set_from (t) != NULL_TREE)
2058 MEM_KEEP_ALIAS_SET_P (ref) = 1;
2059
2060 /* If this is a decl, set the attributes of the MEM from it. */
2061 if (DECL_P (t))
2062 {
2063 attrs.expr = t;
2064 attrs.offset_known_p = true;
2065 attrs.offset = 0;
2066 apply_bitpos = bitpos;
2067 new_size = DECL_SIZE_UNIT (t);
2068 }
2069
2070 /* ??? If we end up with a constant or a descriptor do not
2071 record a MEM_EXPR. */
2072 else if (CONSTANT_CLASS_P (t)
2073 || TREE_CODE (t) == CONSTRUCTOR)
2074 ;
2075
2076 /* If this is a field reference, record it. */
2077 else if (TREE_CODE (t) == COMPONENT_REF)
2078 {
2079 attrs.expr = t;
2080 attrs.offset_known_p = true;
2081 attrs.offset = 0;
2082 apply_bitpos = bitpos;
2083 if (DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
2084 new_size = DECL_SIZE_UNIT (TREE_OPERAND (t, 1));
2085 }
2086
2087 /* Else record it. */
2088 else
2089 {
2090 gcc_assert (handled_component_p (t)
2091 || TREE_CODE (t) == MEM_REF
2092 || TREE_CODE (t) == TARGET_MEM_REF);
2093 attrs.expr = t;
2094 attrs.offset_known_p = true;
2095 attrs.offset = 0;
2096 apply_bitpos = bitpos;
2097 }
2098
2099 /* If this is a reference based on a partitioned decl replace the
2100 base with a MEM_REF of the pointer representative we created
2101 during stack slot partitioning. */
2102 if (attrs.expr
2103 && VAR_P (base)
2104 && ! is_global_var (base)
2105 && cfun->gimple_df->decls_to_pointers != NULL)
2106 {
2107 tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
2108 if (namep)
2109 {
2110 attrs.expr = unshare_expr (attrs.expr);
2111 tree *orig_base = &attrs.expr;
2112 while (handled_component_p (*orig_base))
2113 orig_base = &TREE_OPERAND (*orig_base, 0);
2114 tree aptrt = reference_alias_ptr_type (*orig_base);
2115 *orig_base = build2 (MEM_REF, TREE_TYPE (*orig_base), *namep,
2116 build_int_cst (aptrt, 0));
2117 }
2118 }
2119
2120 /* Compute the alignment. */
2121 unsigned int obj_align;
2122 unsigned HOST_WIDE_INT obj_bitpos;
2123 get_object_alignment_1 (t, &obj_align, &obj_bitpos);
2124 unsigned int diff_align = known_alignment (obj_bitpos - bitpos);
2125 if (diff_align != 0)
2126 obj_align = MIN (obj_align, diff_align);
2127 attrs.align = MAX (attrs.align, obj_align);
2128 }
2129
2130 poly_uint64 const_size;
2131 if (poly_int_tree_p (new_size, &const_size))
2132 {
2133 attrs.size_known_p = true;
2134 attrs.size = const_size;
2135 }
2136
2137 /* If we modified OFFSET based on T, then subtract the outstanding
2138 bit position offset. Similarly, increase the size of the accessed
2139 object to contain the negative offset. */
2140 if (maybe_ne (apply_bitpos, 0))
2141 {
2142 gcc_assert (attrs.offset_known_p);
2143 poly_int64 bytepos = bits_to_bytes_round_down (apply_bitpos);
2144 attrs.offset -= bytepos;
2145 if (attrs.size_known_p)
2146 attrs.size += bytepos;
2147 }
2148
2149 /* Now set the attributes we computed above. */
2150 attrs.addrspace = as;
2151 set_mem_attrs (ref, &attrs);
2152 }
2153
2154 void
2155 set_mem_attributes (rtx ref, tree t, int objectp)
2156 {
2157 set_mem_attributes_minus_bitpos (ref, t, objectp, 0);
2158 }
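
/* Usage sketch (illustrative only): a MEM freshly generated for a
   declaration typically has its attributes filled in like so.  It
   assumes we are inside RTL expansion (gen_reg_rtx is valid) and
   that DECL is a VAR_DECL with a scalar mode.  */

static rtx ATTRIBUTE_UNUSED
example_set_mem_attributes (tree decl)
{
  rtx mem = gen_rtx_MEM (DECL_MODE (decl), gen_reg_rtx (Pmode));
  /* OBJECTP is 1: the MEM is a new object of DECL's type, so the
     type's alignment may be applied.  */
  set_mem_attributes (mem, decl, 1);
  return mem;
}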
2159
2160 /* Set the alias set of MEM to SET. */
2161
2162 void
2163 set_mem_alias_set (rtx mem, alias_set_type set)
2164 {
2165 /* If the new and old alias sets don't conflict, something is wrong. */
2166 gcc_checking_assert (alias_sets_conflict_p (set, MEM_ALIAS_SET (mem)));
2167 mem_attrs attrs (*get_mem_attrs (mem));
2168 attrs.alias = set;
2169 set_mem_attrs (mem, &attrs);
2170 }
2171
2172 /* Set the address space of MEM to ADDRSPACE (target-defined). */
2173
2174 void
2175 set_mem_addr_space (rtx mem, addr_space_t addrspace)
2176 {
2177 mem_attrs attrs (*get_mem_attrs (mem));
2178 attrs.addrspace = addrspace;
2179 set_mem_attrs (mem, &attrs);
2180 }
2181
2182 /* Set the alignment of MEM to ALIGN bits. */
2183
2184 void
2185 set_mem_align (rtx mem, unsigned int align)
2186 {
2187 mem_attrs attrs (*get_mem_attrs (mem));
2188 attrs.align = align;
2189 set_mem_attrs (mem, &attrs);
2190 }
2191
2192 /* Set the expr for MEM to EXPR. */
2193
2194 void
2195 set_mem_expr (rtx mem, tree expr)
2196 {
2197 mem_attrs attrs (*get_mem_attrs (mem));
2198 attrs.expr = expr;
2199 set_mem_attrs (mem, &attrs);
2200 }
2201
2202 /* Set the offset of MEM to OFFSET. */
2203
2204 void
2205 set_mem_offset (rtx mem, poly_int64 offset)
2206 {
2207 mem_attrs attrs (*get_mem_attrs (mem));
2208 attrs.offset_known_p = true;
2209 attrs.offset = offset;
2210 set_mem_attrs (mem, &attrs);
2211 }
2212
2213 /* Clear the offset of MEM. */
2214
2215 void
2216 clear_mem_offset (rtx mem)
2217 {
2218 mem_attrs attrs (*get_mem_attrs (mem));
2219 attrs.offset_known_p = false;
2220 set_mem_attrs (mem, &attrs);
2221 }
2222
2223 /* Set the size of MEM to SIZE. */
2224
2225 void
2226 set_mem_size (rtx mem, poly_int64 size)
2227 {
2228 mem_attrs attrs (*get_mem_attrs (mem));
2229 attrs.size_known_p = true;
2230 attrs.size = size;
2231 set_mem_attrs (mem, &attrs);
2232 }
2233
2234 /* Clear the size of MEM. */
2235
2236 void
2237 clear_mem_size (rtx mem)
2238 {
2239 mem_attrs attrs (*get_mem_attrs (mem));
2240 attrs.size_known_p = false;
2241 set_mem_attrs (mem, &attrs);
2242 }
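
/* Usage sketch (illustrative only): the accessors above all follow
   the same copy-modify-set pattern on the underlying mem_attrs.  */

static void ATTRIBUTE_UNUSED
example_mem_accessors (rtx mem)
{
  set_mem_align (mem, 64);	/* Claim 64-bit alignment.  */
  set_mem_offset (mem, 8);	/* Offset of 8 bytes is now known.  */
  set_mem_size (mem, 4);	/* The access covers 4 bytes.  */
  clear_mem_offset (mem);	/* The offset is no longer known.  */
}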
2243 \f
2244 /* Return a memory reference like MEMREF, but with its mode changed to MODE
2245 and its address changed to ADDR. (VOIDmode means don't change the mode.
2246 NULL for ADDR means don't change the address.) VALIDATE is nonzero if the
2247 returned memory location is required to be valid. INPLACE is true if any
2248 changes can be made directly to MEMREF or false if MEMREF must be treated
2249 as immutable.
2250
2251 The memory attributes are not changed. */
2252
2253 static rtx
2254 change_address_1 (rtx memref, machine_mode mode, rtx addr, int validate,
2255 bool inplace)
2256 {
2257 addr_space_t as;
2258 rtx new_rtx;
2259
2260 gcc_assert (MEM_P (memref));
2261 as = MEM_ADDR_SPACE (memref);
2262 if (mode == VOIDmode)
2263 mode = GET_MODE (memref);
2264 if (addr == 0)
2265 addr = XEXP (memref, 0);
2266 if (mode == GET_MODE (memref) && addr == XEXP (memref, 0)
2267 && (!validate || memory_address_addr_space_p (mode, addr, as)))
2268 return memref;
2269
2270 /* Don't validate the address for LRA.  LRA can make the address
2271    valid by itself in the most efficient way.  */
2272 if (validate && !lra_in_progress)
2273 {
2274 if (reload_in_progress || reload_completed)
2275 gcc_assert (memory_address_addr_space_p (mode, addr, as));
2276 else
2277 addr = memory_address_addr_space (mode, addr, as);
2278 }
2279
2280 if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref))
2281 return memref;
2282
2283 if (inplace)
2284 {
2285 XEXP (memref, 0) = addr;
2286 return memref;
2287 }
2288
2289 new_rtx = gen_rtx_MEM (mode, addr);
2290 MEM_COPY_ATTRIBUTES (new_rtx, memref);
2291 return new_rtx;
2292 }
2293
2294 /* Like change_address_1 with VALIDATE nonzero, but we are not saying in what
2295 way we are changing MEMREF, so we only preserve the alias set. */
2296
2297 rtx
2298 change_address (rtx memref, machine_mode mode, rtx addr)
2299 {
2300 rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
2301 machine_mode mmode = GET_MODE (new_rtx);
2302 class mem_attrs *defattrs;
2303
2304 mem_attrs attrs (*get_mem_attrs (memref));
2305 defattrs = mode_mem_attrs[(int) mmode];
2306 attrs.expr = NULL_TREE;
2307 attrs.offset_known_p = false;
2308 attrs.size_known_p = defattrs->size_known_p;
2309 attrs.size = defattrs->size;
2310 attrs.align = defattrs->align;
2311
2312 /* If there are no changes, just return the original memory reference. */
2313 if (new_rtx == memref)
2314 {
2315 if (mem_attrs_eq_p (get_mem_attrs (memref), &attrs))
2316 return new_rtx;
2317
2318 new_rtx = gen_rtx_MEM (mmode, XEXP (memref, 0));
2319 MEM_COPY_ATTRIBUTES (new_rtx, memref);
2320 }
2321
2322 set_mem_attrs (new_rtx, &attrs);
2323 return new_rtx;
2324 }
2325
2326 /* Return a memory reference like MEMREF, but with its mode changed
2327 to MODE and its address offset by OFFSET bytes. If VALIDATE is
2328 nonzero, the memory address is forced to be valid.
2329 If ADJUST_ADDRESS is zero, OFFSET is only used to update MEM_ATTRS
2330 and the caller is responsible for adjusting MEMREF base register.
2331 If ADJUST_OBJECT is zero, the underlying object associated with the
2332 memory reference is left unchanged and the caller is responsible for
2333 dealing with it. Otherwise, if the new memory reference is outside
2334 the underlying object, even partially, then the object is dropped.
2335 SIZE, if nonzero, is the size of an access in cases where MODE
2336 has no inherent size. */
2337
2338 rtx
2339 adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset,
2340 int validate, int adjust_address, int adjust_object,
2341 poly_int64 size)
2342 {
2343 rtx addr = XEXP (memref, 0);
2344 rtx new_rtx;
2345 scalar_int_mode address_mode;
2346 class mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
2347 unsigned HOST_WIDE_INT max_align;
2348 #ifdef POINTERS_EXTEND_UNSIGNED
2349 scalar_int_mode pointer_mode
2350 = targetm.addr_space.pointer_mode (attrs.addrspace);
2351 #endif
2352
2353 /* VOIDmode means no mode change for change_address_1. */
2354 if (mode == VOIDmode)
2355 mode = GET_MODE (memref);
2356
2357 /* Take the size of non-BLKmode accesses from the mode. */
2358 defattrs = mode_mem_attrs[(int) mode];
2359 if (defattrs->size_known_p)
2360 size = defattrs->size;
2361
2362 /* If there are no changes, just return the original memory reference. */
2363 if (mode == GET_MODE (memref)
2364 && known_eq (offset, 0)
2365 && (known_eq (size, 0)
2366 || (attrs.size_known_p && known_eq (attrs.size, size)))
2367 && (!validate || memory_address_addr_space_p (mode, addr,
2368 attrs.addrspace)))
2369 return memref;
2370
2371 /* ??? Prefer to create garbage instead of creating shared rtl.
2372 This may happen even if offset is nonzero -- consider
2373 (plus (plus reg reg) const_int) -- so do this always. */
2374 addr = copy_rtx (addr);
2375
2376 /* Convert a possibly large offset to a signed value within the
2377 range of the target address space. */
2378 address_mode = get_address_mode (memref);
2379 offset = trunc_int_for_mode (offset, address_mode);
2380
2381 if (adjust_address)
2382 {
2383 /* If MEMREF is a LO_SUM and the offset is within the alignment of the
2384 object, we can merge it into the LO_SUM. */
2385 if (GET_MODE (memref) != BLKmode
2386 && GET_CODE (addr) == LO_SUM
2387 && known_in_range_p (offset,
2388 0, (GET_MODE_ALIGNMENT (GET_MODE (memref))
2389 / BITS_PER_UNIT)))
2390 addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0),
2391 plus_constant (address_mode,
2392 XEXP (addr, 1), offset));
2393 #ifdef POINTERS_EXTEND_UNSIGNED
2394 /* If MEMREF is a ZERO_EXTEND from pointer_mode and the offset is valid
2395 in that mode, we merge it into the ZERO_EXTEND. We take advantage of
2396 the fact that pointers are not allowed to overflow. */
2397 else if (POINTERS_EXTEND_UNSIGNED > 0
2398 && GET_CODE (addr) == ZERO_EXTEND
2399 && GET_MODE (XEXP (addr, 0)) == pointer_mode
2400 && known_eq (trunc_int_for_mode (offset, pointer_mode), offset))
2401 addr = gen_rtx_ZERO_EXTEND (address_mode,
2402 plus_constant (pointer_mode,
2403 XEXP (addr, 0), offset));
2404 #endif
2405 else
2406 addr = plus_constant (address_mode, addr, offset);
2407 }
2408
2409 new_rtx = change_address_1 (memref, mode, addr, validate, false);
2410
2411 /* If the address is a REG, change_address_1 rightfully returns memref,
2412 but this would destroy memref's MEM_ATTRS. */
2413 if (new_rtx == memref && maybe_ne (offset, 0))
2414 new_rtx = copy_rtx (new_rtx);
2415
2416 /* Conservatively drop the object if we don't know where we start from. */
2417 if (adjust_object && (!attrs.offset_known_p || !attrs.size_known_p))
2418 {
2419 attrs.expr = NULL_TREE;
2420 attrs.alias = 0;
2421 }
2422
2423 /* Compute the new values of the memory attributes due to this adjustment.
2424 We add the offsets and update the alignment. */
2425 if (attrs.offset_known_p)
2426 {
2427 attrs.offset += offset;
2428
2429 /* Drop the object if the new left end is not within its bounds. */
2430 if (adjust_object && maybe_lt (attrs.offset, 0))
2431 {
2432 attrs.expr = NULL_TREE;
2433 attrs.alias = 0;
2434 }
2435 }
2436
2437 /* Compute the new alignment by taking the MIN of the alignment and the
2438    lowest-order set bit in OFFSET, but don't change the alignment if
2439    OFFSET is zero.  */
2440 if (maybe_ne (offset, 0))
2441 {
2442 max_align = known_alignment (offset) * BITS_PER_UNIT;
2443 attrs.align = MIN (attrs.align, max_align);
2444 }
2445
2446 if (maybe_ne (size, 0))
2447 {
2448 /* Drop the object if the new right end is not within its bounds. */
2449 if (adjust_object && maybe_gt (offset + size, attrs.size))
2450 {
2451 attrs.expr = NULL_TREE;
2452 attrs.alias = 0;
2453 }
2454 attrs.size_known_p = true;
2455 attrs.size = size;
2456 }
2457 else if (attrs.size_known_p)
2458 {
2459 gcc_assert (!adjust_object);
2460 attrs.size -= offset;
2461 /* ??? The store_by_pieces machinery generates negative sizes,
2462 so don't assert for that here. */
2463 }
2464
2465 set_mem_attrs (new_rtx, &attrs);
2466
2467 return new_rtx;
2468 }
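
/* Usage sketch (illustrative only): callers normally go through the
   adjust_address and adjust_address_nv macros in emit-rtl.h, which
   wrap adjust_address_1.  Reading the SImode word at byte offset 4
   of a DImode MEM might look like this.  */

static rtx ATTRIBUTE_UNUSED
example_adjust_address (rtx dimode_mem)
{
  /* Validate the new address and adjust MEM_ATTRS by the offset.  */
  return adjust_address (dimode_mem, SImode, 4);
}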
2469
2470 /* Return a memory reference like MEMREF, but with its mode changed
2471 to MODE and its address changed to ADDR, which is assumed to be
2472 MEMREF offset by OFFSET bytes. If VALIDATE is
2473 nonzero, the memory address is forced to be valid. */
2474
2475 rtx
2476 adjust_automodify_address_1 (rtx memref, machine_mode mode, rtx addr,
2477 poly_int64 offset, int validate)
2478 {
2479 memref = change_address_1 (memref, VOIDmode, addr, validate, false);
2480 return adjust_address_1 (memref, mode, offset, validate, 0, 0, 0);
2481 }
2482
2483 /* Return a memory reference like MEMREF, but whose address is changed by
2484 adding OFFSET, an RTX, to it. POW2 is the highest power of two factor
2485 known to be in OFFSET (possibly 1). */
2486
2487 rtx
2488 offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2)
2489 {
2490 rtx new_rtx, addr = XEXP (memref, 0);
2491 machine_mode address_mode;
2492 class mem_attrs *defattrs;
2493
2494 mem_attrs attrs (*get_mem_attrs (memref));
2495 address_mode = get_address_mode (memref);
2496 new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
2497
2498 /* At this point we don't know _why_ the address is invalid. It
2499 could have secondary memory references, multiplies or anything.
2500
2501 However, if we did go and rearrange things, we can wind up not
2502 being able to recognize the magic around pic_offset_table_rtx.
2503 This stuff is fragile, and is yet another example of why it is
2504 bad to expose PIC machinery too early. */
2505 if (! memory_address_addr_space_p (GET_MODE (memref), new_rtx,
2506 attrs.addrspace)
2507 && GET_CODE (addr) == PLUS
2508 && XEXP (addr, 0) == pic_offset_table_rtx)
2509 {
2510 addr = force_reg (GET_MODE (addr), addr);
2511 new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
2512 }
2513
2514 update_temp_slot_address (XEXP (memref, 0), new_rtx);
2515 new_rtx = change_address_1 (memref, VOIDmode, new_rtx, 1, false);
2516
2517 /* If there are no changes, just return the original memory reference. */
2518 if (new_rtx == memref)
2519 return new_rtx;
2520
2521 /* Update the alignment to reflect the offset. Reset the offset, which
2522 we don't know. */
2523 defattrs = mode_mem_attrs[(int) GET_MODE (new_rtx)];
2524 attrs.offset_known_p = false;
2525 attrs.size_known_p = defattrs->size_known_p;
2526 attrs.size = defattrs->size;
2527 attrs.align = MIN (attrs.align, pow2 * BITS_PER_UNIT);
2528 set_mem_attrs (new_rtx, &attrs);
2529 return new_rtx;
2530 }
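
/* Usage sketch (illustrative only): offsetting a MEM by a runtime
   index known to be a multiple of 4 preserves 4-byte alignment.  */

static rtx ATTRIBUTE_UNUSED
example_offset_address (rtx mem, rtx index)
{
  /* POW2 is 4, the largest power of two known to divide INDEX.  */
  return offset_address (mem, index, 4);
}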
2531
2532 /* Return a memory reference like MEMREF, but with its address changed to
2533 ADDR. The caller is asserting that the actual piece of memory pointed
2534 to is the same, just the form of the address is being changed, such as
2535 by putting something into a register. INPLACE is true if any changes
2536 can be made directly to MEMREF or false if MEMREF must be treated as
2537 immutable. */
2538
2539 rtx
2540 replace_equiv_address (rtx memref, rtx addr, bool inplace)
2541 {
2542 /* change_address_1 copies the memory attribute structure without change
2543 and that's exactly what we want here. */
2544 update_temp_slot_address (XEXP (memref, 0), addr);
2545 return change_address_1 (memref, VOIDmode, addr, 1, inplace);
2546 }
2547
2548 /* Likewise, but the reference is not required to be valid. */
2549
2550 rtx
2551 replace_equiv_address_nv (rtx memref, rtx addr, bool inplace)
2552 {
2553 return change_address_1 (memref, VOIDmode, addr, 0, inplace);
2554 }
2555
2556 /* Return a memory reference like MEMREF, but with its mode widened to
2557 MODE and offset by OFFSET. This would be used by targets that e.g.
2558 cannot issue QImode memory operations and have to use SImode memory
2559 operations plus masking logic. */
2560
2561 rtx
2562 widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset)
2563 {
2564 rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
2565 poly_uint64 size = GET_MODE_SIZE (mode);
2566
2567 /* If there are no changes, just return the original memory reference. */
2568 if (new_rtx == memref)
2569 return new_rtx;
2570
2571 mem_attrs attrs (*get_mem_attrs (new_rtx));
2572
2573 /* If we don't know what offset we were at within the expression, then
2574 we can't know if we've overstepped the bounds. */
2575 if (! attrs.offset_known_p)
2576 attrs.expr = NULL_TREE;
2577
2578 while (attrs.expr)
2579 {
2580 if (TREE_CODE (attrs.expr) == COMPONENT_REF)
2581 {
2582 tree field = TREE_OPERAND (attrs.expr, 1);
2583 tree offset = component_ref_field_offset (attrs.expr);
2584
2585 if (! DECL_SIZE_UNIT (field))
2586 {
2587 attrs.expr = NULL_TREE;
2588 break;
2589 }
2590
2591 /* Is the field at least as large as the access? If so, ok,
2592 otherwise strip back to the containing structure. */
2593 if (poly_int_tree_p (DECL_SIZE_UNIT (field))
2594 && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (field)), size)
2595 && known_ge (attrs.offset, 0))
2596 break;
2597
2598 poly_uint64 suboffset;
2599 if (!poly_int_tree_p (offset, &suboffset))
2600 {
2601 attrs.expr = NULL_TREE;
2602 break;
2603 }
2604
2605 attrs.expr = TREE_OPERAND (attrs.expr, 0);
2606 attrs.offset += suboffset;
2607 attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
2608 / BITS_PER_UNIT);
2609 }
2610 /* Similarly for the decl. */
2611 else if (DECL_P (attrs.expr)
2612 && DECL_SIZE_UNIT (attrs.expr)
2613 && poly_int_tree_p (DECL_SIZE_UNIT (attrs.expr))
2614 && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (attrs.expr)),
2615 size)
2616 && known_ge (attrs.offset, 0))
2617 break;
2618 else
2619 {
2620 /* The widened memory access overflows the expression, which means
2621 that it could alias another expression. Zap it. */
2622 attrs.expr = NULL_TREE;
2623 break;
2624 }
2625 }
2626
2627 if (! attrs.expr)
2628 attrs.offset_known_p = false;
2629
2630 /* The widened memory may alias other stuff, so zap the alias set. */
2631 /* ??? Maybe use get_alias_set on any remaining expression. */
2632 attrs.alias = 0;
2633 attrs.size_known_p = true;
2634 attrs.size = size;
2635 set_mem_attrs (new_rtx, &attrs);
2636 return new_rtx;
2637 }
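
/* Usage sketch (illustrative only): a target that cannot issue QImode
   loads might widen a byte reference to SImode and mask the result;
   the widening itself would look like this.  */

static rtx ATTRIBUTE_UNUSED
example_widen_memory_access (rtx byte_mem)
{
  /* Same address, wider mode; MEM_EXPR and the alias set are
     conservatively weakened as described above.  */
  return widen_memory_access (byte_mem, SImode, 0);
}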
2638 \f
2639 /* A fake decl that is used as the MEM_EXPR of spill slots. */
2640 static GTY(()) tree spill_slot_decl;
2641
2642 tree
2643 get_spill_slot_decl (bool force_build_p)
2644 {
2645 tree d = spill_slot_decl;
2646 rtx rd;
2647
2648 if (d || !force_build_p)
2649 return d;
2650
2651 d = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
2652 VAR_DECL, get_identifier ("%sfp"), void_type_node);
2653 DECL_ARTIFICIAL (d) = 1;
2654 DECL_IGNORED_P (d) = 1;
2655 TREE_USED (d) = 1;
2656 spill_slot_decl = d;
2657
2658 rd = gen_rtx_MEM (BLKmode, frame_pointer_rtx);
2659 MEM_NOTRAP_P (rd) = 1;
2660 mem_attrs attrs (*mode_mem_attrs[(int) BLKmode]);
2661 attrs.alias = new_alias_set ();
2662 attrs.expr = d;
2663 set_mem_attrs (rd, &attrs);
2664 SET_DECL_RTL (d, rd);
2665
2666 return d;
2667 }
2668
2669 /* Given MEM, a result from assign_stack_local, fill in the memory
2670 attributes as appropriate for a register allocator spill slot.
2671 These slots are not aliasable by other memory. We arrange for
2672 them all to use a single MEM_EXPR, so that the aliasing code can
2673 work properly in the case of shared spill slots. */
2674
2675 void
2676 set_mem_attrs_for_spill (rtx mem)
2677 {
2678 rtx addr;
2679
2680 mem_attrs attrs (*get_mem_attrs (mem));
2681 attrs.expr = get_spill_slot_decl (true);
2682 attrs.alias = MEM_ALIAS_SET (DECL_RTL (attrs.expr));
2683 attrs.addrspace = ADDR_SPACE_GENERIC;
2684
2685 /* We expect the incoming memory to be of the form:
2686 (mem:MODE (plus (reg sfp) (const_int offset)))
2687 with perhaps the plus missing for offset = 0. */
2688 addr = XEXP (mem, 0);
2689 attrs.offset_known_p = true;
2690 strip_offset (addr, &attrs.offset);
2691
2692 set_mem_attrs (mem, &attrs);
2693 MEM_NOTRAP_P (mem) = 1;
2694 }
2695 \f
2696 /* Return a newly created CODE_LABEL rtx with a unique label number. */
2697
2698 rtx_code_label *
2699 gen_label_rtx (void)
2700 {
2701 return as_a <rtx_code_label *> (
2702 gen_rtx_CODE_LABEL (VOIDmode, NULL_RTX, NULL_RTX,
2703 NULL, label_num++, NULL));
2704 }
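
/* Usage sketch (illustrative only): a label becomes part of the insn
   chain only once it is emitted.  */

static rtx_code_label * ATTRIBUTE_UNUSED
example_gen_label (void)
{
  rtx_code_label *label = gen_label_rtx ();
  emit_label (label);
  return label;
}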
2705 \f
2706 /* For procedure integration. */
2707
2708 /* Install new pointers to the first and last insns in the chain.
2709 Also, set cur_insn_uid to one higher than the last in use.
2710 Used for an inline-procedure after copying the insn chain. */
2711
2712 void
2713 set_new_first_and_last_insn (rtx_insn *first, rtx_insn *last)
2714 {
2715 rtx_insn *insn;
2716
2717 set_first_insn (first);
2718 set_last_insn (last);
2719 cur_insn_uid = 0;
2720
2721 if (param_min_nondebug_insn_uid || MAY_HAVE_DEBUG_INSNS)
2722 {
2723 int debug_count = 0;
2724
2725 cur_insn_uid = param_min_nondebug_insn_uid - 1;
2726 cur_debug_insn_uid = 0;
2727
2728 for (insn = first; insn; insn = NEXT_INSN (insn))
2729 if (INSN_UID (insn) < param_min_nondebug_insn_uid)
2730 cur_debug_insn_uid = MAX (cur_debug_insn_uid, INSN_UID (insn));
2731 else
2732 {
2733 cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn));
2734 if (DEBUG_INSN_P (insn))
2735 debug_count++;
2736 }
2737
2738 if (debug_count)
2739 cur_debug_insn_uid = param_min_nondebug_insn_uid + debug_count;
2740 else
2741 cur_debug_insn_uid++;
2742 }
2743 else
2744 for (insn = first; insn; insn = NEXT_INSN (insn))
2745 cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn));
2746
2747 cur_insn_uid++;
2748 }
2749 \f
2750 /* Go through all the RTL insn bodies and copy any invalid shared
2751 structure. This routine should only be called once. */
2752
2753 static void
2754 unshare_all_rtl_1 (rtx_insn *insn)
2755 {
2756 /* Unshare just about everything else. */
2757 unshare_all_rtl_in_chain (insn);
2758
2759 /* Make sure the addresses of stack slots found outside the insn chain
2760 (such as in the DECL_RTL of a variable) are not shared
2761 with the insn chain.
2762
2763 This special care is necessary when the stack slot MEM does not
2764 actually appear in the insn chain. If it does appear, its address
2765 is unshared from all else at that point. */
2766 unsigned int i;
2767 rtx temp;
2768 FOR_EACH_VEC_SAFE_ELT (stack_slot_list, i, temp)
2769 (*stack_slot_list)[i] = copy_rtx_if_shared (temp);
2770 }
2771
2772 /* Go through all the RTL insn bodies and copy any invalid shared
2773 structure, again. This is a fairly expensive thing to do so it
2774 should be done sparingly. */
2775
2776 void
2777 unshare_all_rtl_again (rtx_insn *insn)
2778 {
2779 rtx_insn *p;
2780 tree decl;
2781
2782 for (p = insn; p; p = NEXT_INSN (p))
2783 if (INSN_P (p))
2784 {
2785 reset_used_flags (PATTERN (p));
2786 reset_used_flags (REG_NOTES (p));
2787 if (CALL_P (p))
2788 reset_used_flags (CALL_INSN_FUNCTION_USAGE (p));
2789 }
2790
2791 /* Make sure that virtual stack slots are not shared. */
2792 set_used_decls (DECL_INITIAL (cfun->decl));
2793
2794 /* Make sure that virtual parameters are not shared. */
2795 for (decl = DECL_ARGUMENTS (cfun->decl); decl; decl = DECL_CHAIN (decl))
2796 set_used_flags (DECL_RTL (decl));
2797
2798 rtx temp;
2799 unsigned int i;
2800 FOR_EACH_VEC_SAFE_ELT (stack_slot_list, i, temp)
2801 reset_used_flags (temp);
2802
2803 unshare_all_rtl_1 (insn);
2804 }
2805
2806 unsigned int
2807 unshare_all_rtl (void)
2808 {
2809 unshare_all_rtl_1 (get_insns ());
2810
2811 for (tree decl = DECL_ARGUMENTS (cfun->decl); decl; decl = DECL_CHAIN (decl))
2812 {
2813 if (DECL_RTL_SET_P (decl))
2814 SET_DECL_RTL (decl, copy_rtx_if_shared (DECL_RTL (decl)));
2815 DECL_INCOMING_RTL (decl) = copy_rtx_if_shared (DECL_INCOMING_RTL (decl));
2816 }
2817
2818 return 0;
2819 }
2820
2821
2822 /* Check that ORIG is not marked when it should not be, and mark ORIG
2823    as in use.  Recursively do the same for its subexpressions.  */
2824
2825 static void
2826 verify_rtx_sharing (rtx orig, rtx insn)
2827 {
2828 rtx x = orig;
2829 int i;
2830 enum rtx_code code;
2831 const char *format_ptr;
2832
2833 if (x == 0)
2834 return;
2835
2836 code = GET_CODE (x);
2837
2838 /* These types may be freely shared. */
2839
2840 switch (code)
2841 {
2842 case REG:
2843 case DEBUG_EXPR:
2844 case VALUE:
2845 CASE_CONST_ANY:
2846 case SYMBOL_REF:
2847 case LABEL_REF:
2848 case CODE_LABEL:
2849 case PC:
2850 case CC0:
2851 case RETURN:
2852 case SIMPLE_RETURN:
2853 case SCRATCH:
2854 /* SCRATCHes must be shared because they represent distinct values. */
2855 return;
2856 case CLOBBER:
2857 /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
2858 clobbers or clobbers of hard registers that originated as pseudos.
2859 This is needed to allow safe register renaming. */
2860 if (REG_P (XEXP (x, 0))
2861 && HARD_REGISTER_NUM_P (REGNO (XEXP (x, 0)))
2862 && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (x, 0))))
2863 return;
2864 break;
2865
2866 case CONST:
2867 if (shared_const_p (orig))
2868 return;
2869 break;
2870
2871 case MEM:
2872 /* A MEM is allowed to be shared if its address is constant or reload has started. */
2873 if (CONSTANT_ADDRESS_P (XEXP (x, 0))
2874 || reload_completed || reload_in_progress)
2875 return;
2876
2877 break;
2878
2879 default:
2880 break;
2881 }
2882
2883 /* This rtx may not be shared. If it has already been seen,
2884 replace it with a copy of itself. */
2885 if (flag_checking && RTX_FLAG (x, used))
2886 {
2887 error ("invalid rtl sharing found in the insn");
2888 debug_rtx (insn);
2889 error ("shared rtx");
2890 debug_rtx (x);
2891 internal_error ("internal consistency failure");
2892 }
2893 gcc_assert (!RTX_FLAG (x, used));
2894
2895 RTX_FLAG (x, used) = 1;
2896
2897 /* Now scan the subexpressions recursively. */
2898
2899 format_ptr = GET_RTX_FORMAT (code);
2900
2901 for (i = 0; i < GET_RTX_LENGTH (code); i++)
2902 {
2903 switch (*format_ptr++)
2904 {
2905 case 'e':
2906 verify_rtx_sharing (XEXP (x, i), insn);
2907 break;
2908
2909 case 'E':
2910 if (XVEC (x, i) != NULL)
2911 {
2912 int j;
2913 int len = XVECLEN (x, i);
2914
2915 for (j = 0; j < len; j++)
2916 {
2917 /* We allow sharing of ASM_OPERANDS inside a single
2918    instruction.  */
2919 if (j && GET_CODE (XVECEXP (x, i, j)) == SET
2920 && (GET_CODE (SET_SRC (XVECEXP (x, i, j)))
2921 == ASM_OPERANDS))
2922 verify_rtx_sharing (SET_DEST (XVECEXP (x, i, j)), insn);
2923 else
2924 verify_rtx_sharing (XVECEXP (x, i, j), insn);
2925 }
2926 }
2927 break;
2928 }
2929 }
2930 return;
2931 }
2932
2933 /* Reset used-flags for INSN. */
2934
2935 static void
2936 reset_insn_used_flags (rtx insn)
2937 {
2938 gcc_assert (INSN_P (insn));
2939 reset_used_flags (PATTERN (insn));
2940 reset_used_flags (REG_NOTES (insn));
2941 if (CALL_P (insn))
2942 reset_used_flags (CALL_INSN_FUNCTION_USAGE (insn));
2943 }
2944
2945 /* Go through all the RTL insn bodies and clear all the USED bits. */
2946
2947 static void
2948 reset_all_used_flags (void)
2949 {
2950 rtx_insn *p;
2951
2952 for (p = get_insns (); p; p = NEXT_INSN (p))
2953 if (INSN_P (p))
2954 {
2955 rtx pat = PATTERN (p);
2956 if (GET_CODE (pat) != SEQUENCE)
2957 reset_insn_used_flags (p);
2958 else
2959 {
2960 gcc_assert (REG_NOTES (p) == NULL);
2961 for (int i = 0; i < XVECLEN (pat, 0); i++)
2962 {
2963 rtx insn = XVECEXP (pat, 0, i);
2964 if (INSN_P (insn))
2965 reset_insn_used_flags (insn);
2966 }
2967 }
2968 }
2969 }
2970
2971 /* Verify sharing in INSN. */
2972
2973 static void
2974 verify_insn_sharing (rtx insn)
2975 {
2976 gcc_assert (INSN_P (insn));
2977 verify_rtx_sharing (PATTERN (insn), insn);
2978 verify_rtx_sharing (REG_NOTES (insn), insn);
2979 if (CALL_P (insn))
2980 verify_rtx_sharing (CALL_INSN_FUNCTION_USAGE (insn), insn);
2981 }
2982
2983 /* Go through all the RTL insn bodies and check that there is no unexpected
2984 sharing in between the subexpressions. */
2985
2986 DEBUG_FUNCTION void
2987 verify_rtl_sharing (void)
2988 {
2989 rtx_insn *p;
2990
2991 timevar_push (TV_VERIFY_RTL_SHARING);
2992
2993 reset_all_used_flags ();
2994
2995 for (p = get_insns (); p; p = NEXT_INSN (p))
2996 if (INSN_P (p))
2997 {
2998 rtx pat = PATTERN (p);
2999 if (GET_CODE (pat) != SEQUENCE)
3000 verify_insn_sharing (p);
3001 else
3002 for (int i = 0; i < XVECLEN (pat, 0); i++)
3003 {
3004 rtx insn = XVECEXP (pat, 0, i);
3005 if (INSN_P (insn))
3006 verify_insn_sharing (insn);
3007 }
3008 }
3009
3010 reset_all_used_flags ();
3011
3012 timevar_pop (TV_VERIFY_RTL_SHARING);
3013 }
3014
3015 /* Go through all the RTL insn bodies and copy any invalid shared structure.
3016 Assumes the mark bits are cleared at entry. */
3017
3018 void
3019 unshare_all_rtl_in_chain (rtx_insn *insn)
3020 {
3021 for (; insn; insn = NEXT_INSN (insn))
3022 if (INSN_P (insn))
3023 {
3024 PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn));
3025 REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn));
3026 if (CALL_P (insn))
3027 CALL_INSN_FUNCTION_USAGE (insn)
3028 = copy_rtx_if_shared (CALL_INSN_FUNCTION_USAGE (insn));
3029 }
3030 }
3031
3032 /* Go through all virtual stack slots of a function and mark them as
3033 shared. We never replace the DECL_RTLs themselves with a copy,
3034 but expressions mentioned in a DECL_RTL cannot be shared with
3035 expressions in the instruction stream.
3036
3037 Note that reload may convert pseudo registers into memories in-place.
3038 Pseudo registers are always shared, but MEMs never are. Thus if we
3039 reset the used flags on MEMs in the instruction stream, we must set
3040 them again on MEMs that appear in DECL_RTLs. */
3041
3042 static void
3043 set_used_decls (tree blk)
3044 {
3045 tree t;
3046
3047 /* Mark decls. */
3048 for (t = BLOCK_VARS (blk); t; t = DECL_CHAIN (t))
3049 if (DECL_RTL_SET_P (t))
3050 set_used_flags (DECL_RTL (t));
3051
3052 /* Now process sub-blocks. */
3053 for (t = BLOCK_SUBBLOCKS (blk); t; t = BLOCK_CHAIN (t))
3054 set_used_decls (t);
3055 }
3056
3057 /* Mark ORIG as in use, and return a copy of it if it was already in use.
3058 Recursively does the same for subexpressions. Uses
3059 copy_rtx_if_shared_1 to reduce stack space. */
3060
3061 rtx
3062 copy_rtx_if_shared (rtx orig)
3063 {
3064 copy_rtx_if_shared_1 (&orig);
3065 return orig;
3066 }
3067
3068 /* Mark *ORIG1 as in use, and set it to a copy of it if it was already in
3069 use. Recursively does the same for subexpressions. */
3070
3071 static void
3072 copy_rtx_if_shared_1 (rtx *orig1)
3073 {
3074 rtx x;
3075 int i;
3076 enum rtx_code code;
3077 rtx *last_ptr;
3078 const char *format_ptr;
3079 int copied = 0;
3080 int length;
3081
3082 /* Repeat is used to turn tail-recursion into iteration. */
3083 repeat:
3084 x = *orig1;
3085
3086 if (x == 0)
3087 return;
3088
3089 code = GET_CODE (x);
3090
3091 /* These types may be freely shared. */
3092
3093 switch (code)
3094 {
3095 case REG:
3096 case DEBUG_EXPR:
3097 case VALUE:
3098 CASE_CONST_ANY:
3099 case SYMBOL_REF:
3100 case LABEL_REF:
3101 case CODE_LABEL:
3102 case PC:
3103 case CC0:
3104 case RETURN:
3105 case SIMPLE_RETURN:
3106 case SCRATCH:
3107 /* SCRATCHes must be shared because they represent distinct values. */
3108 return;
3109 case CLOBBER:
3110 /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
3111 clobbers or clobbers of hard registers that originated as pseudos.
3112 This is needed to allow safe register renaming. */
3113 if (REG_P (XEXP (x, 0))
3114 && HARD_REGISTER_NUM_P (REGNO (XEXP (x, 0)))
3115 && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (x, 0))))
3116 return;
3117 break;
3118
3119 case CONST:
3120 if (shared_const_p (x))
3121 return;
3122 break;
3123
3124 case DEBUG_INSN:
3125 case INSN:
3126 case JUMP_INSN:
3127 case CALL_INSN:
3128 case NOTE:
3129 case BARRIER:
3130 /* The chain of insns is not being copied. */
3131 return;
3132
3133 default:
3134 break;
3135 }
3136
3137 /* This rtx may not be shared. If it has already been seen,
3138 replace it with a copy of itself. */
3139
3140 if (RTX_FLAG (x, used))
3141 {
3142 x = shallow_copy_rtx (x);
3143 copied = 1;
3144 }
3145 RTX_FLAG (x, used) = 1;
3146
3147 /* Now scan the subexpressions recursively.
3148 We can store any replaced subexpressions directly into X
3149 since we know X is not shared! Any vectors in X
3150 must be copied if X was copied. */
3151
3152 format_ptr = GET_RTX_FORMAT (code);
3153 length = GET_RTX_LENGTH (code);
3154 last_ptr = NULL;
3155
3156 for (i = 0; i < length; i++)
3157 {
3158 switch (*format_ptr++)
3159 {
3160 case 'e':
3161 if (last_ptr)
3162 copy_rtx_if_shared_1 (last_ptr);
3163 last_ptr = &XEXP (x, i);
3164 break;
3165
3166 case 'E':
3167 if (XVEC (x, i) != NULL)
3168 {
3169 int j;
3170 int len = XVECLEN (x, i);
3171
3172 /* Copy the vector iff we copied the rtx and the length
3173    is nonzero.  */
3174 if (copied && len > 0)
3175 XVEC (x, i) = gen_rtvec_v (len, XVEC (x, i)->elem);
3176
3177 /* Call recursively on all inside the vector. */
3178 for (j = 0; j < len; j++)
3179 {
3180 if (last_ptr)
3181 copy_rtx_if_shared_1 (last_ptr);
3182 last_ptr = &XVECEXP (x, i, j);
3183 }
3184 }
3185 break;
3186 }
3187 }
3188 *orig1 = x;
3189 if (last_ptr)
3190 {
3191 orig1 = last_ptr;
3192 goto repeat;
3193 }
3194 return;
3195 }
3196
3197 /* Set the USED bit in X and its non-shareable subparts to FLAG. */
3198
3199 static void
3200 mark_used_flags (rtx x, int flag)
3201 {
3202 int i, j;
3203 enum rtx_code code;
3204 const char *format_ptr;
3205 int length;
3206
3207 /* Repeat is used to turn tail-recursion into iteration. */
3208 repeat:
3209 if (x == 0)
3210 return;
3211
3212 code = GET_CODE (x);
3213
3214 /* These types may be freely shared so we needn't do any resetting
3215 for them. */
3216
3217 switch (code)
3218 {
3219 case REG:
3220 case DEBUG_EXPR:
3221 case VALUE:
3222 CASE_CONST_ANY:
3223 case SYMBOL_REF:
3224 case CODE_LABEL:
3225 case PC:
3226 case CC0:
3227 case RETURN:
3228 case SIMPLE_RETURN:
3229 return;
3230
3231 case DEBUG_INSN:
3232 case INSN:
3233 case JUMP_INSN:
3234 case CALL_INSN:
3235 case NOTE:
3236 case LABEL_REF:
3237 case BARRIER:
3238 /* The chain of insns is not being copied. */
3239 return;
3240
3241 default:
3242 break;
3243 }
3244
3245 RTX_FLAG (x, used) = flag;
3246
3247 format_ptr = GET_RTX_FORMAT (code);
3248 length = GET_RTX_LENGTH (code);
3249
3250 for (i = 0; i < length; i++)
3251 {
3252 switch (*format_ptr++)
3253 {
3254 case 'e':
3255 if (i == length-1)
3256 {
3257 x = XEXP (x, i);
3258 goto repeat;
3259 }
3260 mark_used_flags (XEXP (x, i), flag);
3261 break;
3262
3263 case 'E':
3264 for (j = 0; j < XVECLEN (x, i); j++)
3265 mark_used_flags (XVECEXP (x, i, j), flag);
3266 break;
3267 }
3268 }
3269 }
3270
3271 /* Clear all the USED bits in X to allow copy_rtx_if_shared to be used
3272 to look for shared sub-parts. */
3273
3274 void
3275 reset_used_flags (rtx x)
3276 {
3277 mark_used_flags (x, 0);
3278 }
3279
3280 /* Set all the USED bits in X to allow copy_rtx_if_shared to be used
3281 to look for shared sub-parts. */
3282
3283 void
3284 set_used_flags (rtx x)
3285 {
3286 mark_used_flags (x, 1);
3287 }
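
/* Usage sketch (illustrative only): a pass that may have created
   shared rtl in X typically clears the used bits first and then
   copies whatever turns out to be shared.  */

static rtx ATTRIBUTE_UNUSED
example_unshare (rtx x)
{
  reset_used_flags (x);
  return copy_rtx_if_shared (x);
}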
3288 \f
3289 /* Copy X if necessary so that it won't be altered by changes in OTHER.
3290 Return X or the rtx for the pseudo reg the value of X was copied into.
3291 OTHER must be valid as a SET_DEST. */
3292
3293 rtx
3294 make_safe_from (rtx x, rtx other)
3295 {
3296 while (1)
3297 switch (GET_CODE (other))
3298 {
3299 case SUBREG:
3300 other = SUBREG_REG (other);
3301 break;
3302 case STRICT_LOW_PART:
3303 case SIGN_EXTEND:
3304 case ZERO_EXTEND:
3305 other = XEXP (other, 0);
3306 break;
3307 default:
3308 goto done;
3309 }
3310 done:
3311 if ((MEM_P (other)
3312 && ! CONSTANT_P (x)
3313 && !REG_P (x)
3314 && GET_CODE (x) != SUBREG)
3315 || (REG_P (other)
3316 && (REGNO (other) < FIRST_PSEUDO_REGISTER
3317 || reg_mentioned_p (other, x))))
3318 {
3319 rtx temp = gen_reg_rtx (GET_MODE (x));
3320 emit_move_insn (temp, x);
3321 return temp;
3322 }
3323 return x;
3324 }
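
/* Worked example (illustrative only): if OTHER is (reg 100) and X
   mentions (reg 100), storing into OTHER would clobber X, so X is
   copied into a fresh pseudo and that pseudo is returned; if X and
   OTHER are unrelated, X is returned unchanged.  */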
3325 \f
3326 /* Emission of insns (adding them to the doubly-linked list). */
3327
3328 /* Return the last insn emitted, even if it is in a sequence now pushed. */
3329
3330 rtx_insn *
3331 get_last_insn_anywhere (void)
3332 {
3333 struct sequence_stack *seq;
3334 for (seq = get_current_sequence (); seq; seq = seq->next)
3335 if (seq->last != 0)
3336 return seq->last;
3337 return 0;
3338 }
3339
3340 /* Return the first nonnote insn emitted in current sequence or current
3341 function. This routine looks inside SEQUENCEs. */
3342
3343 rtx_insn *
3344 get_first_nonnote_insn (void)
3345 {
3346 rtx_insn *insn = get_insns ();
3347
3348 if (insn)
3349 {
3350 if (NOTE_P (insn))
3351 for (insn = next_insn (insn);
3352 insn && NOTE_P (insn);
3353 insn = next_insn (insn))
3354 continue;
3355 else
3356 {
3357 if (NONJUMP_INSN_P (insn)
3358 && GET_CODE (PATTERN (insn)) == SEQUENCE)
3359 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
3360 }
3361 }
3362
3363 return insn;
3364 }
3365
3366 /* Return the last nonnote insn emitted in current sequence or current
3367 function. This routine looks inside SEQUENCEs. */
3368
3369 rtx_insn *
3370 get_last_nonnote_insn (void)
3371 {
3372 rtx_insn *insn = get_last_insn ();
3373
3374 if (insn)
3375 {
3376 if (NOTE_P (insn))
3377 for (insn = previous_insn (insn);
3378 insn && NOTE_P (insn);
3379 insn = previous_insn (insn))
3380 continue;
3381 else
3382 {
3383 if (NONJUMP_INSN_P (insn))
3384 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
3385 insn = seq->insn (seq->len () - 1);
3386 }
3387 }
3388
3389 return insn;
3390 }
3391
3392 /* Return the number of actual (non-debug) insns emitted in this
3393 function. */
3394
3395 int
3396 get_max_insn_count (void)
3397 {
3398 int n = cur_insn_uid;
3399
3400 /* The table size must be stable across -g, to avoid codegen
3401 differences due to debug insns, and not be affected by
3402 -fmin-insn-uid, to avoid excessive table size and to simplify
3403 debugging of -fcompare-debug failures. */
3404 if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
3405 n -= cur_debug_insn_uid;
3406 else
3407 n -= param_min_nondebug_insn_uid;
3408
3409 return n;
3410 }
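
/* Worked example (illustrative only): with param_min_nondebug_insn_uid
   of 10000 and cur_insn_uid of 10050, nondebug insns occupy uids 10000
   to 10049, so 50 is returned; subtracting the reserved debug range
   either way keeps the result stable across -g.  */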
3411
3412 \f
3413 /* Return the next insn. If it is a SEQUENCE, return the first insn
3414 of the sequence. */
3415
3416 rtx_insn *
3417 next_insn (rtx_insn *insn)
3418 {
3419 if (insn)
3420 {
3421 insn = NEXT_INSN (insn);
3422 if (insn && NONJUMP_INSN_P (insn)
3423 && GET_CODE (PATTERN (insn)) == SEQUENCE)
3424 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
3425 }
3426
3427 return insn;
3428 }
3429
3430 /* Return the previous insn. If it is a SEQUENCE, return the last insn
3431 of the sequence. */
3432
3433 rtx_insn *
3434 previous_insn (rtx_insn *insn)
3435 {
3436 if (insn)
3437 {
3438 insn = PREV_INSN (insn);
3439 if (insn && NONJUMP_INSN_P (insn))
3440 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
3441 insn = seq->insn (seq->len () - 1);
3442 }
3443
3444 return insn;
3445 }
3446
3447 /* Return the next insn after INSN that is not a NOTE. This routine does not
3448 look inside SEQUENCEs. */
3449
3450 rtx_insn *
3451 next_nonnote_insn (rtx_insn *insn)
3452 {
3453 while (insn)
3454 {
3455 insn = NEXT_INSN (insn);
3456 if (insn == 0 || !NOTE_P (insn))
3457 break;
3458 }
3459
3460 return insn;
3461 }
3462
3463 /* Return the next insn after INSN that is not a DEBUG_INSN. This
3464 routine does not look inside SEQUENCEs. */
3465
3466 rtx_insn *
3467 next_nondebug_insn (rtx_insn *insn)
3468 {
3469 while (insn)
3470 {
3471 insn = NEXT_INSN (insn);
3472 if (insn == 0 || !DEBUG_INSN_P (insn))
3473 break;
3474 }
3475
3476 return insn;
3477 }
3478
3479 /* Return the previous insn before INSN that is not a NOTE. This routine does
3480 not look inside SEQUENCEs. */
3481
3482 rtx_insn *
3483 prev_nonnote_insn (rtx_insn *insn)
3484 {
3485 while (insn)
3486 {
3487 insn = PREV_INSN (insn);
3488 if (insn == 0 || !NOTE_P (insn))
3489 break;
3490 }
3491
3492 return insn;
3493 }
3494
3495 /* Return the previous insn before INSN that is not a DEBUG_INSN.
3496 This routine does not look inside SEQUENCEs. */
3497
3498 rtx_insn *
3499 prev_nondebug_insn (rtx_insn *insn)
3500 {
3501 while (insn)
3502 {
3503 insn = PREV_INSN (insn);
3504 if (insn == 0 || !DEBUG_INSN_P (insn))
3505 break;
3506 }
3507
3508 return insn;
3509 }
3510
3511 /* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN.
3512 This routine does not look inside SEQUENCEs. */
3513
3514 rtx_insn *
3515 next_nonnote_nondebug_insn (rtx_insn *insn)
3516 {
3517 while (insn)
3518 {
3519 insn = NEXT_INSN (insn);
3520 if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
3521 break;
3522 }
3523
3524 return insn;
3525 }
3526
3527 /* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN,
3528 but stop the search before we enter another basic block. This
3529 routine does not look inside SEQUENCEs. */
3530
3531 rtx_insn *
3532 next_nonnote_nondebug_insn_bb (rtx_insn *insn)
3533 {
3534 while (insn)
3535 {
3536 insn = NEXT_INSN (insn);
3537 if (insn == 0)
3538 break;
3539 if (DEBUG_INSN_P (insn))
3540 continue;
3541 if (!NOTE_P (insn))
3542 break;
3543 if (NOTE_INSN_BASIC_BLOCK_P (insn))
3544 return NULL;
3545 }
3546
3547 return insn;
3548 }
3549
3550 /* Return the previous insn before INSN that is not a NOTE nor DEBUG_INSN.
3551 This routine does not look inside SEQUENCEs. */
3552
3553 rtx_insn *
3554 prev_nonnote_nondebug_insn (rtx_insn *insn)
3555 {
3556 while (insn)
3557 {
3558 insn = PREV_INSN (insn);
3559 if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
3560 break;
3561 }
3562
3563 return insn;
3564 }
3565
3566 /* Return the previous insn before INSN that is not a NOTE nor
3567 DEBUG_INSN, but stop the search before we enter another basic
3568 block. This routine does not look inside SEQUENCEs. */
3569
3570 rtx_insn *
3571 prev_nonnote_nondebug_insn_bb (rtx_insn *insn)
3572 {
3573 while (insn)
3574 {
3575 insn = PREV_INSN (insn);
3576 if (insn == 0)
3577 break;
3578 if (DEBUG_INSN_P (insn))
3579 continue;
3580 if (!NOTE_P (insn))
3581 break;
3582 if (NOTE_INSN_BASIC_BLOCK_P (insn))
3583 return NULL;
3584 }
3585
3586 return insn;
3587 }
3588
3589 /* Return the next INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN after INSN;
3590 or 0, if there is none. This routine does not look inside
3591 SEQUENCEs. */
3592
3593 rtx_insn *
3594 next_real_insn (rtx_insn *insn)
3595 {
3596 while (insn)
3597 {
3598 insn = NEXT_INSN (insn);
3599 if (insn == 0 || INSN_P (insn))
3600 break;
3601 }
3602
3603 return insn;
3604 }
3605
3606 /* Return the last INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN before INSN;
3607 or 0, if there is none. This routine does not look inside
3608 SEQUENCEs. */
3609
3610 rtx_insn *
3611 prev_real_insn (rtx_insn *insn)
3612 {
3613 while (insn)
3614 {
3615 insn = PREV_INSN (insn);
3616 if (insn == 0 || INSN_P (insn))
3617 break;
3618 }
3619
3620 return insn;
3621 }
3622
3623 /* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
3624 or 0, if there is none. This routine does not look inside
3625 SEQUENCEs. */
3626
3627 rtx_insn *
3628 next_real_nondebug_insn (rtx uncast_insn)
3629 {
3630 rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
3631
3632 while (insn)
3633 {
3634 insn = NEXT_INSN (insn);
3635 if (insn == 0 || NONDEBUG_INSN_P (insn))
3636 break;
3637 }
3638
3639 return insn;
3640 }
3641
3642 /* Return the last INSN, CALL_INSN or JUMP_INSN before INSN;
3643 or 0, if there is none. This routine does not look inside
3644 SEQUENCEs. */
3645
3646 rtx_insn *
3647 prev_real_nondebug_insn (rtx_insn *insn)
3648 {
3649 while (insn)
3650 {
3651 insn = PREV_INSN (insn);
3652 if (insn == 0 || NONDEBUG_INSN_P (insn))
3653 break;
3654 }
3655
3656 return insn;
3657 }
3658
3659 /* Return the last CALL_INSN in the current list, or 0 if there is none.
3660 This routine does not look inside SEQUENCEs. */
3661
3662 rtx_call_insn *
3663 last_call_insn (void)
3664 {
3665 rtx_insn *insn;
3666
3667 for (insn = get_last_insn ();
3668 insn && !CALL_P (insn);
3669 insn = PREV_INSN (insn))
3670 ;
3671
3672 return safe_as_a <rtx_call_insn *> (insn);
3673 }
3674
3675 /* Return nonzero if INSN really does something.  After reload,
3676    standalone USE and CLOBBER insns do not count as doing
3677    anything.  */
3678
3679 int
3680 active_insn_p (const rtx_insn *insn)
3681 {
3682 return (CALL_P (insn) || JUMP_P (insn)
3683 || JUMP_TABLE_DATA_P (insn) /* FIXME */
3684 || (NONJUMP_INSN_P (insn)
3685 && (! reload_completed
3686 || (GET_CODE (PATTERN (insn)) != USE
3687 && GET_CODE (PATTERN (insn)) != CLOBBER))));
3688 }
3689
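/* Find the next insn after INSN that really does something, in the
   sense of active_insn_p above.  This routine does not look inside
   SEQUENCEs.  */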
3690 rtx_insn *
3691 next_active_insn (rtx_insn *insn)
3692 {
3693 while (insn)
3694 {
3695 insn = NEXT_INSN (insn);
3696 if (insn == 0 || active_insn_p (insn))
3697 break;
3698 }
3699
3700 return insn;
3701 }
3702
3703 /* Find the last insn before INSN that really does something.  This
3704    routine does not look inside SEQUENCEs.  After reload this also
3705    skips over standalone USE and CLOBBER insns.  */
3706
3707 rtx_insn *
3708 prev_active_insn (rtx_insn *insn)
3709 {
3710 while (insn)
3711 {
3712 insn = PREV_INSN (insn);
3713 if (insn == 0 || active_insn_p (insn))
3714 break;
3715 }
3716
3717 return insn;
3718 }
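
/* Usage sketch (illustrative only): a typical scan over the active
   instructions of the current function using the walkers above.  */

static int ATTRIBUTE_UNUSED
example_count_active_insns (void)
{
  int count = 0;
  for (rtx_insn *insn = get_insns (); insn; insn = next_active_insn (insn))
    if (active_insn_p (insn))
      count++;
  return count;
}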
3719 \f
3720 /* Return the next insn that uses CC0 after INSN, which is assumed to
3721 set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter
3722 applied to the result of this function should yield INSN).
3723
3724 Normally, this is simply the next insn. However, if a REG_CC_USER note
3725 is present, it contains the insn that uses CC0.
3726
3727 Return 0 if we can't find the insn. */
3728
3729 rtx_insn *
3730 next_cc0_user (rtx_insn *insn)
3731 {
3732 rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX);
3733
3734 if (note)
3735 return safe_as_a <rtx_insn *> (XEXP (note, 0));
3736
3737 insn = next_nonnote_insn (insn);
3738 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
3739 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
3740
3741 if (insn && INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
3742 return insn;
3743
3744 return 0;
3745 }
3746
3747 /* Find the insn that set CC0 for INSN. Unless INSN has a REG_CC_SETTER
3748 note, it is the previous insn. */
3749
3750 rtx_insn *
3751 prev_cc0_setter (rtx_insn *insn)
3752 {
3753 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3754
3755 if (note)
3756 return safe_as_a <rtx_insn *> (XEXP (note, 0));
3757
3758 insn = prev_nonnote_insn (insn);
3759 gcc_assert (sets_cc0_p (PATTERN (insn)));
3760
3761 return insn;
3762 }
3763
3764 /* Return true if X contains an RTX_AUTOINC class rtx whose address operand is REG.  */
3765
3766 static int
3767 find_auto_inc (const_rtx x, const_rtx reg)
3768 {
3769 subrtx_iterator::array_type array;
3770 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
3771 {
3772 const_rtx x = *iter;
3773 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC
3774 && rtx_equal_p (reg, XEXP (x, 0)))
3775 return true;
3776 }
3777 return false;
3778 }
3779
3780 /* Increment the label uses for all labels present in rtx. */
3781
3782 static void
3783 mark_label_nuses (rtx x)
3784 {
3785 enum rtx_code code;
3786 int i, j;
3787 const char *fmt;
3788
3789 code = GET_CODE (x);
3790 if (code == LABEL_REF && LABEL_P (label_ref_label (x)))
3791 LABEL_NUSES (label_ref_label (x))++;
3792
3793 fmt = GET_RTX_FORMAT (code);
3794 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3795 {
3796 if (fmt[i] == 'e')
3797 mark_label_nuses (XEXP (x, i));
3798 else if (fmt[i] == 'E')
3799 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3800 mark_label_nuses (XVECEXP (x, i, j));
3801 }
3802 }
3803
3804 \f
3805 /* Try splitting insns that can be split for better scheduling.
3806    PAT is the pattern which might be split.
3807 TRIAL is the insn providing PAT.
3808 LAST is nonzero if we should return the last insn of the sequence produced.
3809
3810 If this routine succeeds in splitting, it returns the first or last
3811 replacement insn depending on the value of LAST. Otherwise, it
3812 returns TRIAL. If the insn to be returned can be split, it will be. */
3813
3814 rtx_insn *
3815 try_split (rtx pat, rtx_insn *trial, int last)
3816 {
3817 rtx_insn *before, *after;
3818 rtx note;
3819 rtx_insn *seq, *tem;
3820 profile_probability probability;
3821 rtx_insn *insn_last, *insn;
3822 int njumps = 0;
3823 rtx_insn *call_insn = NULL;
3824
3825 if (any_condjump_p (trial)
3826 && (note = find_reg_note (trial, REG_BR_PROB, 0)))
3827 split_branch_probability
3828 = profile_probability::from_reg_br_prob_note (XINT (note, 0));
3829 else
3830 split_branch_probability = profile_probability::uninitialized ();
3831
3832 probability = split_branch_probability;
3833
3834 seq = split_insns (pat, trial);
3835
3836 split_branch_probability = profile_probability::uninitialized ();
3837
3838 if (!seq)
3839 return trial;
3840
3841 int split_insn_count = 0;
3842 /* Avoid infinite loop if any insn of the result matches
3843 the original pattern. */
3844 insn_last = seq;
3845 while (1)
3846 {
3847 if (INSN_P (insn_last)
3848 && rtx_equal_p (PATTERN (insn_last), pat))
3849 return trial;
3850 split_insn_count++;
3851 if (!NEXT_INSN (insn_last))
3852 break;
3853 insn_last = NEXT_INSN (insn_last);
3854 }
3855
3856 /* We're not good at redistributing frame information if
3857 the split occurs before reload or if it results in more
3858 than one insn. */
3859 if (RTX_FRAME_RELATED_P (trial))
3860 {
3861 if (!reload_completed || split_insn_count != 1)
3862 return trial;
3863
3864 rtx_insn *new_insn = seq;
3865 rtx_insn *old_insn = trial;
3866 copy_frame_info_to_split_insn (old_insn, new_insn);
3867 }
3868
3869 /* We will be adding the new sequence to the function. The splitters
3870 may have introduced invalid RTL sharing, so unshare the sequence now. */
3871 unshare_all_rtl_in_chain (seq);
3872
3873 /* Mark labels and copy flags. */
3874 for (insn = insn_last; insn ; insn = PREV_INSN (insn))
3875 {
3876 if (JUMP_P (insn))
3877 {
3878 if (JUMP_P (trial))
3879 CROSSING_JUMP_P (insn) = CROSSING_JUMP_P (trial);
3880 mark_jump_label (PATTERN (insn), insn, 0);
3881 njumps++;
3882 if (probability.initialized_p ()
3883 && any_condjump_p (insn)
3884 && !find_reg_note (insn, REG_BR_PROB, 0))
3885 {
3886 /* We can preserve the REG_BR_PROB notes only if exactly
3887 one jump is created, otherwise the machine description
3888 is responsible for this step using
3889 split_branch_probability variable. */
3890 gcc_assert (njumps == 1);
3891 add_reg_br_prob_note (insn, probability);
3892 }
3893 }
3894 }
3895
3896 /* If we are splitting a CALL_INSN, look for the CALL_INSN
3897 in SEQ and copy any additional information across. */
3898 if (CALL_P (trial))
3899 {
3900 for (insn = insn_last; insn ; insn = PREV_INSN (insn))
3901 if (CALL_P (insn))
3902 {
3903 gcc_assert (call_insn == NULL_RTX);
3904 call_insn = insn;
3905
3906 /* Add the old CALL_INSN_FUNCTION_USAGE to whatever the
3907 target may have explicitly specified. */
3908 rtx *p = &CALL_INSN_FUNCTION_USAGE (insn);
3909 while (*p)
3910 p = &XEXP (*p, 1);
3911 *p = CALL_INSN_FUNCTION_USAGE (trial);
3912
3913 /* If the old call was a sibling call, the new one must
3914 be too. */
3915 SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial);
3916 }
3917 }
3918
3919 /* Copy notes, particularly those related to the CFG. */
3920 for (note = REG_NOTES (trial); note; note = XEXP (note, 1))
3921 {
3922 switch (REG_NOTE_KIND (note))
3923 {
3924 case REG_EH_REGION:
3925 copy_reg_eh_region_note_backward (note, insn_last, NULL);
3926 break;
3927
3928 case REG_NORETURN:
3929 case REG_SETJMP:
3930 case REG_TM:
3931 case REG_CALL_NOCF_CHECK:
3932 case REG_CALL_ARG_LOCATION:
3933 for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
3934 {
3935 if (CALL_P (insn))
3936 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
3937 }
3938 break;
3939
3940 case REG_NON_LOCAL_GOTO:
3941 case REG_LABEL_TARGET:
3942 for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
3943 {
3944 if (JUMP_P (insn))
3945 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
3946 }
3947 break;
3948
3949 case REG_INC:
3950 if (!AUTO_INC_DEC)
3951 break;
3952
3953 for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
3954 {
3955 rtx reg = XEXP (note, 0);
3956 if (!FIND_REG_INC_NOTE (insn, reg)
3957 && find_auto_inc (PATTERN (insn), reg))
3958 add_reg_note (insn, REG_INC, reg);
3959 }
3960 break;
3961
3962 case REG_ARGS_SIZE:
3963 fixup_args_size_notes (NULL, insn_last, get_args_size (note));
3964 break;
3965
3966 case REG_CALL_DECL:
3967 gcc_assert (call_insn != NULL_RTX);
3968 add_reg_note (call_insn, REG_NOTE_KIND (note), XEXP (note, 0));
3969 break;
3970
3971 default:
3972 break;
3973 }
3974 }
3975
3976 /* If there are LABELS inside the split insns increment the
3977 usage count so we don't delete the label. */
3978 if (INSN_P (trial))
3979 {
3980 insn = insn_last;
3981 while (insn != NULL_RTX)
3982 {
3983 /* JUMP_P insns have already been "marked" above. */
3984 if (NONJUMP_INSN_P (insn))
3985 mark_label_nuses (PATTERN (insn));
3986
3987 insn = PREV_INSN (insn);
3988 }
3989 }
3990
3991 before = PREV_INSN (trial);
3992 after = NEXT_INSN (trial);
3993
3994 emit_insn_after_setloc (seq, trial, INSN_LOCATION (trial));
3995
3996 delete_insn (trial);
3997
3998 /* Recursively call try_split for each new insn created; by the
3999 time control returns here that insn will be fully split, so
4000 set LAST and continue from the insn after the one returned.
4001 We can't use next_active_insn here since AFTER may be a note.
4002 Ignore deleted insns, which can occur if not optimizing. */
4003 for (tem = NEXT_INSN (before); tem != after; tem = NEXT_INSN (tem))
4004 if (! tem->deleted () && INSN_P (tem))
4005 tem = try_split (PATTERN (tem), tem, 1);
4006
4007 /* Return either the first or the last insn, depending on which was
4008 requested. */
4009 return last
4010 ? (after ? PREV_INSN (after) : get_last_insn ())
4011 : NEXT_INSN (before);
4012 }
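
/* Illustrative sketch (not part of GCC itself): a typical caller hands
   try_split an insn already in the chain and checks whether anything
   happened.  INSN here is hypothetical:

     rtx_insn *last = try_split (PATTERN (insn), insn, 1);
     if (last == insn)
       ... no split was possible; INSN is still in place ...
     else
       ... INSN was deleted and LAST is the last replacement insn ...  */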
4013 \f
4014 /* Make and return an INSN rtx, initializing all its slots.
4015 Store PATTERN in the pattern slot. */
4016
4017 rtx_insn *
4018 make_insn_raw (rtx pattern)
4019 {
4020 rtx_insn *insn;
4021
4022 insn = as_a <rtx_insn *> (rtx_alloc (INSN));
4023
4024 INSN_UID (insn) = cur_insn_uid++;
4025 PATTERN (insn) = pattern;
4026 INSN_CODE (insn) = -1;
4027 REG_NOTES (insn) = NULL;
4028 INSN_LOCATION (insn) = curr_insn_location ();
4029 BLOCK_FOR_INSN (insn) = NULL;
4030
4031 #ifdef ENABLE_RTL_CHECKING
4032 if (insn
4033 && INSN_P (insn)
4034 && (returnjump_p (insn)
4035 || (GET_CODE (insn) == SET
4036 && SET_DEST (insn) == pc_rtx)))
4037 {
4038 warning (0, "ICE: emit_insn used where emit_jump_insn needed:\n");
4039 debug_rtx (insn);
4040 }
4041 #endif
4042
4043 return insn;
4044 }
4045
4046 /* Like `make_insn_raw' but make a DEBUG_INSN instead of an insn. */
4047
4048 static rtx_insn *
4049 make_debug_insn_raw (rtx pattern)
4050 {
4051 rtx_debug_insn *insn;
4052
4053 insn = as_a <rtx_debug_insn *> (rtx_alloc (DEBUG_INSN));
4054 INSN_UID (insn) = cur_debug_insn_uid++;
4055 if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
4056 INSN_UID (insn) = cur_insn_uid++;
4057
4058 PATTERN (insn) = pattern;
4059 INSN_CODE (insn) = -1;
4060 REG_NOTES (insn) = NULL;
4061 INSN_LOCATION (insn) = curr_insn_location ();
4062 BLOCK_FOR_INSN (insn) = NULL;
4063
4064 return insn;
4065 }
4066
4067 /* Like `make_insn_raw' but make a JUMP_INSN instead of an insn. */
4068
4069 static rtx_insn *
4070 make_jump_insn_raw (rtx pattern)
4071 {
4072 rtx_jump_insn *insn;
4073
4074 insn = as_a <rtx_jump_insn *> (rtx_alloc (JUMP_INSN));
4075 INSN_UID (insn) = cur_insn_uid++;
4076
4077 PATTERN (insn) = pattern;
4078 INSN_CODE (insn) = -1;
4079 REG_NOTES (insn) = NULL;
4080 JUMP_LABEL (insn) = NULL;
4081 INSN_LOCATION (insn) = curr_insn_location ();
4082 BLOCK_FOR_INSN (insn) = NULL;
4083
4084 return insn;
4085 }
4086
4087 /* Like `make_insn_raw' but make a CALL_INSN instead of an insn. */
4088
4089 static rtx_insn *
4090 make_call_insn_raw (rtx pattern)
4091 {
4092 rtx_call_insn *insn;
4093
4094 insn = as_a <rtx_call_insn *> (rtx_alloc (CALL_INSN));
4095 INSN_UID (insn) = cur_insn_uid++;
4096
4097 PATTERN (insn) = pattern;
4098 INSN_CODE (insn) = -1;
4099 REG_NOTES (insn) = NULL;
4100 CALL_INSN_FUNCTION_USAGE (insn) = NULL;
4101 INSN_LOCATION (insn) = curr_insn_location ();
4102 BLOCK_FOR_INSN (insn) = NULL;
4103
4104 return insn;
4105 }
4106
4107 /* Like `make_insn_raw' but make a NOTE instead of an insn. */
4108
4109 static rtx_note *
4110 make_note_raw (enum insn_note subtype)
4111 {
4112 /* Some notes are never created this way at all. These notes are
4113 only created by patching out insns. */
4114 gcc_assert (subtype != NOTE_INSN_DELETED_LABEL
4115 && subtype != NOTE_INSN_DELETED_DEBUG_LABEL);
4116
4117 rtx_note *note = as_a <rtx_note *> (rtx_alloc (NOTE));
4118 INSN_UID (note) = cur_insn_uid++;
4119 NOTE_KIND (note) = subtype;
4120 BLOCK_FOR_INSN (note) = NULL;
4121 memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note)));
4122 return note;
4123 }
4124 \f
4125 /* Link INSN into the doubly-linked list, placing it between PREV and NEXT.
4126 INSN may be any object that can appear in the chain: INSN_P and NOTE_P objects,
4127 but also BARRIERs and JUMP_TABLE_DATAs. PREV and NEXT may be NULL. */
4128
4129 static inline void
4130 link_insn_into_chain (rtx_insn *insn, rtx_insn *prev, rtx_insn *next)
4131 {
4132 SET_PREV_INSN (insn) = prev;
4133 SET_NEXT_INSN (insn) = next;
4134 if (prev != NULL)
4135 {
4136 SET_NEXT_INSN (prev) = insn;
4137 if (NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
4138 {
4139 rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (prev));
4140 SET_NEXT_INSN (sequence->insn (sequence->len () - 1)) = insn;
4141 }
4142 }
4143 if (next != NULL)
4144 {
4145 SET_PREV_INSN (next) = insn;
4146 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
4147 {
4148 rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (next));
4149 SET_PREV_INSN (sequence->insn (0)) = insn;
4150 }
4151 }
4152
4153 if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4154 {
4155 rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (insn));
4156 SET_PREV_INSN (sequence->insn (0)) = prev;
4157 SET_NEXT_INSN (sequence->insn (sequence->len () - 1)) = next;
4158 }
4159 }
4160
4161 /* Add INSN to the end of the doubly-linked list.
4162 INSN may be an INSN, JUMP_INSN, CALL_INSN, CODE_LABEL, BARRIER or NOTE. */
4163
4164 void
4165 add_insn (rtx_insn *insn)
4166 {
4167 rtx_insn *prev = get_last_insn ();
4168 link_insn_into_chain (insn, prev, NULL);
4169 if (get_insns () == NULL)
4170 set_first_insn (insn);
4171 set_last_insn (insn);
4172 }
4173
4174 /* Add INSN into the doubly-linked list after insn AFTER. */
4175
4176 static void
4177 add_insn_after_nobb (rtx_insn *insn, rtx_insn *after)
4178 {
4179 rtx_insn *next = NEXT_INSN (after);
4180
4181 gcc_assert (!optimize || !after->deleted ());
4182
4183 link_insn_into_chain (insn, after, next);
4184
4185 if (next == NULL)
4186 {
4187 struct sequence_stack *seq;
4188
4189 for (seq = get_current_sequence (); seq; seq = seq->next)
4190 if (after == seq->last)
4191 {
4192 seq->last = insn;
4193 break;
4194 }
4195 }
4196 }
4197
4198 /* Add INSN into the doubly-linked list before insn BEFORE. */
4199
4200 static void
4201 add_insn_before_nobb (rtx_insn *insn, rtx_insn *before)
4202 {
4203 rtx_insn *prev = PREV_INSN (before);
4204
4205 gcc_assert (!optimize || !before->deleted ());
4206
4207 link_insn_into_chain (insn, prev, before);
4208
4209 if (prev == NULL)
4210 {
4211 struct sequence_stack *seq;
4212
4213 for (seq = get_current_sequence (); seq; seq = seq->next)
4214 if (before == seq->first)
4215 {
4216 seq->first = insn;
4217 break;
4218 }
4219
4220 gcc_assert (seq);
4221 }
4222 }
4223
4224 /* Like add_insn_after_nobb, but try to set BLOCK_FOR_INSN.
4225 If BB is NULL, an attempt is made to infer the bb from AFTER.
4226
4227 This and the next function should be the only functions called
4228 to insert an insn once delay slots have been filled since only
4229 they know how to update a SEQUENCE. */
4230
4231 void
4232 add_insn_after (rtx_insn *insn, rtx_insn *after, basic_block bb)
4233 {
4234 add_insn_after_nobb (insn, after);
4235 if (!BARRIER_P (after)
4236 && !BARRIER_P (insn)
4237 && (bb = BLOCK_FOR_INSN (after)))
4238 {
4239 set_block_for_insn (insn, bb);
4240 if (INSN_P (insn))
4241 df_insn_rescan (insn);
4242 /* Should not happen as first in the BB is always
4243 either NOTE or LABEL. */
4244 if (BB_END (bb) == after
4245 /* Avoid clobbering of structure when creating new BB. */
4246 && !BARRIER_P (insn)
4247 && !NOTE_INSN_BASIC_BLOCK_P (insn))
4248 BB_END (bb) = insn;
4249 }
4250 }
4251
4252 /* Like add_insn_before_nobb, but try to set BLOCK_FOR_INSN.
4253 If BB is NULL, an attempt is made to infer the bb from BEFORE.
4254
4255 This and the previous function should be the only functions called
4256 to insert an insn once delay slots have been filled since only
4257 they know how to update a SEQUENCE. */
4258
4259 void
4260 add_insn_before (rtx_insn *insn, rtx_insn *before, basic_block bb)
4261 {
4262 add_insn_before_nobb (insn, before);
4263
4264 if (!bb
4265 && !BARRIER_P (before)
4266 && !BARRIER_P (insn))
4267 bb = BLOCK_FOR_INSN (before);
4268
4269 if (bb)
4270 {
4271 set_block_for_insn (insn, bb);
4272 if (INSN_P (insn))
4273 df_insn_rescan (insn);
4274 /* Should not happen as first in the BB is always either NOTE or
4275 LABEL. */
4276 gcc_assert (BB_HEAD (bb) != insn
4277 /* Avoid clobbering of structure when creating new BB. */
4278 || BARRIER_P (insn)
4279 || NOTE_INSN_BASIC_BLOCK_P (insn));
4280 }
4281 }
4282
4283 /* Replace INSN with a deleted instruction note (NOTE_INSN_DELETED). */
4284
4285 void
4286 set_insn_deleted (rtx_insn *insn)
4287 {
4288 if (INSN_P (insn))
4289 df_insn_delete (insn);
4290 PUT_CODE (insn, NOTE);
4291 NOTE_KIND (insn) = NOTE_INSN_DELETED;
4292 }
4293
4294
4295 /* Unlink INSN from the insn chain.
4296
4297 This function knows how to handle sequences.
4298
4299 This function does not invalidate data flow information associated with
4300 INSN (i.e. does not call df_insn_delete). That makes this function
4301 usable for merely disconnecting an insn from the chain, so that it
4302 can be re-emitted elsewhere later.
4303
4304 To later insert INSN elsewhere in the insn chain via add_insn and
4305 similar functions, PREV_INSN and NEXT_INSN must be nullified by
4306 the caller. Nullifying them here breaks many insn chain walks.
4307
4308 To really delete an insn and related DF information, use delete_insn. */
4309
4310 void
4311 remove_insn (rtx_insn *insn)
4312 {
4313 rtx_insn *next = NEXT_INSN (insn);
4314 rtx_insn *prev = PREV_INSN (insn);
4315 basic_block bb;
4316
4317 if (prev)
4318 {
4319 SET_NEXT_INSN (prev) = next;
4320 if (NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
4321 {
4322 rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (prev));
4323 SET_NEXT_INSN (sequence->insn (sequence->len () - 1)) = next;
4324 }
4325 }
4326 else
4327 {
4328 struct sequence_stack *seq;
4329
4330 for (seq = get_current_sequence (); seq; seq = seq->next)
4331 if (insn == seq->first)
4332 {
4333 seq->first = next;
4334 break;
4335 }
4336
4337 gcc_assert (seq);
4338 }
4339
4340 if (next)
4341 {
4342 SET_PREV_INSN (next) = prev;
4343 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
4344 {
4345 rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (next));
4346 SET_PREV_INSN (sequence->insn (0)) = prev;
4347 }
4348 }
4349 else
4350 {
4351 struct sequence_stack *seq;
4352
4353 for (seq = get_current_sequence (); seq; seq = seq->next)
4354 if (insn == seq->last)
4355 {
4356 seq->last = prev;
4357 break;
4358 }
4359
4360 gcc_assert (seq);
4361 }
4362
4363 /* Fix up basic block boundaries, if necessary. */
4364 if (!BARRIER_P (insn)
4365 && (bb = BLOCK_FOR_INSN (insn)))
4366 {
4367 if (BB_HEAD (bb) == insn)
4368 {
4369 /* Never ever delete the basic block note without deleting whole
4370 basic block. */
4371 gcc_assert (!NOTE_P (insn));
4372 BB_HEAD (bb) = next;
4373 }
4374 if (BB_END (bb) == insn)
4375 BB_END (bb) = prev;
4376 }
4377 }
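
/* Illustrative sketch (not part of GCC itself): moving an insn by hand,
   honoring the contract described above remove_insn.  INSN and AFTER
   are hypothetical insns:

     remove_insn (insn);
     SET_PREV_INSN (insn) = NULL;
     SET_NEXT_INSN (insn) = NULL;
     add_insn_after (insn, after, NULL);  */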
4378
4379 /* Append CALL_FUSAGE to the CALL_INSN_FUNCTION_USAGE for CALL_INSN. */
4380
4381 void
4382 add_function_usage_to (rtx call_insn, rtx call_fusage)
4383 {
4384 gcc_assert (call_insn && CALL_P (call_insn));
4385
4386 /* Put the register usage information on the CALL. If there is already
4387 some usage information, put ours at the end. */
4388 if (CALL_INSN_FUNCTION_USAGE (call_insn))
4389 {
4390 rtx link;
4391
4392 for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0;
4393 link = XEXP (link, 1))
4394 ;
4395
4396 XEXP (link, 1) = call_fusage;
4397 }
4398 else
4399 CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage;
4400 }
4401
4402 /* Delete all insns made since FROM.
4403 FROM becomes the new last instruction. */
4404
4405 void
4406 delete_insns_since (rtx_insn *from)
4407 {
4408 if (from == 0)
4409 set_first_insn (0);
4410 else
4411 SET_NEXT_INSN (from) = 0;
4412 set_last_insn (from);
4413 }
4414
4415 /* This function is deprecated; please use sequences instead.
4416
4417 Move a consecutive bunch of insns to a different place in the chain.
4418 The insns to be moved are those between FROM and TO.
4419 They are moved to a new position after the insn AFTER.
4420 AFTER must not be FROM or TO or any insn in between.
4421
4422 This function does not know about SEQUENCEs and hence should not be
4423 called after delay-slot filling has been done. */
4424
4425 void
4426 reorder_insns_nobb (rtx_insn *from, rtx_insn *to, rtx_insn *after)
4427 {
4428 if (flag_checking)
4429 {
4430 for (rtx_insn *x = from; x != to; x = NEXT_INSN (x))
4431 gcc_assert (after != x);
4432 gcc_assert (after != to);
4433 }
4434
4435 /* Splice this bunch out of where it is now. */
4436 if (PREV_INSN (from))
4437 SET_NEXT_INSN (PREV_INSN (from)) = NEXT_INSN (to);
4438 if (NEXT_INSN (to))
4439 SET_PREV_INSN (NEXT_INSN (to)) = PREV_INSN (from);
4440 if (get_last_insn () == to)
4441 set_last_insn (PREV_INSN (from));
4442 if (get_insns () == from)
4443 set_first_insn (NEXT_INSN (to));
4444
4445 /* Make the new neighbors point to it and it to them. */
4446 if (NEXT_INSN (after))
4447 SET_PREV_INSN (NEXT_INSN (after)) = to;
4448
4449 SET_NEXT_INSN (to) = NEXT_INSN (after);
4450 SET_PREV_INSN (from) = after;
4451 SET_NEXT_INSN (after) = from;
4452 if (after == get_last_insn ())
4453 set_last_insn (to);
4454 }
4455
4456 /* Same as function above, but take care to update BB boundaries. */
4457 void
4458 reorder_insns (rtx_insn *from, rtx_insn *to, rtx_insn *after)
4459 {
4460 rtx_insn *prev = PREV_INSN (from);
4461 basic_block bb, bb2;
4462
4463 reorder_insns_nobb (from, to, after);
4464
4465 if (!BARRIER_P (after)
4466 && (bb = BLOCK_FOR_INSN (after)))
4467 {
4468 rtx_insn *x;
4469 df_set_bb_dirty (bb);
4470
4471 if (!BARRIER_P (from)
4472 && (bb2 = BLOCK_FOR_INSN (from)))
4473 {
4474 if (BB_END (bb2) == to)
4475 BB_END (bb2) = prev;
4476 df_set_bb_dirty (bb2);
4477 }
4478
4479 if (BB_END (bb) == after)
4480 BB_END (bb) = to;
4481
4482 for (x = from; x != NEXT_INSN (to); x = NEXT_INSN (x))
4483 if (!BARRIER_P (x))
4484 df_insn_change_bb (x, bb);
4485 }
4486 }
4487
4488 \f
4489 /* Emit insn(s) of given code and pattern
4490 at a specified place within the doubly-linked list.
4491
4492 All of the emit_foo global entry points accept an object
4493 X which is either an insn list or a PATTERN of a single
4494 instruction.
4495
4496 There are thus a few canonical ways to generate code and
4497 emit it at a specific place in the instruction stream. For
4498 example, consider the instruction named SPOT and the fact that
4499 we would like to emit some instructions before SPOT. We might
4500 do it like this:
4501
4502 start_sequence ();
4503 ... emit the new instructions ...
4504 insns_head = get_insns ();
4505 end_sequence ();
4506
4507 emit_insn_before (insns_head, SPOT);
4508
4509 It used to be common to generate SEQUENCE rtl instead, but that
4510 is a relic of the past which no longer occurs. The reason is that
4511 SEQUENCE rtl results in much fragmented RTL memory since the SEQUENCE
4512 generated would almost certainly die right after it was created. */
4513
4514 static rtx_insn *
4515 emit_pattern_before_noloc (rtx x, rtx_insn *before, rtx_insn *last,
4516 basic_block bb,
4517 rtx_insn *(*make_raw) (rtx))
4518 {
4519 rtx_insn *insn;
4520
4521 gcc_assert (before);
4522
4523 if (x == NULL_RTX)
4524 return last;
4525
4526 switch (GET_CODE (x))
4527 {
4528 case DEBUG_INSN:
4529 case INSN:
4530 case JUMP_INSN:
4531 case CALL_INSN:
4532 case CODE_LABEL:
4533 case BARRIER:
4534 case NOTE:
4535 insn = as_a <rtx_insn *> (x);
4536 while (insn)
4537 {
4538 rtx_insn *next = NEXT_INSN (insn);
4539 add_insn_before (insn, before, bb);
4540 last = insn;
4541 insn = next;
4542 }
4543 break;
4544
4545 #ifdef ENABLE_RTL_CHECKING
4546 case SEQUENCE:
4547 gcc_unreachable ();
4548 break;
4549 #endif
4550
4551 default:
4552 last = (*make_raw) (x);
4553 add_insn_before (last, before, bb);
4554 break;
4555 }
4556
4557 return last;
4558 }
4559
4560 /* Make X be output before the instruction BEFORE. */
4561
4562 rtx_insn *
4563 emit_insn_before_noloc (rtx x, rtx_insn *before, basic_block bb)
4564 {
4565 return emit_pattern_before_noloc (x, before, before, bb, make_insn_raw);
4566 }
4567
4568 /* Make an instruction with body X and code JUMP_INSN
4569 and output it before the instruction BEFORE. */
4570
4571 rtx_jump_insn *
4572 emit_jump_insn_before_noloc (rtx x, rtx_insn *before)
4573 {
4574 return as_a <rtx_jump_insn *> (
4575 emit_pattern_before_noloc (x, before, NULL, NULL,
4576 make_jump_insn_raw));
4577 }
4578
4579 /* Make an instruction with body X and code CALL_INSN
4580 and output it before the instruction BEFORE. */
4581
4582 rtx_insn *
4583 emit_call_insn_before_noloc (rtx x, rtx_insn *before)
4584 {
4585 return emit_pattern_before_noloc (x, before, NULL, NULL,
4586 make_call_insn_raw);
4587 }
4588
4589 /* Make an instruction with body X and code DEBUG_INSN
4590 and output it before the instruction BEFORE. */
4591
4592 rtx_insn *
4593 emit_debug_insn_before_noloc (rtx x, rtx_insn *before)
4594 {
4595 return emit_pattern_before_noloc (x, before, NULL, NULL,
4596 make_debug_insn_raw);
4597 }
4598
4599 /* Make an insn of code BARRIER
4600 and output it before the insn BEFORE. */
4601
4602 rtx_barrier *
4603 emit_barrier_before (rtx_insn *before)
4604 {
4605 rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
4606
4607 INSN_UID (insn) = cur_insn_uid++;
4608
4609 add_insn_before (insn, before, NULL);
4610 return insn;
4611 }
4612
4613 /* Emit the label LABEL before the insn BEFORE. */
4614
4615 rtx_code_label *
4616 emit_label_before (rtx_code_label *label, rtx_insn *before)
4617 {
4618 gcc_checking_assert (INSN_UID (label) == 0);
4619 INSN_UID (label) = cur_insn_uid++;
4620 add_insn_before (label, before, NULL);
4621 return label;
4622 }
4623 \f
4624 /* Helper for emit_insn_after, handles lists of instructions
4625 efficiently. */
4626
4627 static rtx_insn *
4628 emit_insn_after_1 (rtx_insn *first, rtx_insn *after, basic_block bb)
4629 {
4630 rtx_insn *last;
4631 rtx_insn *after_after;
4632 if (!bb && !BARRIER_P (after))
4633 bb = BLOCK_FOR_INSN (after);
4634
4635 if (bb)
4636 {
4637 df_set_bb_dirty (bb);
4638 for (last = first; NEXT_INSN (last); last = NEXT_INSN (last))
4639 if (!BARRIER_P (last))
4640 {
4641 set_block_for_insn (last, bb);
4642 df_insn_rescan (last);
4643 }
/* The loop above stops at the final insn in the list; update its
   block info here too. */
4644 if (!BARRIER_P (last))
4645 {
4646 set_block_for_insn (last, bb);
4647 df_insn_rescan (last);
4648 }
4649 if (BB_END (bb) == after)
4650 BB_END (bb) = last;
4651 }
4652 else
4653 for (last = first; NEXT_INSN (last); last = NEXT_INSN (last))
4654 continue;
4655
4656 after_after = NEXT_INSN (after);
4657
4658 SET_NEXT_INSN (after) = first;
4659 SET_PREV_INSN (first) = after;
4660 SET_NEXT_INSN (last) = after_after;
4661 if (after_after)
4662 SET_PREV_INSN (after_after) = last;
4663
4664 if (after == get_last_insn ())
4665 set_last_insn (last);
4666
4667 return last;
4668 }
4669
4670 static rtx_insn *
4671 emit_pattern_after_noloc (rtx x, rtx_insn *after, basic_block bb,
4672 rtx_insn *(*make_raw)(rtx))
4673 {
4674 rtx_insn *last = after;
4675
4676 gcc_assert (after);
4677
4678 if (x == NULL_RTX)
4679 return last;
4680
4681 switch (GET_CODE (x))
4682 {
4683 case DEBUG_INSN:
4684 case INSN:
4685 case JUMP_INSN:
4686 case CALL_INSN:
4687 case CODE_LABEL:
4688 case BARRIER:
4689 case NOTE:
4690 last = emit_insn_after_1 (as_a <rtx_insn *> (x), after, bb);
4691 break;
4692
4693 #ifdef ENABLE_RTL_CHECKING
4694 case SEQUENCE:
4695 gcc_unreachable ();
4696 break;
4697 #endif
4698
4699 default:
4700 last = (*make_raw) (x);
4701 add_insn_after (last, after, bb);
4702 break;
4703 }
4704
4705 return last;
4706 }
4707
4708 /* Make X be output after the insn AFTER and set the BB of insn. If
4709 BB is NULL, an attempt is made to infer the BB from AFTER. */
4710
4711 rtx_insn *
4712 emit_insn_after_noloc (rtx x, rtx_insn *after, basic_block bb)
4713 {
4714 return emit_pattern_after_noloc (x, after, bb, make_insn_raw);
4715 }
4716
4717
4718 /* Make an insn of code JUMP_INSN with body X
4719 and output it after the insn AFTER. */
4720
4721 rtx_jump_insn *
4722 emit_jump_insn_after_noloc (rtx x, rtx_insn *after)
4723 {
4724 return as_a <rtx_jump_insn *> (
4725 emit_pattern_after_noloc (x, after, NULL, make_jump_insn_raw));
4726 }
4727
4728 /* Make an instruction with body X and code CALL_INSN
4729 and output it after the instruction AFTER. */
4730
4731 rtx_insn *
4732 emit_call_insn_after_noloc (rtx x, rtx_insn *after)
4733 {
4734 return emit_pattern_after_noloc (x, after, NULL, make_call_insn_raw);
4735 }
4736
4737 /* Make an instruction with body X and code DEBUG_INSN
4738 and output it after the instruction AFTER. */
4739
4740 rtx_insn *
4741 emit_debug_insn_after_noloc (rtx x, rtx_insn *after)
4742 {
4743 return emit_pattern_after_noloc (x, after, NULL, make_debug_insn_raw);
4744 }
4745
4746 /* Make an insn of code BARRIER
4747 and output it after the insn AFTER. */
4748
4749 rtx_barrier *
4750 emit_barrier_after (rtx_insn *after)
4751 {
4752 rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
4753
4754 INSN_UID (insn) = cur_insn_uid++;
4755
4756 add_insn_after (insn, after, NULL);
4757 return insn;
4758 }
4759
4760 /* Emit the label LABEL after the insn AFTER. */
4761
4762 rtx_insn *
4763 emit_label_after (rtx_insn *label, rtx_insn *after)
4764 {
4765 gcc_checking_assert (INSN_UID (label) == 0);
4766 INSN_UID (label) = cur_insn_uid++;
4767 add_insn_after (label, after, NULL);
4768 return label;
4769 }
4770 \f
4771 /* Notes require a bit of special handling: Some notes need to have their
4772 BLOCK_FOR_INSN set, others should never have it set, and some should
4773 have it set or clear depending on the context. */
4774
4775 /* Return true iff a note of kind SUBTYPE should be emitted with routines
4776 that never set BLOCK_FOR_INSN on NOTE. ON_BB_BOUNDARY_P is true if the
4777 caller is asked to emit a note before BB_HEAD, or after BB_END. */
4778
4779 static bool
4780 note_outside_basic_block_p (enum insn_note subtype, bool on_bb_boundary_p)
4781 {
4782 switch (subtype)
4783 {
4784 /* NOTE_INSN_SWITCH_TEXT_SECTIONS only appears between basic blocks. */
4785 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4786 return true;
4787
4788 /* Notes for var tracking and EH region markers can appear between or
4789 inside basic blocks. If the caller is emitting on the basic block
4790 boundary, do not set BLOCK_FOR_INSN on the new note. */
4791 case NOTE_INSN_VAR_LOCATION:
4792 case NOTE_INSN_EH_REGION_BEG:
4793 case NOTE_INSN_EH_REGION_END:
4794 return on_bb_boundary_p;
4795
4796 /* Otherwise, BLOCK_FOR_INSN must be set. */
4797 default:
4798 return false;
4799 }
4800 }
4801
4802 /* Emit a note of subtype SUBTYPE after the insn AFTER. */
4803
4804 rtx_note *
4805 emit_note_after (enum insn_note subtype, rtx_insn *after)
4806 {
4807 rtx_note *note = make_note_raw (subtype);
4808 basic_block bb = BARRIER_P (after) ? NULL : BLOCK_FOR_INSN (after);
4809 bool on_bb_boundary_p = (bb != NULL && BB_END (bb) == after);
4810
4811 if (note_outside_basic_block_p (subtype, on_bb_boundary_p))
4812 add_insn_after_nobb (note, after);
4813 else
4814 add_insn_after (note, after, bb);
4815 return note;
4816 }
4817
4818 /* Emit a note of subtype SUBTYPE before the insn BEFORE. */
4819
4820 rtx_note *
4821 emit_note_before (enum insn_note subtype, rtx_insn *before)
4822 {
4823 rtx_note *note = make_note_raw (subtype);
4824 basic_block bb = BARRIER_P (before) ? NULL : BLOCK_FOR_INSN (before);
4825 bool on_bb_boundary_p = (bb != NULL && BB_HEAD (bb) == before);
4826
4827 if (note_outside_basic_block_p (subtype, on_bb_boundary_p))
4828 add_insn_before_nobb (note, before);
4829 else
4830 add_insn_before (note, before, bb);
4831 return note;
4832 }
4833 \f
4834 /* Insert PATTERN after AFTER, setting its INSN_LOCATION to LOC.
4835 MAKE_RAW indicates how to turn PATTERN into a real insn. */
4836
4837 static rtx_insn *
4838 emit_pattern_after_setloc (rtx pattern, rtx_insn *after, location_t loc,
4839 rtx_insn *(*make_raw) (rtx))
4840 {
4841 rtx_insn *last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
4842
4843 if (pattern == NULL_RTX || !loc)
4844 return last;
4845
4846 after = NEXT_INSN (after);
4847 while (1)
4848 {
4849 if (active_insn_p (after)
4850 && !JUMP_TABLE_DATA_P (after) /* FIXME */
4851 && !INSN_LOCATION (after))
4852 INSN_LOCATION (after) = loc;
4853 if (after == last)
4854 break;
4855 after = NEXT_INSN (after);
4856 }
4857 return last;
4858 }
4859
4860 /* Insert PATTERN after AFTER. MAKE_RAW indicates how to turn PATTERN
4861 into a real insn. SKIP_DEBUG_INSNS indicates whether to insert after
4862 any DEBUG_INSNs. */
4863
4864 static rtx_insn *
4865 emit_pattern_after (rtx pattern, rtx_insn *after, bool skip_debug_insns,
4866 rtx_insn *(*make_raw) (rtx))
4867 {
4868 rtx_insn *prev = after;
4869
4870 if (skip_debug_insns)
4871 while (DEBUG_INSN_P (prev))
4872 prev = PREV_INSN (prev);
4873
4874 if (INSN_P (prev))
4875 return emit_pattern_after_setloc (pattern, after, INSN_LOCATION (prev),
4876 make_raw);
4877 else
4878 return emit_pattern_after_noloc (pattern, after, NULL, make_raw);
4879 }
4880
4881 /* Like emit_insn_after_noloc, but set INSN_LOCATION according to LOC. */
4882 rtx_insn *
4883 emit_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
4884 {
4885 return emit_pattern_after_setloc (pattern, after, loc, make_insn_raw);
4886 }
4887
4888 /* Like emit_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
4889 rtx_insn *
4890 emit_insn_after (rtx pattern, rtx_insn *after)
4891 {
4892 return emit_pattern_after (pattern, after, true, make_insn_raw);
4893 }
4894
4895 /* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to LOC. */
4896 rtx_jump_insn *
4897 emit_jump_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
4898 {
4899 return as_a <rtx_jump_insn *> (
4900 emit_pattern_after_setloc (pattern, after, loc, make_jump_insn_raw));
4901 }
4902
4903 /* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
4904 rtx_jump_insn *
4905 emit_jump_insn_after (rtx pattern, rtx_insn *after)
4906 {
4907 return as_a <rtx_jump_insn *> (
4908 emit_pattern_after (pattern, after, true, make_jump_insn_raw));
4909 }
4910
4911 /* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to LOC. */
4912 rtx_insn *
4913 emit_call_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
4914 {
4915 return emit_pattern_after_setloc (pattern, after, loc, make_call_insn_raw);
4916 }
4917
4918 /* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
4919 rtx_insn *
4920 emit_call_insn_after (rtx pattern, rtx_insn *after)
4921 {
4922 return emit_pattern_after (pattern, after, true, make_call_insn_raw);
4923 }
4924
4925 /* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to LOC. */
4926 rtx_insn *
4927 emit_debug_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
4928 {
4929 return emit_pattern_after_setloc (pattern, after, loc, make_debug_insn_raw);
4930 }
4931
4932 /* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
4933 rtx_insn *
4934 emit_debug_insn_after (rtx pattern, rtx_insn *after)
4935 {
4936 return emit_pattern_after (pattern, after, false, make_debug_insn_raw);
4937 }
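
/* Illustrative sketch (not part of GCC itself): the _setloc entry points
   let a pass emit a compensation insn that inherits the location of an
   existing insn.  PAT and INSN are hypothetical:

     emit_insn_after_setloc (pat, insn, INSN_LOCATION (insn));

   which is also what emit_insn_after itself does when INSN is a real
   (non-note) insn.  */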
4938
4939 /* Insert PATTERN before BEFORE, setting its INSN_LOCATION to LOC.
4940 MAKE_RAW indicates how to turn PATTERN into a real insn. INSNP
4941 indicates if PATTERN is meant for an INSN as opposed to a JUMP_INSN,
4942 CALL_INSN, etc. */
4943
4944 static rtx_insn *
4945 emit_pattern_before_setloc (rtx pattern, rtx_insn *before, location_t loc,
4946 bool insnp, rtx_insn *(*make_raw) (rtx))
4947 {
4948 rtx_insn *first = PREV_INSN (before);
4949 rtx_insn *last = emit_pattern_before_noloc (pattern, before,
4950 insnp ? before : NULL,
4951 NULL, make_raw);
4952
4953 if (pattern == NULL_RTX || !loc)
4954 return last;
4955
4956 if (!first)
4957 first = get_insns ();
4958 else
4959 first = NEXT_INSN (first);
4960 while (1)
4961 {
4962 if (active_insn_p (first)
4963 && !JUMP_TABLE_DATA_P (first) /* FIXME */
4964 && !INSN_LOCATION (first))
4965 INSN_LOCATION (first) = loc;
4966 if (first == last)
4967 break;
4968 first = NEXT_INSN (first);
4969 }
4970 return last;
4971 }
4972
4973 /* Insert PATTERN before BEFORE. MAKE_RAW indicates how to turn PATTERN
4974 into a real insn. SKIP_DEBUG_INSNS indicates whether to insert
4975 before any DEBUG_INSNs. INSNP indicates if PATTERN is meant for an
4976 INSN as opposed to a JUMP_INSN, CALL_INSN, etc. */
4977
4978 static rtx_insn *
4979 emit_pattern_before (rtx pattern, rtx_insn *before, bool skip_debug_insns,
4980 bool insnp, rtx_insn *(*make_raw) (rtx))
4981 {
4982 rtx_insn *next = before;
4983
4984 if (skip_debug_insns)
4985 while (DEBUG_INSN_P (next))
4986 next = PREV_INSN (next);
4987
4988 if (INSN_P (next))
4989 return emit_pattern_before_setloc (pattern, before, INSN_LOCATION (next),
4990 insnp, make_raw);
4991 else
4992 return emit_pattern_before_noloc (pattern, before,
4993 insnp ? before : NULL,
4994 NULL, make_raw);
4995 }
4996
4997 /* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
4998 rtx_insn *
4999 emit_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
5000 {
5001 return emit_pattern_before_setloc (pattern, before, loc, true,
5002 make_insn_raw);
5003 }
5004
5005 /* Like emit_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
5006 rtx_insn *
5007 emit_insn_before (rtx pattern, rtx_insn *before)
5008 {
5009 return emit_pattern_before (pattern, before, true, true, make_insn_raw);
5010 }
5011
5012 /* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to LOC. */
5013 rtx_jump_insn *
5014 emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
5015 {
5016 return as_a <rtx_jump_insn *> (
5017 emit_pattern_before_setloc (pattern, before, loc, false,
5018 make_jump_insn_raw));
5019 }
5020
5021 /* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
5022 rtx_jump_insn *
5023 emit_jump_insn_before (rtx pattern, rtx_insn *before)
5024 {
5025 return as_a <rtx_jump_insn *> (
5026 emit_pattern_before (pattern, before, true, false,
5027 make_jump_insn_raw));
5028 }
5029
5030 /* Like emit_call_insn_before_noloc, but set INSN_LOCATION according to LOC. */
5031 rtx_insn *
5032 emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
5033 {
5034 return emit_pattern_before_setloc (pattern, before, loc, false,
5035 make_call_insn_raw);
5036 }
5037
5038 /* Like emit_call_insn_before_noloc,
5039 but set INSN_LOCATION according to BEFORE. */
5040 rtx_insn *
5041 emit_call_insn_before (rtx pattern, rtx_insn *before)
5042 {
5043 return emit_pattern_before (pattern, before, true, false,
5044 make_call_insn_raw);
5045 }
5046
5047 /* Like emit_debug_insn_before_noloc, but set INSN_LOCATION according to LOC. */
5048 rtx_insn *
5049 emit_debug_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
5050 {
5051 return emit_pattern_before_setloc (pattern, before, loc, false,
5052 make_debug_insn_raw);
5053 }
5054
5055 /* Like emit_debug_insn_before_noloc,
5056 but set INSN_LOCATION according to BEFORE. */
5057 rtx_insn *
5058 emit_debug_insn_before (rtx pattern, rtx_insn *before)
5059 {
5060 return emit_pattern_before (pattern, before, false, false,
5061 make_debug_insn_raw);
5062 }
5063 \f
5064 /* Take X and emit it at the end of the doubly-linked
5065 INSN list.
5066
5067 Returns the last insn emitted. */
5068
5069 rtx_insn *
5070 emit_insn (rtx x)
5071 {
5072 rtx_insn *last = get_last_insn ();
5073 rtx_insn *insn;
5074
5075 if (x == NULL_RTX)
5076 return last;
5077
5078 switch (GET_CODE (x))
5079 {
5080 case DEBUG_INSN:
5081 case INSN:
5082 case JUMP_INSN:
5083 case CALL_INSN:
5084 case CODE_LABEL:
5085 case BARRIER:
5086 case NOTE:
5087 insn = as_a <rtx_insn *> (x);
5088 while (insn)
5089 {
5090 rtx_insn *next = NEXT_INSN (insn);
5091 add_insn (insn);
5092 last = insn;
5093 insn = next;
5094 }
5095 break;
5096
5097 #ifdef ENABLE_RTL_CHECKING
5098 case JUMP_TABLE_DATA:
5099 case SEQUENCE:
5100 gcc_unreachable ();
5101 break;
5102 #endif
5103
5104 default:
5105 last = make_insn_raw (x);
5106 add_insn (last);
5107 break;
5108 }
5109
5110 return last;
5111 }
5112
5113 /* Make an insn of code DEBUG_INSN with pattern X
5114 and add it to the end of the doubly-linked list. */
5115
5116 rtx_insn *
5117 emit_debug_insn (rtx x)
5118 {
5119 rtx_insn *last = get_last_insn ();
5120 rtx_insn *insn;
5121
5122 if (x == NULL_RTX)
5123 return last;
5124
5125 switch (GET_CODE (x))
5126 {
5127 case DEBUG_INSN:
5128 case INSN:
5129 case JUMP_INSN:
5130 case CALL_INSN:
5131 case CODE_LABEL:
5132 case BARRIER:
5133 case NOTE:
5134 insn = as_a <rtx_insn *> (x);
5135 while (insn)
5136 {
5137 rtx_insn *next = NEXT_INSN (insn);
5138 add_insn (insn);
5139 last = insn;
5140 insn = next;
5141 }
5142 break;
5143
5144 #ifdef ENABLE_RTL_CHECKING
5145 case JUMP_TABLE_DATA:
5146 case SEQUENCE:
5147 gcc_unreachable ();
5148 break;
5149 #endif
5150
5151 default:
5152 last = make_debug_insn_raw (x);
5153 add_insn (last);
5154 break;
5155 }
5156
5157 return last;
5158 }
5159
5160 /* Make an insn of code JUMP_INSN with pattern X
5161 and add it to the end of the doubly-linked list. */
5162
5163 rtx_insn *
5164 emit_jump_insn (rtx x)
5165 {
5166 rtx_insn *last = NULL;
5167 rtx_insn *insn;
5168
5169 switch (GET_CODE (x))
5170 {
5171 case DEBUG_INSN:
5172 case INSN:
5173 case JUMP_INSN:
5174 case CALL_INSN:
5175 case CODE_LABEL:
5176 case BARRIER:
5177 case NOTE:
5178 insn = as_a <rtx_insn *> (x);
5179 while (insn)
5180 {
5181 rtx_insn *next = NEXT_INSN (insn);
5182 add_insn (insn);
5183 last = insn;
5184 insn = next;
5185 }
5186 break;
5187
5188 #ifdef ENABLE_RTL_CHECKING
5189 case JUMP_TABLE_DATA:
5190 case SEQUENCE:
5191 gcc_unreachable ();
5192 break;
5193 #endif
5194
5195 default:
5196 last = make_jump_insn_raw (x);
5197 add_insn (last);
5198 break;
5199 }
5200
5201 return last;
5202 }
5203
5204 /* Make an insn of code CALL_INSN with pattern X
5205 and add it to the end of the doubly-linked list. */
5206
5207 rtx_insn *
5208 emit_call_insn (rtx x)
5209 {
5210 rtx_insn *insn;
5211
5212 switch (GET_CODE (x))
5213 {
5214 case DEBUG_INSN:
5215 case INSN:
5216 case JUMP_INSN:
5217 case CALL_INSN:
5218 case CODE_LABEL:
5219 case BARRIER:
5220 case NOTE:
5221 insn = emit_insn (x);
5222 break;
5223
5224 #ifdef ENABLE_RTL_CHECKING
5225 case SEQUENCE:
5226 case JUMP_TABLE_DATA:
5227 gcc_unreachable ();
5228 break;
5229 #endif
5230
5231 default:
5232 insn = make_call_insn_raw (x);
5233 add_insn (insn);
5234 break;
5235 }
5236
5237 return insn;
5238 }
5239
5240 /* Add the label LABEL to the end of the doubly-linked list. */
5241
5242 rtx_code_label *
5243 emit_label (rtx uncast_label)
5244 {
5245 rtx_code_label *label = as_a <rtx_code_label *> (uncast_label);
5246
5247 gcc_checking_assert (INSN_UID (label) == 0);
5248 INSN_UID (label) = cur_insn_uid++;
5249 add_insn (label);
5250 return label;
5251 }
5252
5253 /* Make an insn of code JUMP_TABLE_DATA
5254 and add it to the end of the doubly-linked list. */
5255
5256 rtx_jump_table_data *
5257 emit_jump_table_data (rtx table)
5258 {
5259 rtx_jump_table_data *jump_table_data =
5260 as_a <rtx_jump_table_data *> (rtx_alloc (JUMP_TABLE_DATA));
5261 INSN_UID (jump_table_data) = cur_insn_uid++;
5262 PATTERN (jump_table_data) = table;
5263 BLOCK_FOR_INSN (jump_table_data) = NULL;
5264 add_insn (jump_table_data);
5265 return jump_table_data;
5266 }
5267
5268 /* Make an insn of code BARRIER
5269 and add it to the end of the doubly-linked list. */
5270
5271 rtx_barrier *
5272 emit_barrier (void)
5273 {
5274 rtx_barrier *barrier = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
5275 INSN_UID (barrier) = cur_insn_uid++;
5276 add_insn (barrier);
5277 return barrier;
5278 }
5279
5280 /* Emit a copy of note ORIG. */
5281
5282 rtx_note *
5283 emit_note_copy (rtx_note *orig)
5284 {
5285 enum insn_note kind = (enum insn_note) NOTE_KIND (orig);
5286 rtx_note *note = make_note_raw (kind);
5287 NOTE_DATA (note) = NOTE_DATA (orig);
5288 add_insn (note);
5289 return note;
5290 }
5291
5292 /* Make an insn of code NOTE with kind KIND
5293 and add it to the end of the doubly-linked list. */
5294
5295 rtx_note *
5296 emit_note (enum insn_note kind)
5297 {
5298 rtx_note *note = make_note_raw (kind);
5299 add_insn (note);
5300 return note;
5301 }
5302
5303 /* Emit a clobber of lvalue X. */
5304
5305 rtx_insn *
5306 emit_clobber (rtx x)
5307 {
5308 /* CONCATs should not appear in the insn stream. */
5309 if (GET_CODE (x) == CONCAT)
5310 {
5311 emit_clobber (XEXP (x, 0));
5312 return emit_clobber (XEXP (x, 1));
5313 }
5314 return emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
5315 }
5316
5317 /* Return a sequence of insns to clobber lvalue X. */
5318
5319 rtx_insn *
5320 gen_clobber (rtx x)
5321 {
5322 rtx_insn *seq;
5323
5324 start_sequence ();
5325 emit_clobber (x);
5326 seq = get_insns ();
5327 end_sequence ();
5328 return seq;
5329 }
5330
5331 /* Emit a use of rvalue X. */
5332
5333 rtx_insn *
5334 emit_use (rtx x)
5335 {
5336 /* CONCATs should not appear in the insn stream. */
5337 if (GET_CODE (x) == CONCAT)
5338 {
5339 emit_use (XEXP (x, 0));
5340 return emit_use (XEXP (x, 1));
5341 }
5342 return emit_insn (gen_rtx_USE (VOIDmode, x));
5343 }
5344
5345 /* Return a sequence of insns to use rvalue X. */
5346
5347 rtx_insn *
5348 gen_use (rtx x)
5349 {
5350 rtx_insn *seq;
5351
5352 start_sequence ();
5353 emit_use (x);
5354 seq = get_insns ();
5355 end_sequence ();
5356 return seq;
5357 }
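
/* Illustrative sketch (not part of GCC itself): keeping REG visibly live
   at a point in the stream by inserting a USE there.  REG and INSN are
   hypothetical:

     emit_insn_before (gen_use (reg), insn);  */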
5358
5359 /* Notes like REG_EQUAL and REG_EQUIV refer to a set in an instruction.
5360 Return the set in INSN that such notes describe, or NULL if the notes
5361 have no meaning for INSN. */
5362
5363 rtx
5364 set_for_reg_notes (rtx insn)
5365 {
5366 rtx pat, reg;
5367
5368 if (!INSN_P (insn))
5369 return NULL_RTX;
5370
5371 pat = PATTERN (insn);
5372 if (GET_CODE (pat) == PARALLEL)
5373 {
5374 /* We do not use single_set because that ignores SETs of unused
5375 registers. REG_EQUAL and REG_EQUIV notes really do require the
5376 PARALLEL to have a single SET. */
5377 if (multiple_sets (insn))
5378 return NULL_RTX;
5379 pat = XVECEXP (pat, 0, 0);
5380 }
5381
5382 if (GET_CODE (pat) != SET)
5383 return NULL_RTX;
5384
5385 reg = SET_DEST (pat);
5386
5387 /* Notes apply to the contents of a STRICT_LOW_PART. */
5388 if (GET_CODE (reg) == STRICT_LOW_PART
5389 || GET_CODE (reg) == ZERO_EXTRACT)
5390 reg = XEXP (reg, 0);
5391
5392 /* Check that we have a register. */
5393 if (!(REG_P (reg) || GET_CODE (reg) == SUBREG))
5394 return NULL_RTX;
5395
5396 return pat;
5397 }
5398
5399 /* Place a note of KIND on insn INSN with DATUM as the datum. If a
5400 note of this type already exists, remove it first. */
5401
5402 rtx
5403 set_unique_reg_note (rtx insn, enum reg_note kind, rtx datum)
5404 {
5405 rtx note = find_reg_note (insn, kind, NULL_RTX);
5406
5407 switch (kind)
5408 {
5409 case REG_EQUAL:
5410 case REG_EQUIV:
5411 /* We need to support the REG_EQUAL on USE trick of find_reloads. */
5412 if (!set_for_reg_notes (insn) && GET_CODE (PATTERN (insn)) != USE)
5413 return NULL_RTX;
5414
5415 /* Don't add ASM_OPERAND REG_EQUAL/REG_EQUIV notes.
5416 It serves no useful purpose and breaks eliminate_regs. */
5417 if (GET_CODE (datum) == ASM_OPERANDS)
5418 return NULL_RTX;
5419
5420 /* Notes with side effects are dangerous. Even if the side-effect
5421 initially mirrors one in PATTERN (INSN), later optimizations
5422 might alter the way that the final register value is calculated
5423 and so move or alter the side-effect in some way. The note would
5424 then no longer be a valid substitution for SET_SRC. */
5425 if (side_effects_p (datum))
5426 return NULL_RTX;
5427 break;
5428
5429 default:
5430 break;
5431 }
5432
5433 if (note)
5434 XEXP (note, 0) = datum;
5435 else
5436 {
5437 add_reg_note (insn, kind, datum);
5438 note = REG_NOTES (insn);
5439 }
5440
5441 switch (kind)
5442 {
5443 case REG_EQUAL:
5444 case REG_EQUIV:
5445 df_notes_rescan (as_a <rtx_insn *> (insn));
5446 break;
5447 default:
5448 break;
5449 }
5450
5451 return note;
5452 }
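
/* Illustrative sketch (not part of GCC itself): recording that an insn's
   destination is known to equal a constant, so that later passes may
   substitute the value.  INSN is hypothetical:

     set_unique_reg_note (insn, REG_EQUAL, GEN_INT (42));  */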
5453
5454 /* Like set_unique_reg_note, but don't do anything unless INSN sets DST. */
5455 rtx
5456 set_dst_reg_note (rtx insn, enum reg_note kind, rtx datum, rtx dst)
5457 {
5458 rtx set = set_for_reg_notes (insn);
5459
5460 if (set && SET_DEST (set) == dst)
5461 return set_unique_reg_note (insn, kind, datum);
5462 return NULL_RTX;
5463 }
5464 \f
5465 /* Emit the rtl pattern X as an appropriate kind of insn. Also emit a
5466 following barrier if the instruction needs one and if ALLOW_BARRIER_P
5467 is true.
5468
5469 If X is a label, it is simply added into the insn chain. */
5470
5471 rtx_insn *
5472 emit (rtx x, bool allow_barrier_p)
5473 {
5474 enum rtx_code code = classify_insn (x);
5475
5476 switch (code)
5477 {
5478 case CODE_LABEL:
5479 return emit_label (x);
5480 case INSN:
5481 return emit_insn (x);
5482 case JUMP_INSN:
5483 {
5484 rtx_insn *insn = emit_jump_insn (x);
5485 if (allow_barrier_p
5486 && (any_uncondjump_p (insn) || GET_CODE (x) == RETURN))
5487 return emit_barrier ();
5488 return insn;
5489 }
5490 case CALL_INSN:
5491 return emit_call_insn (x);
5492 case DEBUG_INSN:
5493 return emit_debug_insn (x);
5494 default:
5495 gcc_unreachable ();
5496 }
5497 }
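
/* Illustrative sketch (not part of GCC itself): emit is handy when the
   caller does not know statically what kind of insn a pattern makes.
   REG is a hypothetical register rtx:

     emit (gen_rtx_SET (reg, const0_rtx), true);

   classify_insn sees a plain SET and routes this to emit_insn.  */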
5498 \f
5499 /* Space for free sequence stack entries. */
5500 static GTY ((deletable)) struct sequence_stack *free_sequence_stack;
5501
5502 /* Begin emitting insns to a sequence. If this sequence will contain
5503 something that might cause the compiler to pop arguments to function
5504 calls (because those pops have previously been deferred; see
5505 INHIBIT_DEFER_POP for more details), use do_pending_stack_adjust
5506 before calling this function. That will ensure that the deferred
5507 pops are not accidentally emitted in the middle of this sequence. */
5508
5509 void
5510 start_sequence (void)
5511 {
5512 struct sequence_stack *tem;
5513
5514 if (free_sequence_stack != NULL)
5515 {
5516 tem = free_sequence_stack;
5517 free_sequence_stack = tem->next;
5518 }
5519 else
5520 tem = ggc_alloc<sequence_stack> ();
5521
5522 tem->next = get_current_sequence ()->next;
5523 tem->first = get_insns ();
5524 tem->last = get_last_insn ();
5525 get_current_sequence ()->next = tem;
5526
5527 set_first_insn (0);
5528 set_last_insn (0);
5529 }
5530
5531 /* Set up the insn chain starting with FIRST as the current sequence,
5532 saving the previously current one. See the documentation for
5533 start_sequence for more information about how to use this function. */
5534
5535 void
5536 push_to_sequence (rtx_insn *first)
5537 {
5538 rtx_insn *last;
5539
5540 start_sequence ();
5541
5542 for (last = first; last && NEXT_INSN (last); last = NEXT_INSN (last))
5543 ;
5544
5545 set_first_insn (first);
5546 set_last_insn (last);
5547 }
5548
5549 /* Like push_to_sequence, but take the last insn as an argument to avoid
5550 looping through the list. */
5551
5552 void
5553 push_to_sequence2 (rtx_insn *first, rtx_insn *last)
5554 {
5555 start_sequence ();
5556
5557 set_first_insn (first);
5558 set_last_insn (last);
5559 }
5560
5561 /* Set up the outer-level insn chain
5562 as the current sequence, saving the previously current one. */
5563
5564 void
5565 push_topmost_sequence (void)
5566 {
5567 struct sequence_stack *top;
5568
5569 start_sequence ();
5570
5571 top = get_topmost_sequence ();
5572 set_first_insn (top->first);
5573 set_last_insn (top->last);
5574 }
5575
5576 /* After emitting to the outer-level insn chain, update the outer-level
5577 insn chain, and restore the previous saved state. */
5578
5579 void
5580 pop_topmost_sequence (void)
5581 {
5582 struct sequence_stack *top;
5583
5584 top = get_topmost_sequence ();
5585 top->first = get_insns ();
5586 top->last = get_last_insn ();
5587
5588 end_sequence ();
5589 }
5590
5591 /* After emitting to a sequence, restore previous saved state.
5592
5593 To get the contents of the sequence just made, you must call
5594 `get_insns' *before* calling here.
5595
5596 If the compiler might have deferred popping arguments while
5597 generating this sequence, and this sequence will not be immediately
5598 inserted into the instruction stream, use do_pending_stack_adjust
5599 before calling get_insns. That will ensure that the deferred
5600 pops are inserted into this sequence, and not into some random
5601 location in the instruction stream. See INHIBIT_DEFER_POP for more
5602 information about deferred popping of arguments. */
5603
5604 void
5605 end_sequence (void)
5606 {
5607 struct sequence_stack *tem = get_current_sequence ()->next;
5608
5609 set_first_insn (tem->first);
5610 set_last_insn (tem->last);
5611 get_current_sequence ()->next = tem->next;
5612
5613 memset (tem, 0, sizeof (*tem));
5614 tem->next = free_sequence_stack;
5615 free_sequence_stack = tem;
5616 }
5617
5618 /* Return 1 if currently emitting into a sequence. */
5619
5620 int
5621 in_sequence_p (void)
5622 {
5623 return get_current_sequence ()->next != 0;
5624 }
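
/* Illustrative sketch (not part of GCC itself): the topmost-sequence pair
   lets code that runs inside a nested sequence emit insns onto the
   function's main chain:

     push_topmost_sequence ();
     ... emit insns destined for the end of the main chain ...
     pop_topmost_sequence ();  */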
5625 \f
5626 /* Put the various virtual registers into REGNO_REG_RTX. */
5627
5628 static void
5629 init_virtual_regs (void)
5630 {
5631 regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM] = virtual_incoming_args_rtx;
5632 regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM] = virtual_stack_vars_rtx;
5633 regno_reg_rtx[VIRTUAL_STACK_DYNAMIC_REGNUM] = virtual_stack_dynamic_rtx;
5634 regno_reg_rtx[VIRTUAL_OUTGOING_ARGS_REGNUM] = virtual_outgoing_args_rtx;
5635 regno_reg_rtx[VIRTUAL_CFA_REGNUM] = virtual_cfa_rtx;
5636 regno_reg_rtx[VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM]
5637 = virtual_preferred_stack_boundary_rtx;
5638 }
5639
5640 \f
5641 /* Used by copy_insn_1 to avoid copying SCRATCHes more than once. */
5642 static rtx copy_insn_scratch_in[MAX_RECOG_OPERANDS];
5643 static rtx copy_insn_scratch_out[MAX_RECOG_OPERANDS];
5644 static int copy_insn_n_scratches;
5645
5646 /* When an insn is being copied by copy_insn_1, this is nonzero if we have
5647 copied an ASM_OPERANDS.
5648 In that case, it is the original input-operand vector. */
5649 static rtvec orig_asm_operands_vector;
5650
5651 /* When an insn is being copied by copy_insn_1, this is nonzero if we have
5652 copied an ASM_OPERANDS.
5653 In that case, it is the copied input-operand vector. */
5654 static rtvec copy_asm_operands_vector;
5655
5656 /* Likewise for the constraints vector. */
5657 static rtvec orig_asm_constraints_vector;
5658 static rtvec copy_asm_constraints_vector;
5659
5660 /* Recursively create a new copy of an rtx for copy_insn.
5661 This function differs from copy_rtx in that it handles SCRATCHes and
5662 ASM_OPERANDs properly.
5663 Normally, this function is not used directly; use copy_insn as front end.
5664 However, you could first copy an insn pattern with copy_insn and then use
5665 this function afterwards to properly copy any REG_NOTEs containing
5666 SCRATCHes. */
5667
5668 rtx
5669 copy_insn_1 (rtx orig)
5670 {
5671 rtx copy;
5672 int i, j;
5673 RTX_CODE code;
5674 const char *format_ptr;
5675
5676 if (orig == NULL)
5677 return NULL;
5678
5679 code = GET_CODE (orig);
5680
5681 switch (code)
5682 {
5683 case REG:
5684 case DEBUG_EXPR:
5685 CASE_CONST_ANY:
5686 case SYMBOL_REF:
5687 case CODE_LABEL:
5688 case PC:
5689 case CC0:
5690 case RETURN:
5691 case SIMPLE_RETURN:
5692 return orig;
5693 case CLOBBER:
5694 /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
5695 clobbers or clobbers of hard registers that originated as pseudos.
5696 This is needed to allow safe register renaming. */
5697 if (REG_P (XEXP (orig, 0))
5698 && HARD_REGISTER_NUM_P (REGNO (XEXP (orig, 0)))
5699 && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (orig, 0))))
5700 return orig;
5701 break;
5702
5703 case SCRATCH:
5704 for (i = 0; i < copy_insn_n_scratches; i++)
5705 if (copy_insn_scratch_in[i] == orig)
5706 return copy_insn_scratch_out[i];
5707 break;
5708
5709 case CONST:
5710 if (shared_const_p (orig))
5711 return orig;
5712 break;
5713
5714 /* A MEM with a constant address is not sharable. The problem is that
5715 the constant address may need to be reloaded. If the mem is shared,
5716 then reloading one copy of this mem will cause all copies to appear
5717 to have been reloaded. */
5718
5719 default:
5720 break;
5721 }
5722
5723 /* Copy the various flags, fields, and other information. We assume
5724 that all fields need copying, and then clear the fields that should
5725 not be copied. That is the sensible default behavior, and forces
5726 us to explicitly document why we are *not* copying a flag. */
5727 copy = shallow_copy_rtx (orig);
5728
5729 /* We do not copy JUMP, CALL, or FRAME_RELATED for INSNs. */
5730 if (INSN_P (orig))
5731 {
5732 RTX_FLAG (copy, jump) = 0;
5733 RTX_FLAG (copy, call) = 0;
5734 RTX_FLAG (copy, frame_related) = 0;
5735 }
5736
5737 format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
5738
5739 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
5740 switch (*format_ptr++)
5741 {
5742 case 'e':
5743 if (XEXP (orig, i) != NULL)
5744 XEXP (copy, i) = copy_insn_1 (XEXP (orig, i));
5745 break;
5746
5747 case 'E':
5748 case 'V':
5749 if (XVEC (orig, i) == orig_asm_constraints_vector)
5750 XVEC (copy, i) = copy_asm_constraints_vector;
5751 else if (XVEC (orig, i) == orig_asm_operands_vector)
5752 XVEC (copy, i) = copy_asm_operands_vector;
5753 else if (XVEC (orig, i) != NULL)
5754 {
5755 XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
5756 for (j = 0; j < XVECLEN (copy, i); j++)
5757 XVECEXP (copy, i, j) = copy_insn_1 (XVECEXP (orig, i, j));
5758 }
5759 break;
5760
5761 case 't':
5762 case 'w':
5763 case 'i':
5764 case 'p':
5765 case 's':
5766 case 'S':
5767 case 'u':
5768 case '0':
5769 /* These are left unchanged. */
5770 break;
5771
5772 default:
5773 gcc_unreachable ();
5774 }
5775
5776 if (code == SCRATCH)
5777 {
5778 i = copy_insn_n_scratches++;
5779 gcc_assert (i < MAX_RECOG_OPERANDS);
5780 copy_insn_scratch_in[i] = orig;
5781 copy_insn_scratch_out[i] = copy;
5782 }
5783 else if (code == ASM_OPERANDS)
5784 {
5785 orig_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (orig);
5786 copy_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (copy);
5787 orig_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (orig);
5788 copy_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (copy);
5789 }
5790
5791 return copy;
5792 }
5793
5794 /* Create a new copy of an rtx.
5795 This function differs from copy_rtx in that it handles SCRATCHes and
5796 ASM_OPERANDs properly.
5797 INSN doesn't really have to be a full INSN; it could be just the
5798 pattern. */
5799 rtx
5800 copy_insn (rtx insn)
5801 {
5802 copy_insn_n_scratches = 0;
5803 orig_asm_operands_vector = 0;
5804 orig_asm_constraints_vector = 0;
5805 copy_asm_operands_vector = 0;
5806 copy_asm_constraints_vector = 0;
5807 return copy_insn_1 (insn);
5808 }
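
/* Illustrative sketch (not part of GCC itself): duplicating an existing
   insn's pattern so that a fresh, unshared copy can be emitted
   elsewhere.  INSN and SPOT are hypothetical:

     rtx pat = copy_insn (PATTERN (insn));
     emit_insn_before (pat, spot);  */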
5809
5810 /* Return a copy of INSN that can be used in a SEQUENCE delay slot,
5811 on the assumption that INSN itself remains in its original place. */
5812
5813 rtx_insn *
5814 copy_delay_slot_insn (rtx_insn *insn)
5815 {
5816 /* Copy INSN with its rtx_code, all its notes, location etc. */
5817 insn = as_a <rtx_insn *> (copy_rtx (insn));
5818 INSN_UID (insn) = cur_insn_uid++;
5819 return insn;
5820 }
5821
5822 /* Initialize data structures and variables in this file
5823 before generating rtl for each function. */
5824
5825 void
5826 init_emit (void)
5827 {
5828 set_first_insn (NULL);
5829 set_last_insn (NULL);
5830 if (param_min_nondebug_insn_uid)
5831 cur_insn_uid = param_min_nondebug_insn_uid;
5832 else
5833 cur_insn_uid = 1;
5834 cur_debug_insn_uid = 1;
5835 reg_rtx_no = LAST_VIRTUAL_REGISTER + 1;
5836 first_label_num = label_num;
5837 get_current_sequence ()->next = NULL;
5838
5839 /* Init the tables that describe all the pseudo regs. */
5840
  crtl->emit.regno_pointer_align_length = LAST_VIRTUAL_REGISTER + 101;

  crtl->emit.regno_pointer_align
    = XCNEWVEC (unsigned char, crtl->emit.regno_pointer_align_length);

  regno_reg_rtx
    = ggc_cleared_vec_alloc<rtx> (crtl->emit.regno_pointer_align_length);

  /* Put copies of all the hard registers into regno_reg_rtx.  */
  memcpy (regno_reg_rtx,
          initial_regno_reg_rtx,
          FIRST_PSEUDO_REGISTER * sizeof (rtx));

  /* Put copies of all the virtual register rtx into regno_reg_rtx.  */
  init_virtual_regs ();

  /* Indicate that the virtual registers and stack locations are
     all pointers.  */
  REG_POINTER (stack_pointer_rtx) = 1;
  REG_POINTER (frame_pointer_rtx) = 1;
  REG_POINTER (hard_frame_pointer_rtx) = 1;
  REG_POINTER (arg_pointer_rtx) = 1;

  REG_POINTER (virtual_incoming_args_rtx) = 1;
  REG_POINTER (virtual_stack_vars_rtx) = 1;
  REG_POINTER (virtual_stack_dynamic_rtx) = 1;
  REG_POINTER (virtual_outgoing_args_rtx) = 1;
  REG_POINTER (virtual_cfa_rtx) = 1;

#ifdef STACK_BOUNDARY
  REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY;
  REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
  REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
  REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY;

  REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM) = STACK_BOUNDARY;
  REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM) = STACK_BOUNDARY;
  REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM) = STACK_BOUNDARY;
  REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM) = STACK_BOUNDARY;

  REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = BITS_PER_WORD;
#endif

#ifdef INIT_EXPANDERS
  INIT_EXPANDERS;
#endif
}

/* Return the value of element I of CONST_VECTOR X as a wide_int.  */

wide_int
const_vector_int_elt (const_rtx x, unsigned int i)
{
  /* First handle elements that are directly encoded.  */
  machine_mode elt_mode = GET_MODE_INNER (GET_MODE (x));
  if (i < (unsigned int) XVECLEN (x, 0))
    return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, i), elt_mode);

  /* Identify the pattern that contains element I and work out the index of
     the last encoded element for that pattern.  */
  unsigned int encoded_nelts = const_vector_encoded_nelts (x);
  unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
  unsigned int count = i / npatterns;
  unsigned int pattern = i % npatterns;
  unsigned int final_i = encoded_nelts - npatterns + pattern;

  /* If there are no steps, the final encoded value is the right one.  */
  if (!CONST_VECTOR_STEPPED_P (x))
    return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, final_i), elt_mode);

  /* Otherwise work out the value from the last two encoded elements.  */
  rtx v1 = CONST_VECTOR_ENCODED_ELT (x, final_i - npatterns);
  rtx v2 = CONST_VECTOR_ENCODED_ELT (x, final_i);
  wide_int diff = wi::sub (rtx_mode_t (v2, elt_mode),
                           rtx_mode_t (v1, elt_mode));
  return wi::add (rtx_mode_t (v2, elt_mode), (count - 2) * diff);
}
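
/* Worked example (illustrative, not from the original sources): a stepped
   V8SImode constant {1, 3, 5, 7, ...} is encoded as npatterns == 1 with
   the three leading elements {1, 3, 5}.  For element 6 the code above
   computes count == 6, pattern == 0 and final_i == 2, giving v1 == 3,
   v2 == 5 and diff == 2, so the result is 5 + (6 - 2) * 2 == 13.  */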

/* Return the value of element I of CONST_VECTOR X.  */

rtx
const_vector_elt (const_rtx x, unsigned int i)
{
  /* First handle elements that are directly encoded.  */
  if (i < (unsigned int) XVECLEN (x, 0))
    return CONST_VECTOR_ENCODED_ELT (x, i);

  /* If there are no steps, the final encoded value is the right one.  */
  if (!CONST_VECTOR_STEPPED_P (x))
    {
      /* Identify the pattern that contains element I and work out the index
         of the last encoded element for that pattern.  */
      unsigned int encoded_nelts = const_vector_encoded_nelts (x);
      unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
      unsigned int pattern = i % npatterns;
      unsigned int final_i = encoded_nelts - npatterns + pattern;
      return CONST_VECTOR_ENCODED_ELT (x, final_i);
    }

  /* Otherwise work out the value from the last two encoded elements.  */
  return immed_wide_int_const (const_vector_int_elt (x, i),
                               GET_MODE_INNER (GET_MODE (x)));
}

/* Return true if X is a valid element for a CONST_VECTOR of the given
   mode.  */

bool
valid_for_const_vector_p (machine_mode, rtx x)
{
  return (CONST_SCALAR_INT_P (x)
          || CONST_DOUBLE_AS_FLOAT_P (x)
          || CONST_FIXED_P (x));
}

/* Generate a vector constant of mode MODE in which every element has
   value ELT.  */

rtx
gen_const_vec_duplicate (machine_mode mode, rtx elt)
{
  rtx_vector_builder builder (mode, 1, 1);
  builder.quick_push (elt);
  return builder.build ();
}
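
/* For instance (an illustrative note), gen_const_vec_duplicate (V4SImode,
   const1_rtx) builds {1, 1, 1, 1} as a single one-element pattern, so only
   one encoded element is stored no matter how long the vector is.  */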

/* Return a vector rtx of mode MODE in which every element has value X.
   The result will be a constant if X is constant.  */

rtx
gen_vec_duplicate (machine_mode mode, rtx x)
{
  if (valid_for_const_vector_p (mode, x))
    return gen_const_vec_duplicate (mode, x);
  return gen_rtx_VEC_DUPLICATE (mode, x);
}

/* A subroutine of const_vec_series_p that handles the case in which:

     (GET_CODE (X) == CONST_VECTOR
      && CONST_VECTOR_NPATTERNS (X) == 1
      && !CONST_VECTOR_DUPLICATE_P (X))

   is known to hold.  */

bool
const_vec_series_p_1 (const_rtx x, rtx *base_out, rtx *step_out)
{
  /* Stepped sequences are only defined for integers, to avoid specifying
     rounding behavior.  */
  if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
    return false;

  /* A non-duplicated vector with two elements can always be seen as a
     series with a nonzero step.  Longer vectors must have a stepped
     encoding.  */
  if (maybe_ne (CONST_VECTOR_NUNITS (x), 2)
      && !CONST_VECTOR_STEPPED_P (x))
    return false;

  /* Calculate the step between the first and second elements.  */
  scalar_mode inner = GET_MODE_INNER (GET_MODE (x));
  rtx base = CONST_VECTOR_ELT (x, 0);
  rtx step = simplify_binary_operation (MINUS, inner,
                                        CONST_VECTOR_ENCODED_ELT (x, 1), base);
  if (rtx_equal_p (step, CONST0_RTX (inner)))
    return false;

  /* If we have a stepped encoding, check that the step between the
     second and third elements is the same as STEP.  */
  if (CONST_VECTOR_STEPPED_P (x))
    {
      rtx diff = simplify_binary_operation (MINUS, inner,
                                            CONST_VECTOR_ENCODED_ELT (x, 2),
                                            CONST_VECTOR_ENCODED_ELT (x, 1));
      if (!rtx_equal_p (step, diff))
        return false;
    }

  *base_out = base;
  *step_out = step;
  return true;
}
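
/* Example (illustrative): {1, 3, 5, 7} is encoded with npatterns == 1 and
   encoded elements {1, 3, 5}; the code above computes step == 3 - 1 == 2,
   confirms that 5 - 3 == 2 as well, and returns base 1 with step 2.  */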

/* Generate a vector constant of mode MODE in which element I has
   the value BASE + I * STEP.  */

rtx
gen_const_vec_series (machine_mode mode, rtx base, rtx step)
{
  gcc_assert (valid_for_const_vector_p (mode, base)
              && valid_for_const_vector_p (mode, step));

  rtx_vector_builder builder (mode, 1, 3);
  builder.quick_push (base);
  for (int i = 1; i < 3; ++i)
    builder.quick_push (simplify_gen_binary (PLUS, GET_MODE_INNER (mode),
                                             builder[i - 1], step));
  return builder.build ();
}
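
/* E.g. (an illustrative note) gen_const_vec_series (V4SImode, const0_rtx,
   const1_rtx) pushes the encoded elements {0, 1, 2}; the stepped encoding
   then implies the remaining element 3 without storing it.  */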

/* Generate a vector of mode MODE in which element I has the value
   BASE + I * STEP.  The result will be a constant if BASE and STEP
   are both constants.  */

rtx
gen_vec_series (machine_mode mode, rtx base, rtx step)
{
  if (step == const0_rtx)
    return gen_vec_duplicate (mode, base);
  if (valid_for_const_vector_p (mode, base)
      && valid_for_const_vector_p (mode, step))
    return gen_const_vec_series (mode, base, step);
  return gen_rtx_VEC_SERIES (mode, base, step);
}

/* Generate a new vector constant for mode MODE and constant value
   CONSTANT.  */

static rtx
gen_const_vector (machine_mode mode, int constant)
{
  machine_mode inner = GET_MODE_INNER (mode);

  gcc_assert (!DECIMAL_FLOAT_MODE_P (inner));

  rtx el = const_tiny_rtx[constant][(int) inner];
  gcc_assert (el);

  return gen_const_vec_duplicate (mode, el);
}

/* Generate a vector like gen_rtx_raw_CONST_VEC, but canonicalize the
   result when all elements are equal, e.g. to the shared zero vector when
   all elements are zero, and to the one vector when all elements are one.  */
rtx
gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v)
{
  gcc_assert (known_eq (GET_MODE_NUNITS (mode), GET_NUM_ELEM (v)));

  /* If the values are all the same, check to see if we can use one of the
     standard constant vectors.  */
  if (rtvec_all_equal_p (v))
    return gen_const_vec_duplicate (mode, RTVEC_ELT (v, 0));

  unsigned int nunits = GET_NUM_ELEM (v);
  rtx_vector_builder builder (mode, nunits, 1);
  for (unsigned int i = 0; i < nunits; ++i)
    builder.quick_push (RTVEC_ELT (v, i));
  return builder.build (v);
}
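
/* Note (illustrative): when every element is equal, the duplicate form also
   lets the builder return a shared constant where one exists, e.g. an
   all-zero rtvec in V4SImode should come back as CONST0_RTX (V4SImode)
   rather than as a freshly allocated vector.  */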

/* Initialise global register information required by all functions.  */

void
init_emit_regs (void)
{
  int i;
  machine_mode mode;
  mem_attrs *attrs;

  /* Reset register attributes.  */
  reg_attrs_htab->empty ();

  /* We need reg_raw_mode, so initialize the modes now.  */
  init_reg_modes_target ();

  /* Assign register numbers to the globally defined register rtx.  */
  stack_pointer_rtx = gen_raw_REG (Pmode, STACK_POINTER_REGNUM);
  frame_pointer_rtx = gen_raw_REG (Pmode, FRAME_POINTER_REGNUM);
  hard_frame_pointer_rtx = gen_raw_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM);
  virtual_incoming_args_rtx =
    gen_raw_REG (Pmode, VIRTUAL_INCOMING_ARGS_REGNUM);
  virtual_stack_vars_rtx =
    gen_raw_REG (Pmode, VIRTUAL_STACK_VARS_REGNUM);
  virtual_stack_dynamic_rtx =
    gen_raw_REG (Pmode, VIRTUAL_STACK_DYNAMIC_REGNUM);
  virtual_outgoing_args_rtx =
    gen_raw_REG (Pmode, VIRTUAL_OUTGOING_ARGS_REGNUM);
  virtual_cfa_rtx = gen_raw_REG (Pmode, VIRTUAL_CFA_REGNUM);
  virtual_preferred_stack_boundary_rtx =
    gen_raw_REG (Pmode, VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM);

  /* Initialize RTL for commonly used hard registers.  These are
     copied into regno_reg_rtx as we begin to compile each function.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    initial_regno_reg_rtx[i] = gen_raw_REG (reg_raw_mode[i], i);

#ifdef RETURN_ADDRESS_POINTER_REGNUM
  return_address_pointer_rtx
    = gen_raw_REG (Pmode, RETURN_ADDRESS_POINTER_REGNUM);
#endif

  pic_offset_table_rtx = NULL_RTX;
  if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    pic_offset_table_rtx = gen_raw_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);

  for (i = 0; i < (int) MAX_MACHINE_MODE; i++)
    {
      mode = (machine_mode) i;
      attrs = ggc_cleared_alloc<mem_attrs> ();
      attrs->align = BITS_PER_UNIT;
      attrs->addrspace = ADDR_SPACE_GENERIC;
      if (mode != BLKmode && mode != VOIDmode)
        {
          attrs->size_known_p = true;
          attrs->size = GET_MODE_SIZE (mode);
          if (STRICT_ALIGNMENT)
            attrs->align = GET_MODE_ALIGNMENT (mode);
        }
      mode_mem_attrs[i] = attrs;
    }

  split_branch_probability = profile_probability::uninitialized ();
}

/* Initialize global machine_mode variables.  */

void
init_derived_machine_modes (void)
{
  opt_scalar_int_mode mode_iter, opt_byte_mode, opt_word_mode;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    {
      scalar_int_mode mode = mode_iter.require ();

      if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT
          && !opt_byte_mode.exists ())
        opt_byte_mode = mode;

      if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD
          && !opt_word_mode.exists ())
        opt_word_mode = mode;
    }

  byte_mode = opt_byte_mode.require ();
  word_mode = opt_word_mode.require ();
  ptr_mode = as_a <scalar_int_mode>
    (mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0).require ());
}
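
/* On a typical 64-bit target with 8-bit bytes (an illustrative note), this
   leaves byte_mode == QImode and word_mode == DImode, and with 64-bit
   pointers ptr_mode == DImode as well.  */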

/* Create some permanent unique rtl objects shared between all functions.  */

void
init_emit_once (void)
{
  int i;
  machine_mode mode;
  scalar_float_mode double_mode;
  opt_scalar_mode smode_iter;

  /* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE,
     CONST_FIXED, and memory attribute hash tables.  */
  const_int_htab = hash_table<const_int_hasher>::create_ggc (37);

#if TARGET_SUPPORTS_WIDE_INT
  const_wide_int_htab = hash_table<const_wide_int_hasher>::create_ggc (37);
#endif
  const_double_htab = hash_table<const_double_hasher>::create_ggc (37);

  if (NUM_POLY_INT_COEFFS > 1)
    const_poly_int_htab = hash_table<const_poly_int_hasher>::create_ggc (37);

  const_fixed_htab = hash_table<const_fixed_hasher>::create_ggc (37);

  reg_attrs_htab = hash_table<reg_attr_hasher>::create_ggc (37);

#ifdef INIT_EXPANDERS
  /* This is to initialize {init|mark|free}_machine_status before the first
     call to push_function_context_to.  This is needed by the Chill front
     end which calls push_function_context_to before the first call to
     init_function_start.  */
  INIT_EXPANDERS;
#endif

  /* Create the unique rtx's for certain rtx codes and operand values.  */

  /* Process stack-limiting command-line options.  */
  if (opt_fstack_limit_symbol_arg != NULL)
    stack_limit_rtx
      = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (opt_fstack_limit_symbol_arg));
  if (opt_fstack_limit_register_no >= 0)
    stack_limit_rtx = gen_rtx_REG (Pmode, opt_fstack_limit_register_no);

  /* Don't use gen_rtx_CONST_INT here since gen_rtx_CONST_INT in this case
     tries to use these variables.  */
  for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++)
    const_int_rtx[i + MAX_SAVED_CONST_INT] =
      gen_rtx_raw_CONST_INT (VOIDmode, (HOST_WIDE_INT) i);

  if (STORE_FLAG_VALUE >= - MAX_SAVED_CONST_INT
      && STORE_FLAG_VALUE <= MAX_SAVED_CONST_INT)
    const_true_rtx = const_int_rtx[STORE_FLAG_VALUE + MAX_SAVED_CONST_INT];
  else
    const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE);

  double_mode = float_mode_for_size (DOUBLE_TYPE_SIZE).require ();

  real_from_integer (&dconst0, double_mode, 0, SIGNED);
  real_from_integer (&dconst1, double_mode, 1, SIGNED);
  real_from_integer (&dconst2, double_mode, 2, SIGNED);

  dconstm1 = dconst1;
  dconstm1.sign = 1;

  dconsthalf = dconst1;
  SET_REAL_EXP (&dconsthalf, REAL_EXP (&dconsthalf) - 1);

  for (i = 0; i < 3; i++)
    {
      const REAL_VALUE_TYPE *const r =
        (i == 0 ? &dconst0 : i == 1 ? &dconst1 : &dconst2);

      FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
        const_tiny_rtx[i][(int) mode] =
          const_double_from_real_value (*r, mode);

      FOR_EACH_MODE_IN_CLASS (mode, MODE_DECIMAL_FLOAT)
        const_tiny_rtx[i][(int) mode] =
          const_double_from_real_value (*r, mode);

      const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i);

      FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
        const_tiny_rtx[i][(int) mode] = GEN_INT (i);

      for (mode = MIN_MODE_PARTIAL_INT;
           mode <= MAX_MODE_PARTIAL_INT;
           mode = (machine_mode)((int)(mode) + 1))
        const_tiny_rtx[i][(int) mode] = GEN_INT (i);
    }

  const_tiny_rtx[3][(int) VOIDmode] = constm1_rtx;

  FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
    const_tiny_rtx[3][(int) mode] = constm1_rtx;

  /* For BImode, 1 and -1 are unsigned and signed interpretations
     of the same value.  */
  const_tiny_rtx[0][(int) BImode] = const0_rtx;
  const_tiny_rtx[1][(int) BImode] = const_true_rtx;
  const_tiny_rtx[3][(int) BImode] = const_true_rtx;

  for (mode = MIN_MODE_PARTIAL_INT;
       mode <= MAX_MODE_PARTIAL_INT;
       mode = (machine_mode)((int)(mode) + 1))
    const_tiny_rtx[3][(int) mode] = constm1_rtx;

  FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_INT)
    {
      rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)];
      const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_FLOAT)
    {
      rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)];
      const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
    }

  /* As for BImode, "all 1" and "all -1" are unsigned and signed
     interpretations of the same value.  */
  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
      const_tiny_rtx[1][(int) mode] = const_tiny_rtx[3][(int) mode];
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
      const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
    }

  FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_FRACT)
    {
      scalar_mode smode = smode_iter.require ();
      FCONST0 (smode).data.high = 0;
      FCONST0 (smode).data.low = 0;
      FCONST0 (smode).mode = smode;
      const_tiny_rtx[0][(int) smode]
        = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
    }

  FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UFRACT)
    {
      scalar_mode smode = smode_iter.require ();
      FCONST0 (smode).data.high = 0;
      FCONST0 (smode).data.low = 0;
      FCONST0 (smode).mode = smode;
      const_tiny_rtx[0][(int) smode]
        = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
    }

  FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_ACCUM)
    {
      scalar_mode smode = smode_iter.require ();
      FCONST0 (smode).data.high = 0;
      FCONST0 (smode).data.low = 0;
      FCONST0 (smode).mode = smode;
      const_tiny_rtx[0][(int) smode]
        = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);

      /* We store the value 1.  */
      FCONST1 (smode).data.high = 0;
      FCONST1 (smode).data.low = 0;
      FCONST1 (smode).mode = smode;
      FCONST1 (smode).data
        = double_int_one.lshift (GET_MODE_FBIT (smode),
                                 HOST_BITS_PER_DOUBLE_INT,
                                 SIGNED_FIXED_POINT_MODE_P (smode));
      const_tiny_rtx[1][(int) smode]
        = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
    }

  FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UACCUM)
    {
      scalar_mode smode = smode_iter.require ();
      FCONST0 (smode).data.high = 0;
      FCONST0 (smode).data.low = 0;
      FCONST0 (smode).mode = smode;
      const_tiny_rtx[0][(int) smode]
        = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);

      /* We store the value 1.  */
      FCONST1 (smode).data.high = 0;
      FCONST1 (smode).data.low = 0;
      FCONST1 (smode).mode = smode;
      FCONST1 (smode).data
        = double_int_one.lshift (GET_MODE_FBIT (smode),
                                 HOST_BITS_PER_DOUBLE_INT,
                                 SIGNED_FIXED_POINT_MODE_P (smode));
      const_tiny_rtx[1][(int) smode]
        = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FRACT)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UFRACT)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_ACCUM)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
    }

  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UACCUM)
    {
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
    }

  for (i = (int) CCmode; i < (int) MAX_MACHINE_MODE; ++i)
    if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC)
      const_tiny_rtx[0][i] = const0_rtx;

  pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
  ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode);
  simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode);
  cc0_rtx = gen_rtx_fmt_ (CC0, VOIDmode);
  invalid_insn_rtx = gen_rtx_INSN (VOIDmode,
                                   /*prev_insn=*/NULL,
                                   /*next_insn=*/NULL,
                                   /*bb=*/NULL,
                                   /*pattern=*/NULL_RTX,
                                   /*location=*/-1,
                                   CODE_FOR_nothing,
                                   /*reg_notes=*/NULL_RTX);
}
\f
/* Produce an exact duplicate of insn INSN after AFTER.  Take care to
   update libcall regions if present.  */

rtx_insn *
emit_copy_of_insn_after (rtx_insn *insn, rtx_insn *after)
{
  rtx_insn *new_rtx;
  rtx link;

  switch (GET_CODE (insn))
    {
    case INSN:
      new_rtx = emit_insn_after (copy_insn (PATTERN (insn)), after);
      break;

    case JUMP_INSN:
      new_rtx = emit_jump_insn_after (copy_insn (PATTERN (insn)), after);
      CROSSING_JUMP_P (new_rtx) = CROSSING_JUMP_P (insn);
      break;

    case DEBUG_INSN:
      new_rtx = emit_debug_insn_after (copy_insn (PATTERN (insn)), after);
      break;

    case CALL_INSN:
      new_rtx = emit_call_insn_after (copy_insn (PATTERN (insn)), after);
      if (CALL_INSN_FUNCTION_USAGE (insn))
        CALL_INSN_FUNCTION_USAGE (new_rtx)
          = copy_insn (CALL_INSN_FUNCTION_USAGE (insn));
      SIBLING_CALL_P (new_rtx) = SIBLING_CALL_P (insn);
      RTL_CONST_CALL_P (new_rtx) = RTL_CONST_CALL_P (insn);
      RTL_PURE_CALL_P (new_rtx) = RTL_PURE_CALL_P (insn);
      RTL_LOOPING_CONST_OR_PURE_CALL_P (new_rtx)
        = RTL_LOOPING_CONST_OR_PURE_CALL_P (insn);
      break;

    default:
      gcc_unreachable ();
    }

  /* Update LABEL_NUSES.  */
  mark_jump_label (PATTERN (new_rtx), new_rtx, 0);

  INSN_LOCATION (new_rtx) = INSN_LOCATION (insn);

  /* If the old insn is frame related, then so is the new one.  This is
     primarily needed for IA-64 unwind info which marks epilogue insns,
     which may be duplicated by the basic block reordering code.  */
  RTX_FRAME_RELATED_P (new_rtx) = RTX_FRAME_RELATED_P (insn);

  /* Locate the end of existing REG_NOTES in NEW_RTX.  */
  rtx *ptail = &REG_NOTES (new_rtx);
  while (*ptail != NULL_RTX)
    ptail = &XEXP (*ptail, 1);

  /* Copy all REG_NOTES except REG_LABEL_OPERAND since mark_jump_label
     will make them.  REG_LABEL_TARGETs are created there too, but are
     supposed to be sticky, so we copy them.  */
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND)
      {
        *ptail = duplicate_reg_note (link);
        ptail = &XEXP (*ptail, 1);
      }

  INSN_CODE (new_rtx) = INSN_CODE (insn);
  return new_rtx;
}

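/* Cached CLOBBER expressions for hard registers, created lazily below.
   The GTY((deletable)) marking (a descriptive note added here) lets the
   garbage collector discard the cache, since entries can be recreated.  */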
static GTY((deletable)) rtx hard_reg_clobbers [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
rtx
gen_hard_reg_clobber (machine_mode mode, unsigned int regno)
{
  if (hard_reg_clobbers[mode][regno])
    return hard_reg_clobbers[mode][regno];
  else
    return (hard_reg_clobbers[mode][regno] =
            gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (mode, regno)));
}

location_t prologue_location;
location_t epilogue_location;

/* Hold the current location and the last location, so that the location
   data structures are built lazily, only when instructions at a given
   place are needed.  */
static location_t curr_location;

/* Allocate the insn location data structure.  */
void
insn_locations_init (void)
{
  prologue_location = epilogue_location = 0;
  curr_location = UNKNOWN_LOCATION;
}

/* At the end of emit stage, clear current location.  */
void
insn_locations_finalize (void)
{
  epilogue_location = curr_location;
  curr_location = UNKNOWN_LOCATION;
}

/* Set current location.  */
void
set_curr_insn_location (location_t location)
{
  curr_location = location;
}

/* Get current location.  */
location_t
curr_insn_location (void)
{
  return curr_location;
}

/* Set the location of the insn chain starting at INSN to LOC.  */
void
set_insn_locations (rtx_insn *insn, location_t loc)
{
  while (insn)
    {
      if (INSN_P (insn))
        INSN_LOCATION (insn) = loc;
      insn = NEXT_INSN (insn);
    }
}

/* Return the lexical scope block INSN belongs to.  */
tree
insn_scope (const rtx_insn *insn)
{
  return LOCATION_BLOCK (INSN_LOCATION (insn));
}

/* Return line number of the statement that produced this insn.  */
int
insn_line (const rtx_insn *insn)
{
  return LOCATION_LINE (INSN_LOCATION (insn));
}

/* Return source file of the statement that produced this insn.  */
const char *
insn_file (const rtx_insn *insn)
{
  return LOCATION_FILE (INSN_LOCATION (insn));
}

/* Return expanded location of the statement that produced this insn.  */
expanded_location
insn_location (const rtx_insn *insn)
{
  return expand_location (INSN_LOCATION (insn));
}

/* Return true if memory model MODEL requires a pre-operation (release-style)
   barrier or a post-operation (acquire-style) barrier.  While not universal,
   this function matches the behavior of several targets.  */

bool
need_atomic_barrier_p (enum memmodel model, bool pre)
{
  switch (model & MEMMODEL_BASE_MASK)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
      return false;
    case MEMMODEL_RELEASE:
      return pre;
    case MEMMODEL_ACQUIRE:
      return !pre;
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      return true;
    default:
      gcc_unreachable ();
    }
}
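
/* Illustratively: a MEMMODEL_RELEASE store needs only a pre-operation
   barrier, a MEMMODEL_ACQUIRE load needs only a post-operation barrier,
   and MEMMODEL_SEQ_CST operations need both.  */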

/* Return a constant shift amount for shifting a value of mode MODE
   by VALUE bits.  */

rtx
gen_int_shift_amount (machine_mode, poly_int64 value)
{
  /* Use a 64-bit mode, to avoid any truncation.

     ??? Perhaps this should be automatically derived from the .md files
     instead, or perhaps have a target hook.  */
  scalar_int_mode shift_mode = (BITS_PER_UNIT == 8
                                ? DImode
                                : int_mode_for_size (64, 0).require ());
  return gen_int_mode (value, shift_mode);
}
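
/* E.g. (an illustrative note) gen_int_shift_amount (SImode, 3) returns a
   CONST_INT for 3 generated in DImode, wide enough that no plausible
   shift amount is truncated.  */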

/* Initialize fields of rtl_data related to stack alignment.  */

void
rtl_data::init_stack_alignment ()
{
  stack_alignment_needed = STACK_BOUNDARY;
  max_used_stack_slot_alignment = STACK_BOUNDARY;
  stack_alignment_estimated = 0;
  preferred_stack_boundary = STACK_BOUNDARY;
}

\f
#include "gt-emit-rtl.h"