/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "optabs.h"
#include "expmed.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"
#include "internal-fn.h"
#include "langhooks.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
\f
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
                rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into a register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
          {
            note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
            if (GET_MODE_UNIT_SIZE (op0_mode)
                > GET_MODE_UNIT_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, op0_mode);
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, op0_mode);
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
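
/* For example (an illustrative sketch): if expanding OP0 + OP1 produced
   a multi-insn sequence whose last insn sets TARGET, the note attached
   above is simply (plus:M OP0 OP1), letting later passes such as CSE
   treat TARGET as equivalent to that expression without having to
   understand the intermediate insns.  */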
\f
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
\f
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
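
/* For example (an illustrative sketch): widening a QImode register to
   SImode with NO_EXTEND set just yields the paradoxical lowpart
   (subreg:SI (reg:QI x) 0), leaving the upper 24 bits undefined;
   callers rely on truncating the wider result back to QImode, at which
   point those bits no longer matter.  */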
\f
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops  OP0     OP1     WIDE_OP
   widening-sum                 2     oprnd0  -       oprnd1
   widening-dot-product         3     oprnd0  oprnd1  oprnd2
   widening-mult                2     oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1     oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than the operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
            || ops->code == VEC_UNPACK_LO_EXPR)
           && VECTOR_BOOLEAN_TYPE_P (ops->type)
           && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
           && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
           && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR, if the mode of op0 and of the result
         is the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
         vec_unpacks_sbool_{lo,hi}_optab, so that we can pass to the
         pattern the number of elements in the wider vector.  */
      widen_pattern_optab
        = (ops->code == VEC_UNPACK_HI_EXPR
           ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
                                 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
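
/* For example (an illustrative sketch): broadcasting (const_int 5) into
   V4SImode yields the constant (const_vector:V4SI [5 5 5 5]); for a
   register operand we use the target's vec_duplicate pattern if it has
   one, and otherwise fall back to a vec_init whose elements are all
   the same register.  */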

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab, outof_input,
                                 gen_int_shift_amount (word_mode,
                                                       BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
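
/* For example (an illustrative sketch, for a left shift with
   BITS_PER_WORD == 32 and shift counts truncated to 5 bits): the bits
   carried into the high word for a variable count N are computed as

     carries = (OUTOF_INPUT >> 1) >> (~N & 31);

   rather than OUTOF_INPUT >> (32 - N): 1 + (31 - N) == 32 - N, the
   out-of-range count 32 - N never has to be formed, and N == 0
   correctly yields zero carries.  */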


/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label,
                           profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
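
/* For example (an illustrative sketch, with 32-bit words): an ASHIFT of
   the 64-bit value {HIGH, LOW} by a variable count N decomposes as

     if (N < 32)
       {
         high = (high << N) | (low >> (32 - N));
         low <<= N;
       }
     else
       {
         high = low << (N - 32);
         low = 0;
       }

   where OUTOF_INPUT is LOW, INTO_TARGET is HIGH, the first arm is the
   subword case, the second is the superword case, and low >> (32 - N)
   is the "carries" value computed by expand_subword_shift.  */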
\f
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                                 _______________________
                                [__op0_high_|__op0_low__]
                                 _______________________
              *                 [__op1_high_|__op1_low__]
          _______________________________________________
                                 _______________________
    (1)                         [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
                : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* Whether we use an unsigned widening multiply to compute the product
     of the low-order words directly, or a signed widening multiply that
     requires the operand adjustments described above, we begin by trying
     the two extra multiplications and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine the operand mode, so make sure at
     least one operand has a non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
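
/* For example (an illustrative sketch, with 32-bit words): for unsigned
   64-bit operands split as {h0, l0} and {h1, l1},

     (h0 * 2^32 + l0) * (h1 * 2^32 + l1)
       == l0 * l1                          (1)
          + (h0 * l1 + h1 * l0) * 2^32     (2a) + (2b)
          + h0 * h1 * 2^64                 (3)

   and since the result keeps only 64 bits, (3) and the high halves of
   (2a) and (2b) vanish modulo 2^64: one widening multiply for (1) plus
   two non-widening multiplies and an addition into the high word.  */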

/* Subroutine of expand_binop.  Optimize unsigned double-word OP0 % OP1 for
   constant OP1.  If for some bit in the range [BITS_PER_WORD / 2,
   BITS_PER_WORD] (preferring higher bits) ((1w << bit) % OP1) == 1, then the
   modulo can be computed in word-mode as
     ((OP0 & ((1w << bit) - 1)) + ((OP0 >> bit) & ((1w << bit) - 1))
      + (OP0 >> (2 * bit))) % OP1.
   Whether we need to sum 2, 3 or 4 values depends on the bit value; if 2,
   then the carry from the addition needs to be added too, i.e. like:
     sum += __builtin_add_overflow (low, high, &sum)

   Optimize signed double-word OP0 % OP1 similarly, just apply some correction
   factor to the sum before doing the unsigned remainder, in the form of
     sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
   then perform the unsigned
     remainder = sum % OP1;
   and finally
     remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1);  */

static rtx
expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp)
{
  if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0)
    return NULL_RTX;

  rtx_insn *last = get_last_insn ();
  for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--)
    {
      wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
      if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
        continue;
      rtx sum = NULL_RTX, mask = NULL_RTX;
      if (bit == BITS_PER_WORD)
        {
          /* For signed modulo we need to add correction to the sum
             and that might again overflow.  */
          if (!unsignedp)
            continue;
          if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
            continue;
          tree wtype = lang_hooks.types.type_for_mode (word_mode, 1);
          if (wtype == NULL_TREE)
            continue;
          tree ctype = build_complex_type (wtype);
          if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode))
            continue;
          machine_mode cmode = TYPE_MODE (ctype);
          rtx op00 = operand_subword_force (op0, 0, mode);
          rtx op01 = operand_subword_force (op0, 1, mode);
          rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode),
                                     gen_reg_rtx (word_mode));
          tree lhs = make_tree (ctype, cres);
          tree arg0 = make_tree (wtype, op00);
          tree arg1 = make_tree (wtype, op01);
          expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0,
                                  arg1, true, true, true, false, NULL);
          sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0),
                                     XEXP (cres, 1), NULL_RTX, 1,
                                     OPTAB_DIRECT);
          if (sum == NULL_RTX)
            return NULL_RTX;
        }
      else
        {
          /* Code below uses GEN_INT, so we need the masks to be representable
             in HOST_WIDE_INTs.  */
          if (bit >= HOST_BITS_PER_WIDE_INT)
            continue;
          /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
             overflow.  Consider 64-bit -1ULL for word size 32, if we add
             0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1.  */
          if (bit == BITS_PER_WORD - 1)
            continue;

          int count = (2 * BITS_PER_WORD + bit - 1) / bit;
          rtx sum_corr = NULL_RTX;

          if (!unsignedp)
            {
              /* For signed modulo, compute it as unsigned modulo of
                 sum with a correction added to it if OP0 is negative,
                 such that the result can be computed as unsigned
                 remainder + (((signed) OP0 >> (2 * BITS_PER_WORD - 1))
                 & (1 - OP1)).  */
              w = wi::min_value (2 * BITS_PER_WORD, SIGNED);
              wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
              wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
              /* wmod2 == -wmod1.  */
              wmod2 = wmod2 + (INTVAL (op1) - 1);
              if (wi::ne_p (wmod1, wmod2))
                {
                  wide_int wcorr = wmod2 - wmod1;
                  if (wi::neg_p (w))
                    wcorr = wcorr + INTVAL (op1);
                  /* Now verify if the count sums can't overflow, and punt
                     if they could.  */
                  w = wi::mask (bit, false, 2 * BITS_PER_WORD);
                  w = w * (count - 1);
                  w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
                                    false, 2 * BITS_PER_WORD);
                  w = w + wcorr;
                  w = wi::lrshift (w, BITS_PER_WORD);
                  if (wi::ne_p (w, 0))
                    continue;

                  mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
                                                mode);
                  mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
                                              GEN_INT (BITS_PER_WORD - 1),
                                              NULL_RTX, 0, OPTAB_DIRECT);
                  if (mask == NULL_RTX)
                    return NULL_RTX;
                  sum_corr = immed_wide_int_const (wcorr, word_mode);
                  sum_corr = expand_simple_binop (word_mode, AND, mask,
                                                  sum_corr, NULL_RTX, 1,
                                                  OPTAB_DIRECT);
                  if (sum_corr == NULL_RTX)
                    return NULL_RTX;
                }
            }

          for (int i = 0; i < count; i++)
            {
              rtx v = op0;
              if (i)
                v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit),
                                         NULL_RTX, 1, OPTAB_DIRECT);
              if (v == NULL_RTX)
                return NULL_RTX;
              v = lowpart_subreg (word_mode, v, mode);
              if (v == NULL_RTX)
                return NULL_RTX;
              if (i != count - 1)
                v = expand_simple_binop (word_mode, AND, v,
                                         GEN_INT ((HOST_WIDE_INT_1U << bit)
                                                  - 1), NULL_RTX, 1,
                                         OPTAB_DIRECT);
              if (v == NULL_RTX)
                return NULL_RTX;
              if (sum == NULL_RTX)
                sum = v;
              else
                sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX,
                                           1, OPTAB_DIRECT);
              if (sum == NULL_RTX)
                return NULL_RTX;
            }
          if (sum_corr)
            {
              sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr,
                                         NULL_RTX, 1, OPTAB_DIRECT);
              if (sum == NULL_RTX)
                return NULL_RTX;
            }
        }
      rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, sum,
                                     gen_int_mode (INTVAL (op1), word_mode),
                                     NULL_RTX, 1, OPTAB_DIRECT);
      if (remainder == NULL_RTX)
        return NULL_RTX;

      if (!unsignedp)
        {
          if (mask == NULL_RTX)
            {
              mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
                                            mode);
              mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
                                          GEN_INT (BITS_PER_WORD - 1),
                                          NULL_RTX, 0, OPTAB_DIRECT);
              if (mask == NULL_RTX)
                return NULL_RTX;
            }
          mask = expand_simple_binop (word_mode, AND, mask,
                                      gen_int_mode (1 - INTVAL (op1),
                                                    word_mode),
                                      NULL_RTX, 1, OPTAB_DIRECT);
          if (mask == NULL_RTX)
            return NULL_RTX;
          remainder = expand_simple_binop (word_mode, PLUS, remainder,
                                           mask, NULL_RTX, 1, OPTAB_DIRECT);
          if (remainder == NULL_RTX)
            return NULL_RTX;
        }

      remainder = convert_modes (mode, word_mode, remainder, unsignedp);
      /* Punt if we need any library calls.  */
      for (; last; last = NEXT_INSN (last))
        if (CALL_P (last))
          return NULL_RTX;
      return remainder;
    }
  return NULL_RTX;
}
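
/* For example (an illustrative sketch, with BITS_PER_WORD == 32 and
   OP1 == 5): (1 << 32) % 5 == 1, so each 32-bit chunk of OP0 has weight
   1 modulo 5, and an unsigned 64-bit OP0 % 5 reduces to

     carry = __builtin_add_overflow (low, high, &sum);
     sum += carry;
     remainder = sum % 5;

   where the carry, itself weighing 2^32 == 1 mod 5, is folded back in
   and the final modulo is a single word-mode operation.  */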

/* Similarly to the above function, but compute both quotient and remainder.
   The quotient can be computed from the remainder as:
     rem = op0 % op1;  // Handled using expand_doubleword_mod.
     quot = (op0 - rem) * inv;  // inv is the multiplicative inverse
                                // of op1 modulo 2^(2 * BITS_PER_WORD).

   We can also handle cases where op1 is a product of a power of two and
   an odd constant that expand_doubleword_mod can handle:
     op11 = 1 << __builtin_ctz (op1);
     op12 = op1 / op11;
     rem1 = op0 % op12;  // Handled using expand_doubleword_mod.
     quot1 = (op0 - rem1) * inv;  // inv is the multiplicative inverse
                                  // of op12 modulo 2^(2 * BITS_PER_WORD).
     rem = (quot1 % op11) * op12 + rem1;
     quot = quot1 / op11;  */

rtx
expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem,
                          bool unsignedp)
{
  *rem = NULL_RTX;

  /* A negative divisor should have been optimized into a positive one;
     similarly, modulo by 1 and modulo by a power of two are optimized
     differently too.  */
  if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1)))
    return NULL_RTX;

  rtx op11 = const1_rtx;
  rtx op12 = op1;
  if ((INTVAL (op1) & 1) == 0)
    {
      int bit = ctz_hwi (INTVAL (op1));
      op11 = GEN_INT (HOST_WIDE_INT_1 << bit);
      op12 = GEN_INT (INTVAL (op1) >> bit);
    }

  rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
  if (rem1 == NULL_RTX)
    return NULL_RTX;

  int prec = 2 * BITS_PER_WORD;
  wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
  wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
  wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
  rtx inv = immed_wide_int_const (m, mode);

  rtx_insn *last = get_last_insn ();
  rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1,
                                   NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  quot1 = expand_simple_binop (mode, MULT, quot1, inv,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  if (op11 != const1_rtx)
    {
      rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, quot1, op11,
                                NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
        return NULL_RTX;

      rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX,
                                  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
        return NULL_RTX;

      rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX,
                                  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
        return NULL_RTX;

      rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, quot1, op11,
                                 NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (quot2 == NULL_RTX)
        return NULL_RTX;

      rem1 = rem2;
      quot1 = quot2;
    }

  /* Punt if we need any library calls.  */
  for (; last; last = NEXT_INSN (last))
    if (CALL_P (last))
      return NULL_RTX;

  *rem = rem1;
  return quot1;
}
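
/* For example (an illustrative sketch, with 32-bit words and OP1 == 6):
   op11 == 2 and op12 == 3, rem1 = OP0 % 3 comes from
   expand_doubleword_mod, and since the multiplicative inverse of 3
   modulo 2^64 is 0xAAAAAAAAAAAAAAAB,

     quot1 = (OP0 - rem1) * 0xAAAAAAAAAAAAAAAB;  // exact division by 3
     rem = (quot1 % 2) * 3 + rem1;
     quot = quot1 / 2;

   where the final modulo and division by 2 are a simple mask and
   shift.  */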
\f
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}

/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value, mode0))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, an invalid constant op1 might have been expanded in a
     different mode than MODE.  As those values are invalid, force them
     into a register to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
        {
          machine_mode from_mode = widened_mode (mode, op0, op1);
          icode = find_widening_optab_handler (binoptab, mode, from_mode);
        }
      else
        icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
        {
          temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
                                        target, unsignedp, methods, last);
          if (temp)
            return temp;
        }
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
        newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
                                  ? umul_widen_optab
                                  : smul_widen_optab),
                                 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine the operand mode, so make sure
         at least one operand has a non-VOID mode.  */
1542 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
1543 op0 = force_reg (mode, op0);
1544 temp = expand_binop (wider_mode,
1545 unsignedp ? umul_widen_optab : smul_widen_optab,
1546 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1547
1548 if (temp != 0)
1549 {
1550 if (GET_MODE_CLASS (mode) == MODE_INT
1551 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1552 return gen_lowpart (mode, temp);
1553 else
1554 return convert_to_mode (mode, temp, unsignedp);
1555 }
1556 }
1557
1558 /* If this is a vector shift by a scalar, see if we can do a vector
1559 shift by a vector. If so, broadcast the scalar into a vector. */
1560 if (mclass == MODE_VECTOR_INT)
1561 {
1562 optab otheroptab = unknown_optab;
1563
1564 if (binoptab == ashl_optab)
1565 otheroptab = vashl_optab;
1566 else if (binoptab == ashr_optab)
1567 otheroptab = vashr_optab;
1568 else if (binoptab == lshr_optab)
1569 otheroptab = vlshr_optab;
1570 else if (binoptab == rotl_optab)
1571 otheroptab = vrotl_optab;
1572 else if (binoptab == rotr_optab)
1573 otheroptab = vrotr_optab;
1574
1575 if (otheroptab
1576 && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
1577 {
1578 /* The scalar may have been extended to be too wide. Truncate
1579 it back to the proper size to fit in the broadcast vector. */
1580 scalar_mode inner_mode = GET_MODE_INNER (mode);
1581 if (!CONST_INT_P (op1)
1582 && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
1583 > GET_MODE_BITSIZE (inner_mode)))
1584 op1 = force_reg (inner_mode,
1585 simplify_gen_unary (TRUNCATE, inner_mode, op1,
1586 GET_MODE (op1)));
1587 rtx vop1 = expand_vector_broadcast (mode, op1);
1588 if (vop1)
1589 {
1590 temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
1591 target, unsignedp, methods, last);
1592 if (temp)
1593 return temp;
1594 }
1595 }
1596 }
1597
1598 /* Look for a wider mode of the same class for which we think we
1599 can open-code the operation. Check for a widening multiply at the
1600 wider mode as well. */
1601
1602 if (CLASS_HAS_WIDER_MODES_P (mclass)
1603 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1604 FOR_EACH_WIDER_MODE (wider_mode, mode)
1605 {
1606 machine_mode next_mode;
1607 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1608 || (binoptab == smul_optab
1609 && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
1610 && (find_widening_optab_handler ((unsignedp
1611 ? umul_widen_optab
1612 : smul_widen_optab),
1613 next_mode, mode)
1614 != CODE_FOR_nothing)))
1615 {
1616 rtx xop0 = op0, xop1 = op1;
1617 int no_extend = 0;
1618
1619 /* For certain integer operations, we need not actually extend
1620 the narrow operands, as long as we will truncate
1621 the results to the same narrowness. */
1622
1623 if ((binoptab == ior_optab || binoptab == and_optab
1624 || binoptab == xor_optab
1625 || binoptab == add_optab || binoptab == sub_optab
1626 || binoptab == smul_optab || binoptab == ashl_optab)
1627 && mclass == MODE_INT)
1628 {
1629 no_extend = 1;
1630 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1631 xop0, unsignedp);
1632 if (binoptab != ashl_optab)
1633 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1634 xop1, unsignedp);
1635 }
1636
1637 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1638
1639 /* The second operand of a shift must always be extended. */
1640 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1641 no_extend && binoptab != ashl_optab);
1642
1643 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1644 unsignedp, OPTAB_DIRECT);
1645 if (temp)
1646 {
1647 if (mclass != MODE_INT
1648 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1649 {
1650 if (target == 0)
1651 target = gen_reg_rtx (mode);
1652 convert_move (target, temp, 0);
1653 return target;
1654 }
1655 else
1656 return gen_lowpart (mode, temp);
1657 }
1658 else
1659 delete_insns_since (last);
1660 }
1661 }
1662
1663 /* If operation is commutative,
1664 try to make the first operand a register.
1665 Even better, try to make it the same as the target.
1666 Also try to make the last operand a constant. */
1667 if (commutative_optab_p (binoptab)
1668 && swap_commutative_operands_with_target (target, op0, op1))
1669 std::swap (op0, op1);
1670
1671 /* These can be done a word at a time. */
1672 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1673 && is_int_mode (mode, &int_mode)
1674 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
1675 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1676 {
1677 int i;
1678 rtx_insn *insns;
1679
1680 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1681 won't be accurate, so use a new target. */
1682 if (target == 0
1683 || target == op0
1684 || target == op1
1685 || reg_overlap_mentioned_p (target, op0)
1686 || reg_overlap_mentioned_p (target, op1)
1687 || !valid_multiword_target_p (target))
1688 target = gen_reg_rtx (int_mode);
1689
1690 start_sequence ();
1691
1692 /* Do the actual arithmetic. */
1693 machine_mode op0_mode = GET_MODE (op0);
1694 machine_mode op1_mode = GET_MODE (op1);
1695 if (op0_mode == VOIDmode)
1696 op0_mode = int_mode;
1697 if (op1_mode == VOIDmode)
1698 op1_mode = int_mode;
1699 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
1700 {
1701 rtx target_piece = operand_subword (target, i, 1, int_mode);
1702 rtx x = expand_binop (word_mode, binoptab,
1703 operand_subword_force (op0, i, op0_mode),
1704 operand_subword_force (op1, i, op1_mode),
1705 target_piece, unsignedp, next_methods);
1706
1707 if (x == 0)
1708 break;
1709
1710 if (target_piece != x)
1711 emit_move_insn (target_piece, x);
1712 }
1713
1714 insns = get_insns ();
1715 end_sequence ();
1716
1717 if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
1718 {
1719 emit_insn (insns);
1720 return target;
1721 }
1722 }
1723
1724 /* Synthesize double word shifts from single word shifts. */
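/* (Illustrative decomposition for a left shift of the two-word value
hi:lo by a constant S with 0 < S < BITS_PER_WORD:

hi' = (hi << S) | (lo >> (BITS_PER_WORD - S));
lo' = lo << S;

expand_doubleword_shift generalizes this to variable counts and
to right shifts.) */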
1725 if ((binoptab == lshr_optab || binoptab == ashl_optab
1726 || binoptab == ashr_optab)
1727 && is_int_mode (mode, &int_mode)
1728 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1729 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1730 && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
1731 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1732 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1733 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1734 {
1735 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1736 scalar_int_mode op1_mode;
1737
1738 double_shift_mask = targetm.shift_truncation_mask (int_mode);
1739 shift_mask = targetm.shift_truncation_mask (word_mode);
1740 op1_mode = (GET_MODE (op1) != VOIDmode
1741 ? as_a <scalar_int_mode> (GET_MODE (op1))
1742 : word_mode);
1743
1744 /* Apply the truncation to constant shifts. */
1745 if (double_shift_mask > 0 && CONST_INT_P (op1))
1746 op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);
1747
1748 if (op1 == CONST0_RTX (op1_mode))
1749 return op0;
1750
1751 /* Make sure that this is a combination that expand_doubleword_shift
1752 can handle. See the comments there for details. */
1753 if (double_shift_mask == 0
1754 || (shift_mask == BITS_PER_WORD - 1
1755 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1756 {
1757 rtx_insn *insns;
1758 rtx into_target, outof_target;
1759 rtx into_input, outof_input;
1760 int left_shift, outof_word;
1761
1762 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1763 won't be accurate, so use a new target. */
1764 if (target == 0
1765 || target == op0
1766 || target == op1
1767 || reg_overlap_mentioned_p (target, op0)
1768 || reg_overlap_mentioned_p (target, op1)
1769 || !valid_multiword_target_p (target))
1770 target = gen_reg_rtx (int_mode);
1771
1772 start_sequence ();
1773
1774 /* OUTOF_* is the word we are shifting bits away from, and
1775 INTO_* is the word that we are shifting bits towards, thus
1776 they differ depending on the direction of the shift and
1777 WORDS_BIG_ENDIAN. */
1778
1779 left_shift = binoptab == ashl_optab;
1780 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1781
1782 outof_target = operand_subword (target, outof_word, 1, int_mode);
1783 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1784
1785 outof_input = operand_subword_force (op0, outof_word, int_mode);
1786 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1787
1788 if (expand_doubleword_shift (op1_mode, binoptab,
1789 outof_input, into_input, op1,
1790 outof_target, into_target,
1791 unsignedp, next_methods, shift_mask))
1792 {
1793 insns = get_insns ();
1794 end_sequence ();
1795
1796 emit_insn (insns);
1797 return target;
1798 }
1799 end_sequence ();
1800 }
1801 }
1802
1803 /* Synthesize double word rotates from single word shifts. */
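/* (Sketch of the decomposition used below for a left rotate by a
constant S with 0 < S < BITS_PER_WORD, in terms of the INTO/OUTOF
words defined further down:

into'  = (into  << S) | (outof >> (BITS_PER_WORD - S));
outof' = (outof << S) | (into  >> (BITS_PER_WORD - S));

S == BITS_PER_WORD degenerates into a plain word swap, and larger
S is handled by the symmetric pair of shift counts.) */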
1804 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1805 && is_int_mode (mode, &int_mode)
1806 && CONST_INT_P (op1)
1807 && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
1808 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1809 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1810 {
1811 rtx_insn *insns;
1812 rtx into_target, outof_target;
1813 rtx into_input, outof_input;
1814 rtx inter;
1815 int shift_count, left_shift, outof_word;
1816
1817 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1818 won't be accurate, so use a new target. Do this also if target is not
1819 a REG, first because having a register instead may open optimization
1820 opportunities, and second because if target and op0 happen to be MEMs
1821 designating the same location, we would risk clobbering it too early
1822 in the code sequence we generate below. */
1823 if (target == 0
1824 || target == op0
1825 || target == op1
1826 || !REG_P (target)
1827 || reg_overlap_mentioned_p (target, op0)
1828 || reg_overlap_mentioned_p (target, op1)
1829 || !valid_multiword_target_p (target))
1830 target = gen_reg_rtx (int_mode);
1831
1832 start_sequence ();
1833
1834 shift_count = INTVAL (op1);
1835
1836 /* OUTOF_* is the word we are shifting bits away from, and
1837 INTO_* is the word that we are shifting bits towards, thus
1838 they differ depending on the direction of the shift and
1839 WORDS_BIG_ENDIAN. */
1840
1841 left_shift = (binoptab == rotl_optab);
1842 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1843
1844 outof_target = operand_subword (target, outof_word, 1, int_mode);
1845 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1846
1847 outof_input = operand_subword_force (op0, outof_word, int_mode);
1848 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1849
1850 if (shift_count == BITS_PER_WORD)
1851 {
1852 /* This is just a word swap. */
1853 emit_move_insn (outof_target, into_input);
1854 emit_move_insn (into_target, outof_input);
1855 inter = const0_rtx;
1856 }
1857 else
1858 {
1859 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1860 HOST_WIDE_INT first_shift_count, second_shift_count;
1861 optab reverse_unsigned_shift, unsigned_shift;
1862
1863 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1864 ? lshr_optab : ashl_optab);
1865
1866 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1867 ? ashl_optab : lshr_optab);
1868
1869 if (shift_count > BITS_PER_WORD)
1870 {
1871 first_shift_count = shift_count - BITS_PER_WORD;
1872 second_shift_count = 2 * BITS_PER_WORD - shift_count;
1873 }
1874 else
1875 {
1876 first_shift_count = BITS_PER_WORD - shift_count;
1877 second_shift_count = shift_count;
1878 }
1879 rtx first_shift_count_rtx
1880 = gen_int_shift_amount (word_mode, first_shift_count);
1881 rtx second_shift_count_rtx
1882 = gen_int_shift_amount (word_mode, second_shift_count);
1883
1884 into_temp1 = expand_binop (word_mode, unsigned_shift,
1885 outof_input, first_shift_count_rtx,
1886 NULL_RTX, unsignedp, next_methods);
1887 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1888 into_input, second_shift_count_rtx,
1889 NULL_RTX, unsignedp, next_methods);
1890
1891 if (into_temp1 != 0 && into_temp2 != 0)
1892 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1893 into_target, unsignedp, next_methods);
1894 else
1895 inter = 0;
1896
1897 if (inter != 0 && inter != into_target)
1898 emit_move_insn (into_target, inter);
1899
1900 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1901 into_input, first_shift_count_rtx,
1902 NULL_RTX, unsignedp, next_methods);
1903 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1904 outof_input, second_shift_count_rtx,
1905 NULL_RTX, unsignedp, next_methods);
1906
1907 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1908 inter = expand_binop (word_mode, ior_optab,
1909 outof_temp1, outof_temp2,
1910 outof_target, unsignedp, next_methods);
1911
1912 if (inter != 0 && inter != outof_target)
1913 emit_move_insn (outof_target, inter);
1914 }
1915
1916 insns = get_insns ();
1917 end_sequence ();
1918
1919 if (inter != 0)
1920 {
1921 emit_insn (insns);
1922 return target;
1923 }
1924 }
1925
1926 /* These can be done a word at a time by propagating carries. */
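/* (In C terms, the scheme below for a two-word unsigned addition is
roughly:

lo = lo0 + lo1;
carry = lo < lo0;	   (did the addition wrap around?)
hi = hi0 + hi1 + carry;

with emit_store_flag_force supplying the LT comparison, and GT
playing the same role for subtraction; an illustrative sketch.) */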
1927 if ((binoptab == add_optab || binoptab == sub_optab)
1928 && is_int_mode (mode, &int_mode)
1929 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1930 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1931 {
1932 unsigned int i;
1933 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1934 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1935 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1936 rtx xop0, xop1, xtarget;
1937
1938 /* We can handle either a 1 or -1 value for the carry. If
1939 STORE_FLAG_VALUE is one of those, use it. Otherwise, use 1 since it is the
1940 one easiest to get. */
1941 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1942 int normalizep = STORE_FLAG_VALUE;
1943 #else
1944 int normalizep = 1;
1945 #endif
1946
1947 /* Prepare the operands. */
1948 xop0 = force_reg (int_mode, op0);
1949 xop1 = force_reg (int_mode, op1);
1950
1951 xtarget = gen_reg_rtx (int_mode);
1952
1953 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1954 target = xtarget;
1955
1956 /* Indicate for flow that the entire target reg is being set. */
1957 if (REG_P (target))
1958 emit_clobber (xtarget);
1959
1960 /* Do the actual arithmetic. */
1961 for (i = 0; i < nwords; i++)
1962 {
1963 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1964 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1965 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1966 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1967 rtx x;
1968
1969 /* Main add/subtract of the input operands. */
1970 x = expand_binop (word_mode, binoptab,
1971 op0_piece, op1_piece,
1972 target_piece, unsignedp, next_methods);
1973 if (x == 0)
1974 break;
1975
1976 if (i + 1 < nwords)
1977 {
1978 /* Store carry from main add/subtract. */
1979 carry_out = gen_reg_rtx (word_mode);
1980 carry_out = emit_store_flag_force (carry_out,
1981 (binoptab == add_optab
1982 ? LT : GT),
1983 x, op0_piece,
1984 word_mode, 1, normalizep);
1985 }
1986
1987 if (i > 0)
1988 {
1989 rtx newx;
1990
1991 /* Add/subtract previous carry to main result. */
1992 newx = expand_binop (word_mode,
1993 normalizep == 1 ? binoptab : otheroptab,
1994 x, carry_in,
1995 NULL_RTX, 1, next_methods);
1996
1997 if (i + 1 < nwords)
1998 {
1999 /* Get out carry from adding/subtracting carry in. */
2000 rtx carry_tmp = gen_reg_rtx (word_mode);
2001 carry_tmp = emit_store_flag_force (carry_tmp,
2002 (binoptab == add_optab
2003 ? LT : GT),
2004 newx, x,
2005 word_mode, 1, normalizep);
2006
2007 /* Logical-ior the two possible carries together. */
2008 carry_out = expand_binop (word_mode, ior_optab,
2009 carry_out, carry_tmp,
2010 carry_out, 0, next_methods);
2011 if (carry_out == 0)
2012 break;
2013 }
2014 emit_move_insn (target_piece, newx);
2015 }
2016 else
2017 {
2018 if (x != target_piece)
2019 emit_move_insn (target_piece, x);
2020 }
2021
2022 carry_in = carry_out;
2023 }
2024
2025 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
2026 {
2027 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
2028 || ! rtx_equal_p (target, xtarget))
2029 {
2030 rtx_insn *temp = emit_move_insn (target, xtarget);
2031
2032 set_dst_reg_note (temp, REG_EQUAL,
2033 gen_rtx_fmt_ee (optab_to_code (binoptab),
2034 int_mode, copy_rtx (xop0),
2035 copy_rtx (xop1)),
2036 target);
2037 }
2038 else
2039 target = xtarget;
2040
2041 return target;
2042 }
2043
2044 else
2045 delete_insns_since (last);
2046 }
2047
2048 /* Attempt to synthesize double word multiplies using a sequence of word
2049 mode multiplications. We first attempt to generate a sequence using a
2050 more efficient unsigned widening multiply, and if that fails we then
2051 try using a signed widening multiply. */
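/* (The identity being exploited, with W bits per word and the
operands written as hi0:lo0 and hi1:lo1:

(hi0*2^W + lo0) * (hi1*2^W + lo1)
== lo0*lo1 + (hi0*lo1 + lo0*hi1) * 2^W	(mod 2^(2*W))

so one widening word multiply plus two plain word multiplies and
an addition into the high word are enough.) */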
2052
2053 if (binoptab == smul_optab
2054 && is_int_mode (mode, &int_mode)
2055 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2056 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2057 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2058 {
2059 rtx product = NULL_RTX;
2060 if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
2061 != CODE_FOR_nothing)
2062 {
2063 product = expand_doubleword_mult (int_mode, op0, op1, target,
2064 true, methods);
2065 if (!product)
2066 delete_insns_since (last);
2067 }
2068
2069 if (product == NULL_RTX
2070 && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
2071 != CODE_FOR_nothing))
2072 {
2073 product = expand_doubleword_mult (int_mode, op0, op1, target,
2074 false, methods);
2075 if (!product)
2076 delete_insns_since (last);
2077 }
2078
2079 if (product != NULL_RTX)
2080 {
2081 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2082 {
2083 rtx_insn *move = emit_move_insn (target ? target : product,
2084 product);
2085 set_dst_reg_note (move,
2086 REG_EQUAL,
2087 gen_rtx_fmt_ee (MULT, int_mode,
2088 copy_rtx (op0),
2089 copy_rtx (op1)),
2090 target ? target : product);
2091 }
2092 return product;
2093 }
2094 }
2095
2096 /* Attempt to synthesize double word modulo or division by a constant divisor. */
2097 if ((binoptab == umod_optab
2098 || binoptab == smod_optab
2099 || binoptab == udiv_optab
2100 || binoptab == sdiv_optab)
2101 && optimize
2102 && CONST_INT_P (op1)
2103 && is_int_mode (mode, &int_mode)
2104 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2105 && optab_handler ((binoptab == umod_optab || binoptab == udiv_optab)
2106 ? udivmod_optab : sdivmod_optab,
2107 int_mode) == CODE_FOR_nothing
2108 && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
2109 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
2110 && optimize_insn_for_speed_p ())
2111 {
2112 rtx res = NULL_RTX;
2113 if ((binoptab == umod_optab || binoptab == smod_optab)
2114 && (INTVAL (op1) & 1) == 0)
2115 res = expand_doubleword_mod (int_mode, op0, op1,
2116 binoptab == umod_optab);
2117 else
2118 {
2119 rtx quot = expand_doubleword_divmod (int_mode, op0, op1, &res,
2120 binoptab == umod_optab
2121 || binoptab == udiv_optab);
2122 if (quot == NULL_RTX)
2123 res = NULL_RTX;
2124 else if (binoptab == udiv_optab || binoptab == sdiv_optab)
2125 res = quot;
2126 }
2127 if (res != NULL_RTX)
2128 {
2129 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2130 {
2131 rtx_insn *move = emit_move_insn (target ? target : res,
2132 res);
2133 set_dst_reg_note (move, REG_EQUAL,
2134 gen_rtx_fmt_ee (optab_to_code (binoptab),
2135 int_mode, copy_rtx (op0), op1),
2136 target ? target : res);
2137 }
2138 return res;
2139 }
2140 else
2141 delete_insns_since (last);
2142 }
2143
2144 /* It can't be open-coded in this mode.
2145 Use a library call if one is available and caller says that's ok. */
2146
2147 libfunc = optab_libfunc (binoptab, mode);
2148 if (libfunc
2149 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2150 {
2151 rtx_insn *insns;
2152 rtx op1x = op1;
2153 machine_mode op1_mode = mode;
2154 rtx value;
2155
2156 start_sequence ();
2157
2158 if (shift_optab_p (binoptab))
2159 {
2160 op1_mode = targetm.libgcc_shift_count_mode ();
2161 /* Specify unsigned here,
2162 since negative shift counts are meaningless. */
2163 op1x = convert_to_mode (op1_mode, op1, 1);
2164 }
2165
2166 if (GET_MODE (op0) != VOIDmode
2167 && GET_MODE (op0) != mode)
2168 op0 = convert_to_mode (mode, op0, unsignedp);
2169
2170 /* Pass 1 for NO_QUEUE so we don't lose any increments
2171 if the libcall is cse'd or moved. */
2172 value = emit_library_call_value (libfunc,
2173 NULL_RTX, LCT_CONST, mode,
2174 op0, mode, op1x, op1_mode);
2175
2176 insns = get_insns ();
2177 end_sequence ();
2178
2179 bool trapv = trapv_binoptab_p (binoptab);
2180 target = gen_reg_rtx (mode);
2181 emit_libcall_block_1 (insns, target, value,
2182 trapv ? NULL_RTX
2183 : gen_rtx_fmt_ee (optab_to_code (binoptab),
2184 mode, op0, op1), trapv);
2185
2186 return target;
2187 }
2188
2189 delete_insns_since (last);
2190
2191 /* It can't be done in this mode. Can we do it in a wider mode? */
2192
2193 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2194 || methods == OPTAB_MUST_WIDEN))
2195 {
2196 /* Caller says don't even try. */
2197 delete_insns_since (entry_last);
2198 return 0;
2199 }
2200
2201 /* Compute the value of METHODS to pass to recursive calls.
2202 Don't allow widening to be tried recursively. */
2203
2204 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2205
2206 /* Look for a wider mode of the same class for which it appears we can do
2207 the operation. */
2208
2209 if (CLASS_HAS_WIDER_MODES_P (mclass))
2210 {
2211 /* This code doesn't make sense for conversion optabs, since we
2212 wouldn't then want to extend the operands to be the same size
2213 as the result. */
2214 gcc_assert (!convert_optab_p (binoptab));
2215 FOR_EACH_WIDER_MODE (wider_mode, mode)
2216 {
2217 if (optab_handler (binoptab, wider_mode)
2218 || (methods == OPTAB_LIB
2219 && optab_libfunc (binoptab, wider_mode)))
2220 {
2221 rtx xop0 = op0, xop1 = op1;
2222 int no_extend = 0;
2223
2224 /* For certain integer operations, we need not actually extend
2225 the narrow operands, as long as we will truncate
2226 the results to the same narrowness. */
2227
2228 if ((binoptab == ior_optab || binoptab == and_optab
2229 || binoptab == xor_optab
2230 || binoptab == add_optab || binoptab == sub_optab
2231 || binoptab == smul_optab || binoptab == ashl_optab)
2232 && mclass == MODE_INT)
2233 no_extend = 1;
2234
2235 xop0 = widen_operand (xop0, wider_mode, mode,
2236 unsignedp, no_extend);
2237
2238 /* The second operand of a shift must always be extended. */
2239 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2240 no_extend && binoptab != ashl_optab);
2241
2242 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2243 unsignedp, methods);
2244 if (temp)
2245 {
2246 if (mclass != MODE_INT
2247 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2248 {
2249 if (target == 0)
2250 target = gen_reg_rtx (mode);
2251 convert_move (target, temp, 0);
2252 return target;
2253 }
2254 else
2255 return gen_lowpart (mode, temp);
2256 }
2257 else
2258 delete_insns_since (last);
2259 }
2260 }
2261 }
2262
2263 delete_insns_since (entry_last);
2264 return 0;
2265 }
2266 \f
2267 /* Expand a binary operator which has both signed and unsigned forms.
2268 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2269 signed operations.
2270
2271 If we widen unsigned operands, we may use a signed wider operation instead
2272 of an unsigned wider operation, since the result would be the same. */
2273
2274 rtx
2275 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
2276 rtx op0, rtx op1, rtx target, int unsignedp,
2277 enum optab_methods methods)
2278 {
2279 rtx temp;
2280 optab direct_optab = unsignedp ? uoptab : soptab;
2281 bool save_enable;
2282
2283 /* Do it without widening, if possible. */
2284 temp = expand_binop (mode, direct_optab, op0, op1, target,
2285 unsignedp, OPTAB_DIRECT);
2286 if (temp || methods == OPTAB_DIRECT)
2287 return temp;
2288
2289 /* Try widening to a signed int. Disable any direct use of any
2290 signed insn in the current mode. */
2291 save_enable = swap_optab_enable (soptab, mode, false);
2292
2293 temp = expand_binop (mode, soptab, op0, op1, target,
2294 unsignedp, OPTAB_WIDEN);
2295
2296 /* For unsigned operands, try widening to an unsigned int. */
2297 if (!temp && unsignedp)
2298 temp = expand_binop (mode, uoptab, op0, op1, target,
2299 unsignedp, OPTAB_WIDEN);
2300 if (temp || methods == OPTAB_WIDEN)
2301 goto egress;
2302
2303 /* Use the right width libcall if that exists. */
2304 temp = expand_binop (mode, direct_optab, op0, op1, target,
2305 unsignedp, OPTAB_LIB);
2306 if (temp || methods == OPTAB_LIB)
2307 goto egress;
2308
2309 /* Must widen and use a libcall; use either signed or unsigned. */
2310 temp = expand_binop (mode, soptab, op0, op1, target,
2311 unsignedp, methods);
2312 if (!temp && unsignedp)
2313 temp = expand_binop (mode, uoptab, op0, op1, target,
2314 unsignedp, methods);
2315
2316 egress:
2317 /* Undo the fiddling above. */
2318 if (save_enable)
2319 swap_optab_enable (soptab, mode, true);
2320 return temp;
2321 }
2322 \f
2323 /* Generate code to perform an operation specified by UNOPTAB
2324 on operand OP0, with two results to TARG0 and TARG1.
2325 We assume that the order of the operands for the instruction
2326 is TARG0, TARG1, OP0.
2327
2328 Either TARG0 or TARG1 may be zero, but what that means is that
2329 the result is not actually wanted. We will generate it into
2330 a dummy pseudo-reg and discard it. They may not both be zero.
2331
2332 Returns 1 if this operation can be performed; 0 if not. */
2333
2334 int
2335 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2336 int unsignedp)
2337 {
2338 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2339 enum mode_class mclass;
2340 machine_mode wider_mode;
2341 rtx_insn *entry_last = get_last_insn ();
2342 rtx_insn *last;
2343
2344 mclass = GET_MODE_CLASS (mode);
2345
2346 if (!targ0)
2347 targ0 = gen_reg_rtx (mode);
2348 if (!targ1)
2349 targ1 = gen_reg_rtx (mode);
2350
2351 /* Record where to go back to if we fail. */
2352 last = get_last_insn ();
2353
2354 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2355 {
2356 class expand_operand ops[3];
2357 enum insn_code icode = optab_handler (unoptab, mode);
2358
2359 create_fixed_operand (&ops[0], targ0);
2360 create_fixed_operand (&ops[1], targ1);
2361 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2362 if (maybe_expand_insn (icode, 3, ops))
2363 return 1;
2364 }
2365
2366 /* It can't be done in this mode. Can we do it in a wider mode? */
2367
2368 if (CLASS_HAS_WIDER_MODES_P (mclass))
2369 {
2370 FOR_EACH_WIDER_MODE (wider_mode, mode)
2371 {
2372 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2373 {
2374 rtx t0 = gen_reg_rtx (wider_mode);
2375 rtx t1 = gen_reg_rtx (wider_mode);
2376 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2377
2378 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2379 {
2380 convert_move (targ0, t0, unsignedp);
2381 convert_move (targ1, t1, unsignedp);
2382 return 1;
2383 }
2384 else
2385 delete_insns_since (last);
2386 }
2387 }
2388 }
2389
2390 delete_insns_since (entry_last);
2391 return 0;
2392 }
2393 \f
2394 /* Generate code to perform an operation specified by BINOPTAB
2395 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2396 We assume that the order of the operands for the instruction
2397 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2398 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2399
2400 Either TARG0 or TARG1 may be zero, but what that means is that
2401 the result is not actually wanted. We will generate it into
2402 a dummy pseudo-reg and discard it. They may not both be zero.
2403
2404 Returns 1 if this operation can be performed; 0 if not. */
2405
2406 int
2407 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2408 int unsignedp)
2409 {
2410 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2411 enum mode_class mclass;
2412 machine_mode wider_mode;
2413 rtx_insn *entry_last = get_last_insn ();
2414 rtx_insn *last;
2415
2416 mclass = GET_MODE_CLASS (mode);
2417
2418 if (!targ0)
2419 targ0 = gen_reg_rtx (mode);
2420 if (!targ1)
2421 targ1 = gen_reg_rtx (mode);
2422
2423 /* Record where to go back to if we fail. */
2424 last = get_last_insn ();
2425
2426 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2427 {
2428 class expand_operand ops[4];
2429 enum insn_code icode = optab_handler (binoptab, mode);
2430 machine_mode mode0 = insn_data[icode].operand[1].mode;
2431 machine_mode mode1 = insn_data[icode].operand[2].mode;
2432 rtx xop0 = op0, xop1 = op1;
2433
2434 /* If we are optimizing, force expensive constants into a register. */
2435 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2436 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2437
2438 create_fixed_operand (&ops[0], targ0);
2439 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2440 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2441 create_fixed_operand (&ops[3], targ1);
2442 if (maybe_expand_insn (icode, 4, ops))
2443 return 1;
2444 delete_insns_since (last);
2445 }
2446
2447 /* It can't be done in this mode. Can we do it in a wider mode? */
2448
2449 if (CLASS_HAS_WIDER_MODES_P (mclass))
2450 {
2451 FOR_EACH_WIDER_MODE (wider_mode, mode)
2452 {
2453 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2454 {
2455 rtx t0 = gen_reg_rtx (wider_mode);
2456 rtx t1 = gen_reg_rtx (wider_mode);
2457 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2458 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2459
2460 if (expand_twoval_binop (binoptab, cop0, cop1,
2461 t0, t1, unsignedp))
2462 {
2463 convert_move (targ0, t0, unsignedp);
2464 convert_move (targ1, t1, unsignedp);
2465 return 1;
2466 }
2467 else
2468 delete_insns_since (last);
2469 }
2470 }
2471 }
2472
2473 delete_insns_since (entry_last);
2474 return 0;
2475 }
2476
2477 /* Expand the two-valued library call indicated by BINOPTAB, but
2478 preserve only one of the values. If TARG0 is non-NULL, the first
2479 value is placed into TARG0; otherwise the second value is placed
2480 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2481 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2482 This routine assumes that the value returned by the library call is
2483 as if the return value was of an integral mode twice as wide as the
2484 mode of OP0. Returns 1 if the call was successful. */
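/* (Concretely: the first value is taken from the returned double-wide
value at subreg byte offset 0 and the second at byte offset
GET_MODE_SIZE (mode), matching the simplify_gen_subreg call below;
a description of this routine's convention only.) */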
2485
2486 bool
2487 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2488 rtx targ0, rtx targ1, enum rtx_code code)
2489 {
2490 machine_mode mode;
2491 machine_mode libval_mode;
2492 rtx libval;
2493 rtx_insn *insns;
2494 rtx libfunc;
2495
2496 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2497 gcc_assert (!targ0 != !targ1);
2498
2499 mode = GET_MODE (op0);
2500 libfunc = optab_libfunc (binoptab, mode);
2501 if (!libfunc)
2502 return false;
2503
2504 /* The value returned by the library function will have twice as
2505 many bits as the nominal MODE. */
2506 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2507 start_sequence ();
2508 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2509 libval_mode,
2510 op0, mode,
2511 op1, mode);
2512 /* Get the part of VAL containing the value that we want. */
2513 libval = simplify_gen_subreg (mode, libval, libval_mode,
2514 targ0 ? 0 : GET_MODE_SIZE (mode));
2515 insns = get_insns ();
2516 end_sequence ();
2517 /* Move the result into the desired location. */
2518 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2519 gen_rtx_fmt_ee (code, mode, op0, op1));
2520
2521 return true;
2522 }
2523
2524 \f
2525 /* Wrapper around expand_unop which takes an rtx code to specify
2526 the operation to perform, not an optab pointer. All other
2527 arguments are the same. */
2528 rtx
2529 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2530 rtx target, int unsignedp)
2531 {
2532 optab unop = code_to_optab (code);
2533 gcc_assert (unop);
2534
2535 return expand_unop (mode, unop, op0, target, unsignedp);
2536 }
2537
2538 /* Try calculating
2539 (clz:narrow x)
2540 as
2541 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2542
2543 A similar operation can be used for clrsb. UNOPTAB says which operation
2544 we are trying to expand. */
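/* (E.g., an 8-bit clz on a machine with only a 32-bit clz instruction:
the zero-extended operand has 24 extra leading zeros, so
clz8 (x) == clz32 ((uint32_t) (uint8_t) x) - (32 - 8); an
illustrative identity, valid for x != 0.) */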
2545 static rtx
2546 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2547 {
2548 opt_scalar_int_mode wider_mode_iter;
2549 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2550 {
2551 scalar_int_mode wider_mode = wider_mode_iter.require ();
2552 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2553 {
2554 rtx xop0, temp;
2555 rtx_insn *last;
2556
2557 last = get_last_insn ();
2558
2559 if (target == 0)
2560 target = gen_reg_rtx (mode);
2561 xop0 = widen_operand (op0, wider_mode, mode,
2562 unoptab != clrsb_optab, false);
2563 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2564 unoptab != clrsb_optab);
2565 if (temp != 0)
2566 temp = expand_binop
2567 (wider_mode, sub_optab, temp,
2568 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2569 - GET_MODE_PRECISION (mode),
2570 wider_mode),
2571 target, true, OPTAB_DIRECT);
2572 if (temp == 0)
2573 delete_insns_since (last);
2574
2575 return temp;
2576 }
2577 }
2578 return 0;
2579 }
2580
2581 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2582 quantities, choosing which based on whether the high word is nonzero. */
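/* (I.e., for a double-word value hi:lo, the sketch is

clz2w (x) == (hi != 0 ? clzw (hi) : BITS_PER_WORD + clzw (lo));

illustrative C-like notation for the branchy sequence below.) */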
2583 static rtx
2584 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2585 {
2586 rtx xop0 = force_reg (mode, op0);
2587 rtx subhi = gen_highpart (word_mode, xop0);
2588 rtx sublo = gen_lowpart (word_mode, xop0);
2589 rtx_code_label *hi0_label = gen_label_rtx ();
2590 rtx_code_label *after_label = gen_label_rtx ();
2591 rtx_insn *seq;
2592 rtx temp, result;
2593
2594 /* If we were not given a target, use a word_mode register, not a
2595 'mode' register. The result will fit, and nobody is expecting
2596 anything bigger (the return type of __builtin_clz* is int). */
2597 if (!target)
2598 target = gen_reg_rtx (word_mode);
2599
2600 /* In any case, write to a word_mode scratch in both branches of the
2601 conditional, so we can ensure there is a single move insn setting
2602 'target' to tag a REG_EQUAL note on. */
2603 result = gen_reg_rtx (word_mode);
2604
2605 start_sequence ();
2606
2607 /* If the high word is not equal to zero,
2608 then clz of the full value is clz of the high word. */
2609 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2610 word_mode, true, hi0_label);
2611
2612 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2613 if (!temp)
2614 goto fail;
2615
2616 if (temp != result)
2617 convert_move (result, temp, true);
2618
2619 emit_jump_insn (targetm.gen_jump (after_label));
2620 emit_barrier ();
2621
2622 /* Else clz of the full value is clz of the low word plus the number
2623 of bits in the high word. */
2624 emit_label (hi0_label);
2625
2626 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2627 if (!temp)
2628 goto fail;
2629 temp = expand_binop (word_mode, add_optab, temp,
2630 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2631 result, true, OPTAB_DIRECT);
2632 if (!temp)
2633 goto fail;
2634 if (temp != result)
2635 convert_move (result, temp, true);
2636
2637 emit_label (after_label);
2638 convert_move (target, result, true);
2639
2640 seq = get_insns ();
2641 end_sequence ();
2642
2643 add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
2644 emit_insn (seq);
2645 return target;
2646
2647 fail:
2648 end_sequence ();
2649 return 0;
2650 }
2651
2652 /* Try calculating popcount of a double-word quantity as two popcount's of
2653 word-sized quantities and summing up the results. */
2654 static rtx
2655 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2656 {
2657 rtx t0, t1, t;
2658 rtx_insn *seq;
2659
2660 start_sequence ();
2661
2662 t0 = expand_unop_direct (word_mode, popcount_optab,
2663 operand_subword_force (op0, 0, mode), NULL_RTX,
2664 true);
2665 t1 = expand_unop_direct (word_mode, popcount_optab,
2666 operand_subword_force (op0, 1, mode), NULL_RTX,
2667 true);
2668 if (!t0 || !t1)
2669 {
2670 end_sequence ();
2671 return NULL_RTX;
2672 }
2673
2674 /* If we were not given a target, use a word_mode register, not a
2675 'mode' register. The result will fit, and nobody is expecting
2676 anything bigger (the return type of __builtin_popcount* is int). */
2677 if (!target)
2678 target = gen_reg_rtx (word_mode);
2679
2680 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2681
2682 seq = get_insns ();
2683 end_sequence ();
2684
2685 add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
2686 emit_insn (seq);
2687 return t;
2688 }
2689
2690 /* Try calculating
2691 (parity:wide x)
2692 as
2693 (parity:narrow (low (x) ^ high (x))) */
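/* (This works because parity is a mod-2 bit count and XOR adds the
corresponding bit contributions of the two words mod 2.) */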
2694 static rtx
2695 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2696 {
2697 rtx t = expand_binop (word_mode, xor_optab,
2698 operand_subword_force (op0, 0, mode),
2699 operand_subword_force (op0, 1, mode),
2700 NULL_RTX, 0, OPTAB_DIRECT);
2701 return expand_unop (word_mode, parity_optab, t, target, true);
2702 }
2703
2704 /* Try calculating
2705 (bswap:narrow x)
2706 as
2707 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
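/* (E.g., HImode bswap via SImode bswap: 0xAABB zero-extends to
0x0000AABB, byte-swaps to 0xBBAA0000, and shifting right by
32 - 16 bits recovers 0xBBAA; an illustrative trace.) */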
2708 static rtx
2709 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2710 {
2711 rtx x;
2712 rtx_insn *last;
2713 opt_scalar_int_mode wider_mode_iter;
2714
2715 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2716 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2717 != CODE_FOR_nothing)
2718 break;
2719
2720 if (!wider_mode_iter.exists ())
2721 return NULL_RTX;
2722
2723 scalar_int_mode wider_mode = wider_mode_iter.require ();
2724 last = get_last_insn ();
2725
2726 x = widen_operand (op0, wider_mode, mode, true, true);
2727 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2728
2729 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2730 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2731 if (x != 0)
2732 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2733 GET_MODE_BITSIZE (wider_mode)
2734 - GET_MODE_BITSIZE (mode),
2735 NULL_RTX, true);
2736
2737 if (x != 0)
2738 {
2739 if (target == 0)
2740 target = gen_reg_rtx (mode);
2741 emit_move_insn (target, gen_lowpart (mode, x));
2742 }
2743 else
2744 delete_insns_since (last);
2745
2746 return target;
2747 }
2748
2749 /* Try calculating bswap as two bswaps of two word-sized operands. */
2750
2751 static rtx
2752 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2753 {
2754 rtx t0, t1;
2755
2756 t1 = expand_unop (word_mode, bswap_optab,
2757 operand_subword_force (op, 0, mode), NULL_RTX, true);
2758 t0 = expand_unop (word_mode, bswap_optab,
2759 operand_subword_force (op, 1, mode), NULL_RTX, true);
2760
2761 if (target == 0 || !valid_multiword_target_p (target))
2762 target = gen_reg_rtx (mode);
2763 if (REG_P (target))
2764 emit_clobber (target);
2765 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2766 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2767
2768 return target;
2769 }
2770
2771 /* Try calculating (parity x) as (and (popcount x) 1), where
2772 popcount can also be done in a wider mode. */
2773 static rtx
2774 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2775 {
2776 enum mode_class mclass = GET_MODE_CLASS (mode);
2777 opt_scalar_int_mode wider_mode_iter;
2778 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2779 {
2780 scalar_int_mode wider_mode = wider_mode_iter.require ();
2781 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2782 {
2783 rtx xop0, temp;
2784 rtx_insn *last;
2785
2786 last = get_last_insn ();
2787
2788 if (target == 0 || GET_MODE (target) != wider_mode)
2789 target = gen_reg_rtx (wider_mode);
2790
2791 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2792 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2793 true);
2794 if (temp != 0)
2795 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2796 target, true, OPTAB_DIRECT);
2797
2798 if (temp)
2799 {
2800 if (mclass != MODE_INT
2801 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2802 return convert_to_mode (mode, temp, 0);
2803 else
2804 return gen_lowpart (mode, temp);
2805 }
2806 else
2807 delete_insns_since (last);
2808 }
2809 }
2810 return 0;
2811 }
2812
2813 /* Try calculating ctz(x) as K - clz(x & -x),
2814 where K is GET_MODE_PRECISION(mode) - 1.
2815
2816 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2817 don't have to worry about what the hardware does in that case. (If
2818 the clz instruction produces the usual value at 0, which is K, the
2819 result of this code sequence will be -1; expand_ffs, below, relies
2820 on this. It might be nice to have it be K instead, for consistency
2821 with the (very few) processors that provide a ctz with a defined
2822 value, but that would take one more instruction, and it would be
2823 less convenient for expand_ffs anyway.) */
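/* (Worked example: for x == 0b0110100 in a 32-bit mode, x & -x
== 0b0000100, clz of that is 29, and K - 29 == 31 - 29 == 2,
the index of the lowest set bit.) */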
2824
2825 static rtx
2826 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2827 {
2828 rtx_insn *seq;
2829 rtx temp;
2830
2831 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2832 return 0;
2833
2834 start_sequence ();
2835
2836 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2837 if (temp)
2838 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2839 true, OPTAB_DIRECT);
2840 if (temp)
2841 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2842 if (temp)
2843 temp = expand_binop (mode, sub_optab,
2844 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2845 temp, target,
2846 true, OPTAB_DIRECT);
2847 if (temp == 0)
2848 {
2849 end_sequence ();
2850 return 0;
2851 }
2852
2853 seq = get_insns ();
2854 end_sequence ();
2855
2856 add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
2857 emit_insn (seq);
2858 return temp;
2859 }
2860
2861
2862 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2863 else with the sequence used by expand_ctz.
2864
2865 The ffs builtin promises to return zero for a zero value and ctz/clz
2866 may have an undefined value in that case. If they do not give us a
2867 convenient value, we have to generate a test and branch. */
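/* (The target identity is roughly ffs (x) == (x == 0 ? 0 : ctz (x) + 1),
with the comparison elided when ctz is known to yield -1 at zero;
illustrative notation.) */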
2868 static rtx
2869 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2870 {
2871 HOST_WIDE_INT val = 0;
2872 bool defined_at_zero = false;
2873 rtx temp;
2874 rtx_insn *seq;
2875
2876 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2877 {
2878 start_sequence ();
2879
2880 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2881 if (!temp)
2882 goto fail;
2883
2884 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2885 }
2886 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2887 {
2888 start_sequence ();
2889 temp = expand_ctz (mode, op0, 0);
2890 if (!temp)
2891 goto fail;
2892
2893 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2894 {
2895 defined_at_zero = true;
2896 val = (GET_MODE_PRECISION (mode) - 1) - val;
2897 }
2898 }
2899 else
2900 return 0;
2901
2902 if (defined_at_zero && val == -1)
2903 /* No correction needed at zero. */;
2904 else
2905 {
2906 /* We don't try to do anything clever with the situation found
2907 on some processors (eg Alpha) where ctz(0:mode) ==
2908 bitsize(mode). If someone can think of a way to send N to -1
2909 and leave alone all values in the range 0..N-1 (where N is a
2910 power of two), cheaper than this test-and-branch, please add it.
2911
2912 The test-and-branch is done after the operation itself, in case
2913 the operation sets condition codes that can be recycled for this.
2914 (This is true on i386, for instance.) */
2915
2916 rtx_code_label *nonzero_label = gen_label_rtx ();
2917 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2918 mode, true, nonzero_label);
2919
2920 convert_move (temp, GEN_INT (-1), false);
2921 emit_label (nonzero_label);
2922 }
2923
2924 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2925 to produce a value in the range 0..bitsize. */
2926 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2927 target, false, OPTAB_DIRECT);
2928 if (!temp)
2929 goto fail;
2930
2931 seq = get_insns ();
2932 end_sequence ();
2933
2934 add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
2935 emit_insn (seq);
2936 return temp;
2937
2938 fail:
2939 end_sequence ();
2940 return 0;
2941 }
2942
2943 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2944 conditions, VAL may already be a SUBREG against which we cannot generate
2945 a further SUBREG. In this case, we expect forcing the value into a
2946 register will work around the situation. */
2947
2948 static rtx
2949 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2950 machine_mode imode)
2951 {
2952 rtx ret;
2953 ret = lowpart_subreg (omode, val, imode);
2954 if (ret == NULL)
2955 {
2956 val = force_reg (imode, val);
2957 ret = lowpart_subreg (omode, val, imode);
2958 gcc_assert (ret != NULL);
2959 }
2960 return ret;
2961 }
2962
2963 /* Expand a floating point absolute value or negation operation via a
2964 logical operation on the sign bit. */
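/* (E.g., for IEEE single precision: ABS is x & 0x7fffffff and NEG is
x ^ 0x80000000 on the 32-bit image of the value; the code below
derives the mask from the format's signbit_rw position.) */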
2965
2966 static rtx
2967 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2968 rtx op0, rtx target)
2969 {
2970 const struct real_format *fmt;
2971 int bitpos, word, nwords, i;
2972 scalar_int_mode imode;
2973 rtx temp;
2974 rtx_insn *insns;
2975
2976 /* The format has to have a simple sign bit. */
2977 fmt = REAL_MODE_FORMAT (mode);
2978 if (fmt == NULL)
2979 return NULL_RTX;
2980
2981 bitpos = fmt->signbit_rw;
2982 if (bitpos < 0)
2983 return NULL_RTX;
2984
2985 /* Don't create negative zeros if the format doesn't support them. */
2986 if (code == NEG && !fmt->has_signed_zero)
2987 return NULL_RTX;
2988
2989 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2990 {
2991 if (!int_mode_for_mode (mode).exists (&imode))
2992 return NULL_RTX;
2993 word = 0;
2994 nwords = 1;
2995 }
2996 else
2997 {
2998 imode = word_mode;
2999
3000 if (FLOAT_WORDS_BIG_ENDIAN)
3001 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3002 else
3003 word = bitpos / BITS_PER_WORD;
3004 bitpos = bitpos % BITS_PER_WORD;
3005 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3006 }
3007
3008 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3009 if (code == ABS)
3010 mask = ~mask;
3011
3012 if (target == 0
3013 || target == op0
3014 || reg_overlap_mentioned_p (target, op0)
3015 || (nwords > 1 && !valid_multiword_target_p (target)))
3016 target = gen_reg_rtx (mode);
3017
3018 if (nwords > 1)
3019 {
3020 start_sequence ();
3021
3022 for (i = 0; i < nwords; ++i)
3023 {
3024 rtx targ_piece = operand_subword (target, i, 1, mode);
3025 rtx op0_piece = operand_subword_force (op0, i, mode);
3026
3027 if (i == word)
3028 {
3029 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3030 op0_piece,
3031 immed_wide_int_const (mask, imode),
3032 targ_piece, 1, OPTAB_LIB_WIDEN);
3033 if (temp != targ_piece)
3034 emit_move_insn (targ_piece, temp);
3035 }
3036 else
3037 emit_move_insn (targ_piece, op0_piece);
3038 }
3039
3040 insns = get_insns ();
3041 end_sequence ();
3042
3043 emit_insn (insns);
3044 }
3045 else
3046 {
3047 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3048 gen_lowpart (imode, op0),
3049 immed_wide_int_const (mask, imode),
3050 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3051 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3052
3053 set_dst_reg_note (get_last_insn (), REG_EQUAL,
3054 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
3055 target);
3056 }
3057
3058 return target;
3059 }
3060
3061 /* As expand_unop, but will fail rather than attempt the operation in a
3062 different mode or with a libcall. */
3063 static rtx
3064 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
3065 int unsignedp)
3066 {
3067 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
3068 {
3069 class expand_operand ops[2];
3070 enum insn_code icode = optab_handler (unoptab, mode);
3071 rtx_insn *last = get_last_insn ();
3072 rtx_insn *pat;
3073
3074 create_output_operand (&ops[0], target, mode);
3075 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
3076 pat = maybe_gen_insn (icode, 2, ops);
3077 if (pat)
3078 {
3079 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3080 && ! add_equal_note (pat, ops[0].value,
3081 optab_to_code (unoptab),
3082 ops[1].value, NULL_RTX, mode))
3083 {
3084 delete_insns_since (last);
3085 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3086 }
3087
3088 emit_insn (pat);
3089
3090 return ops[0].value;
3091 }
3092 }
3093 return 0;
3094 }
3095
3096 /* Generate code to perform an operation specified by UNOPTAB
3097 on operand OP0, with result having machine-mode MODE.
3098
3099 UNSIGNEDP is for the case where we have to widen the operands
3100 to perform the operation. It says to use zero-extension.
3101
3102 If TARGET is nonzero, the value
3103 is generated there, if it is convenient to do so.
3104 In all cases an rtx is returned for the locus of the value;
3105 this may or may not be TARGET. */
3106
3107 rtx
3108 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
3109 int unsignedp)
3110 {
3111 enum mode_class mclass = GET_MODE_CLASS (mode);
3112 machine_mode wider_mode;
3113 scalar_int_mode int_mode;
3114 scalar_float_mode float_mode;
3115 rtx temp;
3116 rtx libfunc;
3117
3118 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3119 if (temp)
3120 return temp;
3121
3122 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3123
3124 /* Widening (or narrowing) clz needs special treatment. */
3125 if (unoptab == clz_optab)
3126 {
3127 if (is_a <scalar_int_mode> (mode, &int_mode))
3128 {
3129 temp = widen_leading (int_mode, op0, target, unoptab);
3130 if (temp)
3131 return temp;
3132
3133 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3134 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3135 {
3136 temp = expand_doubleword_clz (int_mode, op0, target);
3137 if (temp)
3138 return temp;
3139 }
3140 }
3141
3142 goto try_libcall;
3143 }
3144
3145 if (unoptab == clrsb_optab)
3146 {
3147 if (is_a <scalar_int_mode> (mode, &int_mode))
3148 {
3149 temp = widen_leading (int_mode, op0, target, unoptab);
3150 if (temp)
3151 return temp;
3152 }
3153 goto try_libcall;
3154 }
3155
3156 if (unoptab == popcount_optab
3157 && is_a <scalar_int_mode> (mode, &int_mode)
3158 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3159 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
3160 && optimize_insn_for_speed_p ())
3161 {
3162 temp = expand_doubleword_popcount (int_mode, op0, target);
3163 if (temp)
3164 return temp;
3165 }
3166
3167 if (unoptab == parity_optab
3168 && is_a <scalar_int_mode> (mode, &int_mode)
3169 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3170 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
3171 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
3172 && optimize_insn_for_speed_p ())
3173 {
3174 temp = expand_doubleword_parity (int_mode, op0, target);
3175 if (temp)
3176 return temp;
3177 }
3178
3179 /* Widening (or narrowing) bswap needs special treatment. */
3180 if (unoptab == bswap_optab)
3181 {
3182 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3183 or ROTATERT. First try these directly; if this fails, then try the
3184 obvious pair of shifts with allowed widening, as this will probably
3185 always be more efficient than the other fallback methods. */
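/* (E.g., 0xAABB rotated by 8 in either direction gives 0xBBAA,
which is exactly the HImode byte swap.) */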
3186 if (mode == HImode)
3187 {
3188 rtx_insn *last;
3189 rtx temp1, temp2;
3190
3191 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
3192 {
3193 temp = expand_binop (mode, rotl_optab, op0,
3194 gen_int_shift_amount (mode, 8),
3195 target, unsignedp, OPTAB_DIRECT);
3196 if (temp)
3197 return temp;
3198 }
3199
3200 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
3201 {
3202 temp = expand_binop (mode, rotr_optab, op0,
3203 gen_int_shift_amount (mode, 8),
3204 target, unsignedp, OPTAB_DIRECT);
3205 if (temp)
3206 return temp;
3207 }
3208
3209 last = get_last_insn ();
3210
3211 temp1 = expand_binop (mode, ashl_optab, op0,
3212 gen_int_shift_amount (mode, 8), NULL_RTX,
3213 unsignedp, OPTAB_WIDEN);
3214 temp2 = expand_binop (mode, lshr_optab, op0,
3215 gen_int_shift_amount (mode, 8), NULL_RTX,
3216 unsignedp, OPTAB_WIDEN);
3217 if (temp1 && temp2)
3218 {
3219 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
3220 unsignedp, OPTAB_WIDEN);
3221 if (temp)
3222 return temp;
3223 }
3224
3225 delete_insns_since (last);
3226 }
3227
3228 if (is_a <scalar_int_mode> (mode, &int_mode))
3229 {
3230 temp = widen_bswap (int_mode, op0, target);
3231 if (temp)
3232 return temp;
3233
3234 /* We do not provide a 128-bit bswap in libgcc so force the use of
3235 a double bswap for 64-bit targets. */
3236 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3237 && (UNITS_PER_WORD == 8
3238 || optab_handler (unoptab, word_mode) != CODE_FOR_nothing))
3239 {
3240 temp = expand_doubleword_bswap (mode, op0, target);
3241 if (temp)
3242 return temp;
3243 }
3244 }
3245
3246 goto try_libcall;
3247 }
3248
3249 if (CLASS_HAS_WIDER_MODES_P (mclass))
3250 FOR_EACH_WIDER_MODE (wider_mode, mode)
3251 {
3252 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3253 {
3254 rtx xop0 = op0;
3255 rtx_insn *last = get_last_insn ();
3256
3257 /* For certain operations, we need not actually extend
3258 the narrow operand, as long as we will truncate the
3259 results to the same narrowness. */
3260
3261 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3262 (unoptab == neg_optab
3263 || unoptab == one_cmpl_optab)
3264 && mclass == MODE_INT);
3265
3266 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3267 unsignedp);
3268
3269 if (temp)
3270 {
3271 if (mclass != MODE_INT
3272 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3273 {
3274 if (target == 0)
3275 target = gen_reg_rtx (mode);
3276 convert_move (target, temp, 0);
3277 return target;
3278 }
3279 else
3280 return gen_lowpart (mode, temp);
3281 }
3282 else
3283 delete_insns_since (last);
3284 }
3285 }
3286
3287 /* These can be done a word at a time. */
3288 if (unoptab == one_cmpl_optab
3289 && is_int_mode (mode, &int_mode)
3290 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
3291 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3292 {
3293 int i;
3294 rtx_insn *insns;
3295
3296 if (target == 0
3297 || target == op0
3298 || reg_overlap_mentioned_p (target, op0)
3299 || !valid_multiword_target_p (target))
3300 target = gen_reg_rtx (int_mode);
3301
3302 start_sequence ();
3303
3304 /* Do the actual arithmetic. */
3305 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
3306 {
3307 rtx target_piece = operand_subword (target, i, 1, int_mode);
3308 rtx x = expand_unop (word_mode, unoptab,
3309 operand_subword_force (op0, i, int_mode),
3310 target_piece, unsignedp);
3311
3312 if (target_piece != x)
3313 emit_move_insn (target_piece, x);
3314 }
3315
3316 insns = get_insns ();
3317 end_sequence ();
3318
3319 emit_insn (insns);
3320 return target;
3321 }
3322
3323 /* Emit ~op0 as op0 ^ -1. */
3324 if (unoptab == one_cmpl_optab
3325 && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3326 && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
3327 {
3328 temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
3329 target, unsignedp, OPTAB_DIRECT);
3330 if (temp)
3331 return temp;
3332 }
3333
3334 if (optab_to_code (unoptab) == NEG)
3335 {
3336 /* Try negating floating point values by flipping the sign bit. */
3337 if (is_a <scalar_float_mode> (mode, &float_mode))
3338 {
3339 temp = expand_absneg_bit (NEG, float_mode, op0, target);
3340 if (temp)
3341 return temp;
3342 }
3343
3344 /* If there is no negation pattern, and we have no negative zero,
3345 try subtracting from zero. */
3346 if (!HONOR_SIGNED_ZEROS (mode))
3347 {
3348 temp = expand_binop (mode, (unoptab == negv_optab
3349 ? subv_optab : sub_optab),
3350 CONST0_RTX (mode), op0, target,
3351 unsignedp, OPTAB_DIRECT);
3352 if (temp)
3353 return temp;
3354 }
3355 }
3356
3357 /* Try calculating parity (x) as popcount (x) % 2. */
3358 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
3359 {
3360 temp = expand_parity (int_mode, op0, target);
3361 if (temp)
3362 return temp;
3363 }
3364
3365 /* Try implementing ffs (x) in terms of clz (x). */
3366 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
3367 {
3368 temp = expand_ffs (int_mode, op0, target);
3369 if (temp)
3370 return temp;
3371 }
3372
3373 /* Try implementing ctz (x) in terms of clz (x). */
3374 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
3375 {
3376 temp = expand_ctz (int_mode, op0, target);
3377 if (temp)
3378 return temp;
3379 }
3380
3381 try_libcall:
3382 /* Now try a library call in this mode. */
3383 libfunc = optab_libfunc (unoptab, mode);
3384 if (libfunc)
3385 {
3386 rtx_insn *insns;
3387 rtx value;
3388 rtx eq_value;
3389 machine_mode outmode = mode;
3390
3391 /* All of these functions return small values. Thus we choose to
3392 have them return something that isn't a double-word. */
3393 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3394 || unoptab == clrsb_optab || unoptab == popcount_optab
3395 || unoptab == parity_optab)
3396 outmode
3397 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3398 optab_libfunc (unoptab, mode)));
3399
3400 start_sequence ();
3401
3402 /* Pass 1 for NO_QUEUE so we don't lose any increments
3403 if the libcall is cse'd or moved. */
3404 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3405 op0, mode);
3406 insns = get_insns ();
3407 end_sequence ();
3408
3409 target = gen_reg_rtx (outmode);
3410 bool trapv = trapv_unoptab_p (unoptab);
3411 if (trapv)
3412 eq_value = NULL_RTX;
3413 else
3414 {
3415 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3416 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3417 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3418 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3419 eq_value = simplify_gen_unary (ZERO_EXTEND,
3420 outmode, eq_value, mode);
3421 }
3422 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3423
3424 return target;
3425 }
3426
3427 /* It can't be done in this mode. Can we do it in a wider mode? */
3428
3429 if (CLASS_HAS_WIDER_MODES_P (mclass))
3430 {
3431 FOR_EACH_WIDER_MODE (wider_mode, mode)
3432 {
3433 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3434 || optab_libfunc (unoptab, wider_mode))
3435 {
3436 rtx xop0 = op0;
3437 rtx_insn *last = get_last_insn ();
3438
3439 /* For certain operations, we need not actually extend
3440 the narrow operand, as long as we will truncate the
3441 results to the same narrowness. */
3442 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3443 (unoptab == neg_optab
3444 || unoptab == one_cmpl_optab
3445 || unoptab == bswap_optab)
3446 && mclass == MODE_INT);
3447
3448 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3449 unsignedp);
3450
3451 /* If we are generating clz using a wider mode, adjust the
3452 result. Similarly for clrsb. */
3453 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3454 && temp != 0)
3455 {
3456 scalar_int_mode wider_int_mode
3457 = as_a <scalar_int_mode> (wider_mode);
3458 int_mode = as_a <scalar_int_mode> (mode);
3459 temp = expand_binop
3460 (wider_mode, sub_optab, temp,
3461 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3462 - GET_MODE_PRECISION (int_mode),
3463 wider_int_mode),
3464 target, true, OPTAB_DIRECT);
3465 }
3466
3467 /* Likewise for bswap. */
3468 if (unoptab == bswap_optab && temp != 0)
3469 {
3470 scalar_int_mode wider_int_mode
3471 = as_a <scalar_int_mode> (wider_mode);
3472 int_mode = as_a <scalar_int_mode> (mode);
3473 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3474 == GET_MODE_BITSIZE (wider_int_mode)
3475 && GET_MODE_PRECISION (int_mode)
3476 == GET_MODE_BITSIZE (int_mode));
3477
3478 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3479 GET_MODE_BITSIZE (wider_int_mode)
3480 - GET_MODE_BITSIZE (int_mode),
3481 NULL_RTX, true);
3482 }
3483
3484 if (temp)
3485 {
3486 if (mclass != MODE_INT)
3487 {
3488 if (target == 0)
3489 target = gen_reg_rtx (mode);
3490 convert_move (target, temp, 0);
3491 return target;
3492 }
3493 else
3494 return gen_lowpart (mode, temp);
3495 }
3496 else
3497 delete_insns_since (last);
3498 }
3499 }
3500 }
3501
3502 /* One final attempt at implementing negation via subtraction,
3503 this time allowing widening of the operand. */
3504 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3505 {
3506 rtx temp;
3507 temp = expand_binop (mode,
3508 unoptab == negv_optab ? subv_optab : sub_optab,
3509 CONST0_RTX (mode), op0,
3510 target, unsignedp, OPTAB_LIB_WIDEN);
3511 if (temp)
3512 return temp;
3513 }
3514
3515 return 0;
3516 }
3517 \f
3518 /* Emit code to compute the absolute value of OP0, with result to
3519 TARGET if convenient. (TARGET may be 0.) The return value says
3520 where the result actually is to be found.
3521
3522 MODE is the mode of the operand; the mode of the result is
3523 different but can be deduced from MODE.  */
3526
3527 rtx
3528 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3529 int result_unsignedp)
3530 {
3531 rtx temp;
3532
3533 if (GET_MODE_CLASS (mode) != MODE_INT
3534 || ! flag_trapv)
3535 result_unsignedp = 1;
3536
3537 /* First try to do it with a special abs instruction. */
3538 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3539 op0, target, 0);
3540 if (temp != 0)
3541 return temp;
3542
3543 /* For floating point modes, try clearing the sign bit. */
3544 scalar_float_mode float_mode;
3545 if (is_a <scalar_float_mode> (mode, &float_mode))
3546 {
3547 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3548 if (temp)
3549 return temp;
3550 }
3551
3552 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3553 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3554 && !HONOR_SIGNED_ZEROS (mode))
3555 {
3556 rtx_insn *last = get_last_insn ();
3557
3558 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3559 op0, NULL_RTX, 0);
3560 if (temp != 0)
3561 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3562 OPTAB_WIDEN);
3563
3564 if (temp != 0)
3565 return temp;
3566
3567 delete_insns_since (last);
3568 }
3569
3570 /* If this machine has expensive jumps, we can do integer absolute
3571 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3572 where W is the width of MODE. */
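   /* A worked example of the branch-free sequence above, as a sketch
      assuming 32-bit two's complement arithmetic with an arithmetic
      right shift:

        int32_t s = x >> 31;        // 0 if x >= 0, -1 otherwise
        int32_t a = (x ^ s) - s;    // |x|

      For x = -5, s = -1, so (x ^ s) - s = ~x + 1 = 5; for x >= 0,
      s = 0 and the expression reduces to x.  */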
3573
3574 scalar_int_mode int_mode;
3575 if (is_int_mode (mode, &int_mode)
3576 && BRANCH_COST (optimize_insn_for_speed_p (),
3577 false) >= 2)
3578 {
3579 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3580 GET_MODE_PRECISION (int_mode) - 1,
3581 NULL_RTX, 0);
3582
3583 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3584 OPTAB_LIB_WIDEN);
3585 if (temp != 0)
3586 temp = expand_binop (int_mode,
3587 result_unsignedp ? sub_optab : subv_optab,
3588 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3589
3590 if (temp != 0)
3591 return temp;
3592 }
3593
3594 return NULL_RTX;
3595 }
3596
3597 rtx
3598 expand_abs (machine_mode mode, rtx op0, rtx target,
3599 int result_unsignedp, int safe)
3600 {
3601 rtx temp;
3602 rtx_code_label *op1;
3603
3604 if (GET_MODE_CLASS (mode) != MODE_INT
3605 || ! flag_trapv)
3606 result_unsignedp = 1;
3607
3608 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3609 if (temp != 0)
3610 return temp;
3611
3612 /* If that does not win, use conditional jump and negate. */
3613
3614 /* It is safe to use the target if it is the same
3615 as the source, provided it is a pseudo register.  */
3616 if (op0 == target && REG_P (op0)
3617 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3618 safe = 1;
3619
3620 op1 = gen_label_rtx ();
3621 if (target == 0 || ! safe
3622 || GET_MODE (target) != mode
3623 || (MEM_P (target) && MEM_VOLATILE_P (target))
3624 || (REG_P (target)
3625 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3626 target = gen_reg_rtx (mode);
3627
3628 emit_move_insn (target, op0);
3629 NO_DEFER_POP;
3630
3631 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3632 NULL_RTX, NULL, op1,
3633 profile_probability::uninitialized ());
3634
3635 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3636 target, target, 0);
3637 if (op0 != target)
3638 emit_move_insn (target, op0);
3639 emit_label (op1);
3640 OK_DEFER_POP;
3641 return target;
3642 }
3643
3644 /* Emit code to compute the one's complement absolute value of OP0
3645 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3646 (TARGET may be NULL_RTX.) The return value says where the result
3647 actually is to be found.
3648
3649 MODE is the mode of the operand; the mode of the result is
3650 different but can be deduced from MODE. */
3651
3652 rtx
3653 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3654 {
3655 rtx temp;
3656
3657 /* Not applicable for floating point modes. */
3658 if (FLOAT_MODE_P (mode))
3659 return NULL_RTX;
3660
3661 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3662 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3663 {
3664 rtx_insn *last = get_last_insn ();
3665
3666 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3667 if (temp != 0)
3668 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3669 OPTAB_WIDEN);
3670
3671 if (temp != 0)
3672 return temp;
3673
3674 delete_insns_since (last);
3675 }
3676
3677 /* If this machine has expensive jumps, we can do one's complement
3678 absolute value of X as (((signed) x >> (W-1)) ^ x). */
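   /* E.g., with the same 32-bit assumptions as for expand_abs_nojump:

        int32_t s = x >> 31;    // 0 if x >= 0, -1 otherwise
        int32_t a = x ^ s;      // x when x >= 0, ~x otherwise

      so x = -5 yields ~(-5) = 4, the one's complement absolute value.  */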
3679
3680 scalar_int_mode int_mode;
3681 if (is_int_mode (mode, &int_mode)
3682 && BRANCH_COST (optimize_insn_for_speed_p (),
3683 false) >= 2)
3684 {
3685 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3686 GET_MODE_PRECISION (int_mode) - 1,
3687 NULL_RTX, 0);
3688
3689 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3690 OPTAB_LIB_WIDEN);
3691
3692 if (temp != 0)
3693 return temp;
3694 }
3695
3696 return NULL_RTX;
3697 }
3698
3699 /* A subroutine of expand_copysign; perform the copysign operation using the
3700 abs and neg primitives advertised to exist on the target. The assumption
3701 is that the target has a split register file, so keeping op0 in fp registers
3702 and avoiding subreg manipulation will help the register allocator. */
3703
3704 static rtx
3705 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3706 int bitpos, bool op0_is_abs)
3707 {
3708 scalar_int_mode imode;
3709 enum insn_code icode;
3710 rtx sign;
3711 rtx_code_label *label;
3712
3713 if (target == op1)
3714 target = NULL_RTX;
3715
3716 /* Check if the back end provides an insn that handles signbit for the
3717 argument's mode. */
3718 icode = optab_handler (signbit_optab, mode);
3719 if (icode != CODE_FOR_nothing)
3720 {
3721 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3722 sign = gen_reg_rtx (imode);
3723 emit_unop_insn (icode, sign, op1, UNKNOWN);
3724 }
3725 else
3726 {
3727 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3728 {
3729 if (!int_mode_for_mode (mode).exists (&imode))
3730 return NULL_RTX;
3731 op1 = gen_lowpart (imode, op1);
3732 }
3733 else
3734 {
3735 int word;
3736
3737 imode = word_mode;
3738 if (FLOAT_WORDS_BIG_ENDIAN)
3739 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3740 else
3741 word = bitpos / BITS_PER_WORD;
3742 bitpos = bitpos % BITS_PER_WORD;
3743 op1 = operand_subword_force (op1, word, mode);
3744 }
3745
3746 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3747 sign = expand_binop (imode, and_optab, op1,
3748 immed_wide_int_const (mask, imode),
3749 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3750 }
3751
3752 if (!op0_is_abs)
3753 {
3754 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3755 if (op0 == NULL)
3756 return NULL_RTX;
3757 target = op0;
3758 }
3759 else
3760 {
3761 if (target == NULL_RTX)
3762 target = copy_to_reg (op0);
3763 else
3764 emit_move_insn (target, op0);
3765 }
3766
3767 label = gen_label_rtx ();
3768 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3769
3770 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3771 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3772 else
3773 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3774 if (op0 != target)
3775 emit_move_insn (target, op0);
3776
3777 emit_label (label);
3778
3779 return target;
3780 }
3781
3782
3783 /* A subroutine of expand_copysign, perform the entire copysign operation
3784 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3785 is true if op0 is known to have its sign bit clear. */
3786
3787 static rtx
3788 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3789 int bitpos, bool op0_is_abs)
3790 {
3791 scalar_int_mode imode;
3792 int word, nwords, i;
3793 rtx temp;
3794 rtx_insn *insns;
3795
3796 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3797 {
3798 if (!int_mode_for_mode (mode).exists (&imode))
3799 return NULL_RTX;
3800 word = 0;
3801 nwords = 1;
3802 }
3803 else
3804 {
3805 imode = word_mode;
3806
3807 if (FLOAT_WORDS_BIG_ENDIAN)
3808 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3809 else
3810 word = bitpos / BITS_PER_WORD;
3811 bitpos = bitpos % BITS_PER_WORD;
3812 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3813 }
3814
3815 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3816
3817 if (target == 0
3818 || target == op0
3819 || target == op1
3820 || reg_overlap_mentioned_p (target, op0)
3821 || reg_overlap_mentioned_p (target, op1)
3822 || (nwords > 1 && !valid_multiword_target_p (target)))
3823 target = gen_reg_rtx (mode);
3824
3825 if (nwords > 1)
3826 {
3827 start_sequence ();
3828
3829 for (i = 0; i < nwords; ++i)
3830 {
3831 rtx targ_piece = operand_subword (target, i, 1, mode);
3832 rtx op0_piece = operand_subword_force (op0, i, mode);
3833
3834 if (i == word)
3835 {
3836 if (!op0_is_abs)
3837 op0_piece
3838 = expand_binop (imode, and_optab, op0_piece,
3839 immed_wide_int_const (~mask, imode),
3840 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3841 op1 = expand_binop (imode, and_optab,
3842 operand_subword_force (op1, i, mode),
3843 immed_wide_int_const (mask, imode),
3844 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3845
3846 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3847 targ_piece, 1, OPTAB_LIB_WIDEN);
3848 if (temp != targ_piece)
3849 emit_move_insn (targ_piece, temp);
3850 }
3851 else
3852 emit_move_insn (targ_piece, op0_piece);
3853 }
3854
3855 insns = get_insns ();
3856 end_sequence ();
3857
3858 emit_insn (insns);
3859 }
3860 else
3861 {
3862 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3863 immed_wide_int_const (mask, imode),
3864 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3865
3866 op0 = gen_lowpart (imode, op0);
3867 if (!op0_is_abs)
3868 op0 = expand_binop (imode, and_optab, op0,
3869 immed_wide_int_const (~mask, imode),
3870 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3871
3872 temp = expand_binop (imode, ior_optab, op0, op1,
3873 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3874 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3875 }
3876
3877 return target;
3878 }
3879
3880 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3881 scalar floating point mode. Return NULL if we do not know how to
3882 expand the operation inline. */
3883
3884 rtx
3885 expand_copysign (rtx op0, rtx op1, rtx target)
3886 {
3887 scalar_float_mode mode;
3888 const struct real_format *fmt;
3889 bool op0_is_abs;
3890 rtx temp;
3891
3892 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3893 gcc_assert (GET_MODE (op1) == mode);
3894
3895 /* First try to do it with a special instruction. */
3896 temp = expand_binop (mode, copysign_optab, op0, op1,
3897 target, 0, OPTAB_DIRECT);
3898 if (temp)
3899 return temp;
3900
3901 fmt = REAL_MODE_FORMAT (mode);
3902 if (fmt == NULL || !fmt->has_signed_zero)
3903 return NULL_RTX;
3904
3905 op0_is_abs = false;
3906 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3907 {
3908 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3909 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3910 op0_is_abs = true;
3911 }
3912
3913 if (fmt->signbit_ro >= 0
3914 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3915 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3916 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3917 {
3918 temp = expand_copysign_absneg (mode, op0, op1, target,
3919 fmt->signbit_ro, op0_is_abs);
3920 if (temp)
3921 return temp;
3922 }
3923
3924 if (fmt->signbit_rw < 0)
3925 return NULL_RTX;
3926 return expand_copysign_bit (mode, op0, op1, target,
3927 fmt->signbit_rw, op0_is_abs);
3928 }
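/* An illustrative sketch (not code used by the expanders above) of the
   bitmask form of copysign that expand_copysign_bit emits as AND/AND/IOR
   rtl, assuming IEEE double viewed through a 64-bit integer mode:

     uint64_t sign_mask = (uint64_t) 1 << 63;
     uint64_t r = (x_bits & ~sign_mask) | (y_bits & sign_mask);

   i.e. the result takes its magnitude from X and its sign from Y.  */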
3929 \f
3930 /* Generate an instruction whose insn-code is INSN_CODE,
3931 with two operands: an output TARGET and an input OP0.
3932 TARGET *must* be nonzero, and the output is always stored there.
3933 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3934 the value that is stored into TARGET.
3935
3936 Return false if expansion failed. */
3937
3938 bool
3939 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3940 enum rtx_code code)
3941 {
3942 class expand_operand ops[2];
3943 rtx_insn *pat;
3944
3945 create_output_operand (&ops[0], target, GET_MODE (target));
3946 create_input_operand (&ops[1], op0, GET_MODE (op0));
3947 pat = maybe_gen_insn (icode, 2, ops);
3948 if (!pat)
3949 return false;
3950
3951 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3952 && code != UNKNOWN)
3953 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
3954 GET_MODE (op0));
3955
3956 emit_insn (pat);
3957
3958 if (ops[0].value != target)
3959 emit_move_insn (target, ops[0].value);
3960 return true;
3961 }
3962 /* Generate an instruction whose insn-code is INSN_CODE,
3963 with two operands: an output TARGET and an input OP0.
3964 TARGET *must* be nonzero, and the output is always stored there.
3965 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3966 the value that is stored into TARGET. */
3967
3968 void
3969 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3970 {
3971 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3972 gcc_assert (ok);
3973 }
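/* For example, a hypothetical caller that has already verified an abs
   pattern exists might use it as follows (sketch):

     enum insn_code icode = optab_handler (abs_optab, SImode);
     if (icode != CODE_FOR_nothing)
       emit_unop_insn (icode, target, op0, ABS);

   Passing ABS rather than UNKNOWN lets add_equal_note attach a
   REG_EQUAL note to the final insn.  */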
3974 \f
3975 struct no_conflict_data
3976 {
3977 rtx target;
3978 rtx_insn *first, *insn;
3979 bool must_stay;
3980 };
3981
3982 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3983 the currently examined clobber / store has to stay in the list of
3984 insns that constitute the actual libcall block. */
3985 static void
3986 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3987 {
3988 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3989
3990 /* If this insn directly contributes to setting the target, it must stay. */
3991 if (reg_overlap_mentioned_p (p->target, dest))
3992 p->must_stay = true;
3993 /* If we haven't committed to keeping any other insns in the list yet,
3994 there is nothing more to check. */
3995 else if (p->insn == p->first)
3996 return;
3997 /* If this insn sets / clobbers a register that feeds one of the insns
3998 already in the list, this insn has to stay too. */
3999 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
4000 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
4001 || reg_used_between_p (dest, p->first, p->insn)
4002 /* Likewise if this insn depends on a register set by a previous
4003 insn in the list, or if it sets a result (presumably a hard
4004 register) that is set or clobbered by a previous insn.
4005 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
4006 SET_DEST perform the former check on the address, and the latter
4007 check on the MEM. */
4008 || (GET_CODE (set) == SET
4009 && (modified_in_p (SET_SRC (set), p->first)
4010 || modified_in_p (SET_DEST (set), p->first)
4011 || modified_between_p (SET_SRC (set), p->first, p->insn)
4012 || modified_between_p (SET_DEST (set), p->first, p->insn))))
4013 p->must_stay = true;
4014 }
4015
4016 \f
4017 /* Emit code to make a call to a constant function or a library call.
4018
4019 INSNS is a list containing all insns emitted in the call.
4020 These insns leave the result in RESULT. Our job is to copy RESULT
4021 to TARGET, which is logically equivalent to EQUIV.
4022
4023 We first emit any insns that set a pseudo on the assumption that these are
4024 loading constants into registers; doing so allows them to be safely cse'ed
4025 between blocks. Then we emit all the other insns in the block, followed by
4026 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
4027 note with an operand of EQUIV. */
4028
4029 static void
4030 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
4031 bool equiv_may_trap)
4032 {
4033 rtx final_dest = target;
4034 rtx_insn *next, *last, *insn;
4035
4036 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
4037 into a MEM later. Protect the libcall block from this change. */
4038 if (! REG_P (target) || REG_USERVAR_P (target))
4039 target = gen_reg_rtx (GET_MODE (target));
4040
4041 /* If we're using non-call exceptions, a libcall corresponding to an
4042 operation that may trap may also trap. */
4043 /* ??? See the comment in front of make_reg_eh_region_note. */
4044 if (cfun->can_throw_non_call_exceptions
4045 && (equiv_may_trap || may_trap_p (equiv)))
4046 {
4047 for (insn = insns; insn; insn = NEXT_INSN (insn))
4048 if (CALL_P (insn))
4049 {
4050 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
4051 if (note)
4052 {
4053 int lp_nr = INTVAL (XEXP (note, 0));
4054 if (lp_nr == 0 || lp_nr == INT_MIN)
4055 remove_note (insn, note);
4056 }
4057 }
4058 }
4059 else
4060 {
4061 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4062 reg note to indicate that this call cannot throw or execute a nonlocal
4063 goto (unless there is already a REG_EH_REGION note, in which case
4064 we update it). */
4065 for (insn = insns; insn; insn = NEXT_INSN (insn))
4066 if (CALL_P (insn))
4067 make_reg_eh_region_note_nothrow_nononlocal (insn);
4068 }
4069
4070 /* First emit all insns that set pseudos. Remove them from the list as
4071 we go. Avoid insns that set pseudos which were referenced in previous
4072 insns. These can be generated by move_by_pieces, for example,
4073 to update an address. Similarly, avoid insns that reference things
4074 set in previous insns. */
4075
4076 for (insn = insns; insn; insn = next)
4077 {
4078 rtx set = single_set (insn);
4079
4080 next = NEXT_INSN (insn);
4081
4082 if (set != 0 && REG_P (SET_DEST (set))
4083 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4084 {
4085 struct no_conflict_data data;
4086
4087 data.target = const0_rtx;
4088 data.first = insns;
4089 data.insn = insn;
4090 data.must_stay = 0;
4091 note_stores (insn, no_conflict_move_test, &data);
4092 if (! data.must_stay)
4093 {
4094 if (PREV_INSN (insn))
4095 SET_NEXT_INSN (PREV_INSN (insn)) = next;
4096 else
4097 insns = next;
4098
4099 if (next)
4100 SET_PREV_INSN (next) = PREV_INSN (insn);
4101
4102 add_insn (insn);
4103 }
4104 }
4105
4106 /* Some ports use a loop to copy large arguments onto the stack.
4107 Don't move anything outside such a loop. */
4108 if (LABEL_P (insn))
4109 break;
4110 }
4111
4112 /* Write the remaining insns followed by the final copy. */
4113 for (insn = insns; insn; insn = next)
4114 {
4115 next = NEXT_INSN (insn);
4116
4117 add_insn (insn);
4118 }
4119
4120 last = emit_move_insn (target, result);
4121 if (equiv)
4122 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
4123
4124 if (final_dest != target)
4125 emit_move_insn (final_dest, target);
4126 }
4127
4128 void
4129 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
4130 {
4131 emit_libcall_block_1 (insns, target, result, equiv, false);
4132 }
4133 \f
4134 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4135 PURPOSE describes how this comparison will be used. CODE is the rtx
4136 comparison code we will be using.
4137
4138 ??? Actually, CODE is slightly weaker than that. A target is still
4139 required to implement all of the normal bcc operations, but not
4140 required to implement all (or any) of the unordered bcc operations. */
4141
4142 int
4143 can_compare_p (enum rtx_code code, machine_mode mode,
4144 enum can_compare_purpose purpose)
4145 {
4146 rtx test;
4147 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
4148 do
4149 {
4150 enum insn_code icode;
4151
4152 if (purpose == ccp_jump
4153 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
4154 && insn_operand_matches (icode, 0, test))
4155 return 1;
4156 if (purpose == ccp_store_flag
4157 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
4158 && insn_operand_matches (icode, 1, test))
4159 return 1;
4160 if (purpose == ccp_cmov
4161 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
4162 return 1;
4163
4164 mode = GET_MODE_WIDER_MODE (mode).else_void ();
4165 PUT_MODE (test, mode);
4166 }
4167 while (mode != VOIDmode);
4168
4169 return 0;
4170 }
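/* E.g., a hypothetical caller preparing to emit a conditional branch
   might check (sketch):

     if (can_compare_p (LT, DImode, ccp_jump))
       ... emit the DImode compare-and-branch directly ...
     else
       ... widen or fall back to a libcall ...  */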
4171
4172 /* Return whether RTL code CODE corresponds to an unsigned optab. */
4173
4174 static bool
4175 unsigned_optab_p (enum rtx_code code)
4176 {
4177 return code == LTU || code == LEU || code == GTU || code == GEU;
4178 }
4179
4180 /* Return whether the backend-emitted comparison for code CODE, comparing
4181 operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
4182 operand OPNO of pattern ICODE. */
4183
4184 static bool
4185 insn_predicate_matches_p (enum insn_code icode, unsigned int opno,
4186 enum rtx_code code, machine_mode mask_mode,
4187 machine_mode value_mode)
4188 {
4189 rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1);
4190 rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2);
4191 rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2);
4192 return insn_operand_matches (icode, opno, test);
4193 }
4194
4195 /* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
4196 for code CODE, comparing operands of mode VALUE_MODE and producing a result
4197 with MASK_MODE. */
4198
4199 bool
4200 can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode,
4201 machine_mode mask_mode)
4202 {
4203 enum insn_code icode
4204 = get_vec_cmp_icode (value_mode, mask_mode, unsigned_optab_p (code));
4205 if (icode == CODE_FOR_nothing)
4206 return false;
4207
4208 return insn_predicate_matches_p (icode, 1, code, mask_mode, value_mode);
4209 }
4210
4211 /* Return whether the backend can emit a vector comparison (vcond/vcondu) for
4212 code CODE, comparing operands of mode CMP_OP_MODE and producing a result
4213 with VALUE_MODE. */
4214
4215 bool
4216 can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
4217 machine_mode cmp_op_mode)
4218 {
4219 enum insn_code icode
4220 = get_vcond_icode (value_mode, cmp_op_mode, unsigned_optab_p (code));
4221 if (icode == CODE_FOR_nothing)
4222 return false;
4223
4224 return insn_predicate_matches_p (icode, 3, code, value_mode, cmp_op_mode);
4225 }
4226
4227 /* Return whether the backend can emit a vector set instruction that inserts
4228 an element into a vector at a variable index position. */
4229
4230 bool
4231 can_vec_set_var_idx_p (machine_mode vec_mode)
4232 {
4233 if (!VECTOR_MODE_P (vec_mode))
4234 return false;
4235
4236 machine_mode inner_mode = GET_MODE_INNER (vec_mode);
4237 rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1);
4238 rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2);
4239 rtx reg3 = alloca_raw_REG (VOIDmode, LAST_VIRTUAL_REGISTER + 3);
4240
4241 enum insn_code icode = optab_handler (vec_set_optab, vec_mode);
4242
4243 return icode != CODE_FOR_nothing && insn_operand_matches (icode, 0, reg1)
4244 && insn_operand_matches (icode, 1, reg2)
4245 && insn_operand_matches (icode, 2, reg3);
4246 }
4247
4248 /* This function is called when we are going to emit a compare instruction that
4249 compares the values found in X and Y, using the rtl operator COMPARISON.
4250
4251 If they have mode BLKmode, then SIZE specifies the size of both operands.
4252
4253 UNSIGNEDP nonzero says that the operands are unsigned;
4254 this matters if they need to be widened (as given by METHODS).
4255
4256 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
4257 if we failed to produce one.
4258
4259 *PMODE is the mode of the inputs (in case they are const_int).
4260
4261 This function performs all the setup necessary so that the caller only has
4262 to emit a single comparison insn. This setup can involve doing a BLKmode
4263 comparison or emitting a library call to perform the comparison if no insn
4264 is available to handle it.
4265 The values which are passed in through pointers can be modified; the caller
4266 should perform the comparison on the modified values. Constant
4267 comparisons must have already been folded. */
4268
4269 static void
4270 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4271 int unsignedp, enum optab_methods methods,
4272 rtx *ptest, machine_mode *pmode)
4273 {
4274 machine_mode mode = *pmode;
4275 rtx libfunc, test;
4276 machine_mode cmp_mode;
4277 enum mode_class mclass;
4278
4279 /* The other methods are not needed. */
4280 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
4281 || methods == OPTAB_LIB_WIDEN);
4282
4283 if (CONST_SCALAR_INT_P (y))
4284 canonicalize_comparison (mode, &comparison, &y);
4285
4286 /* If we are optimizing, force expensive constants into a register. */
4287 if (CONSTANT_P (x) && optimize
4288 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
4289 > COSTS_N_INSNS (1)))
4290 x = force_reg (mode, x);
4291
4292 if (CONSTANT_P (y) && optimize
4293 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
4294 > COSTS_N_INSNS (1)))
4295 y = force_reg (mode, y);
4296
4297 #if HAVE_cc0
4298 /* Make sure we have a canonical comparison. The RTL
4299 documentation states that canonical comparisons are required only
4300 for targets which have cc0. */
4301 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4302 #endif
4303
4304 /* Don't let both operands fail to indicate the mode. */
4305 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4306 x = force_reg (mode, x);
4307 if (mode == VOIDmode)
4308 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
4309
4310 /* Handle all BLKmode compares. */
4311
4312 if (mode == BLKmode)
4313 {
4314 machine_mode result_mode;
4315 enum insn_code cmp_code;
4316 rtx result;
4317 rtx opalign
4318 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4319
4320 gcc_assert (size);
4321
4322 /* Try to use a memory block compare insn - either cmpstr
4323 or cmpmem will do. */
4324 opt_scalar_int_mode cmp_mode_iter;
4325 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
4326 {
4327 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
4328 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
4329 if (cmp_code == CODE_FOR_nothing)
4330 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
4331 if (cmp_code == CODE_FOR_nothing)
4332 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
4333 if (cmp_code == CODE_FOR_nothing)
4334 continue;
4335
4336 /* Must make sure the size fits the insn's mode. */
4337 if (CONST_INT_P (size)
4338 ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
4339 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
4340 > GET_MODE_BITSIZE (cmp_mode)))
4341 continue;
4342
4343 result_mode = insn_data[cmp_code].operand[0].mode;
4344 result = gen_reg_rtx (result_mode);
4345 size = convert_to_mode (cmp_mode, size, 1);
4346 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4347
4348 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4349 *pmode = result_mode;
4350 return;
4351 }
4352
4353 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4354 goto fail;
4355
4356 /* Otherwise call a library function. */
4357 result = emit_block_comp_via_libcall (x, y, size);
4358
4359 x = result;
4360 y = const0_rtx;
4361 mode = TYPE_MODE (integer_type_node);
4362 methods = OPTAB_LIB_WIDEN;
4363 unsignedp = false;
4364 }
4365
4366 /* Don't allow operands to the compare to trap, as that can put the
4367 compare and branch in different basic blocks. */
4368 if (cfun->can_throw_non_call_exceptions)
4369 {
4370 if (may_trap_p (x))
4371 x = copy_to_reg (x);
4372 if (may_trap_p (y))
4373 y = copy_to_reg (y);
4374 }
4375
4376 if (GET_MODE_CLASS (mode) == MODE_CC)
4377 {
4378 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
4379 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4380 gcc_assert (icode != CODE_FOR_nothing
4381 && insn_operand_matches (icode, 0, test));
4382 *ptest = test;
4383 return;
4384 }
4385
4386 mclass = GET_MODE_CLASS (mode);
4387 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4388 FOR_EACH_MODE_FROM (cmp_mode, mode)
4389 {
4390 enum insn_code icode;
4391 icode = optab_handler (cbranch_optab, cmp_mode);
4392 if (icode != CODE_FOR_nothing
4393 && insn_operand_matches (icode, 0, test))
4394 {
4395 rtx_insn *last = get_last_insn ();
4396 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4397 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4398 if (op0 && op1
4399 && insn_operand_matches (icode, 1, op0)
4400 && insn_operand_matches (icode, 2, op1))
4401 {
4402 XEXP (test, 0) = op0;
4403 XEXP (test, 1) = op1;
4404 *ptest = test;
4405 *pmode = cmp_mode;
4406 return;
4407 }
4408 delete_insns_since (last);
4409 }
4410
4411 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4412 break;
4413 }
4414
4415 if (methods != OPTAB_LIB_WIDEN)
4416 goto fail;
4417
4418 if (SCALAR_FLOAT_MODE_P (mode))
4419 {
4420 /* Small trick if UNORDERED isn't implemented by the hardware. */
4421 if (comparison == UNORDERED && rtx_equal_p (x, y))
4422 {
4423 prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
4424 ptest, pmode);
4425 if (*ptest)
4426 return;
4427 }
4428
4429 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4430 }
4431 else
4432 {
4433 rtx result;
4434 machine_mode ret_mode;
4435
4436 /* Handle a libcall just for the mode we are using. */
4437 libfunc = optab_libfunc (cmp_optab, mode);
4438 gcc_assert (libfunc);
4439
4440 /* If we want unsigned, and this mode has a distinct unsigned
4441 comparison routine, use that. */
4442 if (unsignedp)
4443 {
4444 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4445 if (ulibfunc)
4446 libfunc = ulibfunc;
4447 }
4448
4449 ret_mode = targetm.libgcc_cmp_return_mode ();
4450 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4451 ret_mode, x, mode, y, mode);
4452
4453 /* There are two kinds of comparison routines. Biased routines
4454 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4455 of gcc expect that the comparison operation is equivalent
4456 to the modified comparison. For signed comparisons compare the
4457 result against 1 in the biased case, and zero in the unbiased
4458 case. For unsigned comparisons always compare against 1 after
4459 biasing the unbiased result by adding 1. This gives us a way to
4460 represent LTU.
4461 The comparisons in the fixed-point helper library are always
4462 biased. */
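      /* Concretely: a biased helper in the style of libgcc's __cmpdi2
         returns 0/1/2 for less/equal/greater, so e.g. "x < y" becomes
         "result < 1"; an unbiased helper returns -1/0/1, and the same
         test becomes "result < 0".  In the unsigned unbiased case the
         result is first biased by adding 1 and then compared against 1,
         which LTU can express.  */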
4463 x = result;
4464 y = const1_rtx;
4465
4466 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4467 {
4468 if (unsignedp)
4469 x = plus_constant (ret_mode, result, 1);
4470 else
4471 y = const0_rtx;
4472 }
4473
4474 *pmode = ret_mode;
4475 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4476 ptest, pmode);
4477 }
4478
4479 return;
4480
4481 fail:
4482 *ptest = NULL_RTX;
4483 }
4484
4485 /* Before emitting an insn with code ICODE, make sure that X, which is going
4486 to be used for operand OPNUM of the insn, is converted from mode MODE to
4487 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4488 that it is accepted by the operand predicate. Return the new value. */
4489
4490 rtx
4491 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4492 machine_mode wider_mode, int unsignedp)
4493 {
4494 if (mode != wider_mode)
4495 x = convert_modes (wider_mode, mode, x, unsignedp);
4496
4497 if (!insn_operand_matches (icode, opnum, x))
4498 {
4499 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4500 if (reload_completed)
4501 return NULL_RTX;
4502 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4503 return NULL_RTX;
4504 x = copy_to_mode_reg (op_mode, x);
4505 }
4506
4507 return x;
4508 }
4509
4510 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4511 we can do the branch. */
4512
4513 static void
4514 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4515 profile_probability prob)
4516 {
4517 machine_mode optab_mode;
4518 enum mode_class mclass;
4519 enum insn_code icode;
4520 rtx_insn *insn;
4521
4522 mclass = GET_MODE_CLASS (mode);
4523 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4524 icode = optab_handler (cbranch_optab, optab_mode);
4525
4526 gcc_assert (icode != CODE_FOR_nothing);
4527 gcc_assert (insn_operand_matches (icode, 0, test));
4528 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4529 XEXP (test, 1), label));
4530 if (prob.initialized_p ()
4531 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4532 && insn
4533 && JUMP_P (insn)
4534 && any_condjump_p (insn)
4535 && !find_reg_note (insn, REG_BR_PROB, 0))
4536 add_reg_br_prob_note (insn, prob);
4537 }
4538
4539 /* Generate code to compare X with Y so that the condition codes are
4540 set and to jump to LABEL if the condition is true. If X is a
4541 constant and Y is not a constant, then the comparison is swapped to
4542 ensure that the comparison RTL has the canonical form.
4543
4544 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4545 need to be widened. UNSIGNEDP is also used to select the proper
4546 branch condition code.
4547
4548 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4549
4550 MODE is the mode of the inputs (in case they are const_int).
4551
4552 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4553 It will be potentially converted into an unsigned variant based on
4554 UNSIGNEDP to select a proper jump instruction.
4555
4556 PROB is the probability of jumping to LABEL. */
4557
4558 void
4559 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4560 machine_mode mode, int unsignedp, rtx label,
4561 profile_probability prob)
4562 {
4563 rtx op0 = x, op1 = y;
4564 rtx test;
4565
4566 /* Swap operands and condition to ensure canonical RTL. */
4567 if (swap_commutative_operands_p (x, y)
4568 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4569 {
4570 op0 = y, op1 = x;
4571 comparison = swap_condition (comparison);
4572 }
4573
4574 /* If OP0 is still a constant, then both X and Y must be constants
4575 or the opposite comparison is not supported. Force X into a register
4576 to create canonical RTL. */
4577 if (CONSTANT_P (op0))
4578 op0 = force_reg (mode, op0);
4579
4580 if (unsignedp)
4581 comparison = unsigned_condition (comparison);
4582
4583 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4584 &test, &mode);
4585 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4586 }
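/* Typical usage, as a sketch: jump to LABEL when X > Y, both SImode
   and signed, with no branch probability information:

     emit_cmp_and_jump_insns (x, y, GT, NULL_RTX, SImode, 0, label,
                              profile_probability::uninitialized ());  */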
4587
4588 \f
4589 /* Emit a library call comparison between floating point X and Y.
4590 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4591
4592 static void
4593 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4594 rtx *ptest, machine_mode *pmode)
4595 {
4596 enum rtx_code swapped = swap_condition (comparison);
4597 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4598 machine_mode orig_mode = GET_MODE (x);
4599 machine_mode mode;
4600 rtx true_rtx, false_rtx;
4601 rtx value, target, equiv;
4602 rtx_insn *insns;
4603 rtx libfunc = 0;
4604 bool reversed_p = false;
4605 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4606
4607 FOR_EACH_MODE_FROM (mode, orig_mode)
4608 {
4609 if (code_to_optab (comparison)
4610 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4611 break;
4612
4613 if (code_to_optab (swapped)
4614 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4615 {
4616 std::swap (x, y);
4617 comparison = swapped;
4618 break;
4619 }
4620
4621 if (code_to_optab (reversed)
4622 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4623 {
4624 comparison = reversed;
4625 reversed_p = true;
4626 break;
4627 }
4628 }
4629
4630 gcc_assert (mode != VOIDmode);
4631
4632 if (mode != orig_mode)
4633 {
4634 x = convert_to_mode (mode, x, 0);
4635 y = convert_to_mode (mode, y, 0);
4636 }
4637
4638 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4639 the RTL. This allows the RTL optimizers to delete the libcall if the
4640 condition can be determined at compile-time. */
4641 if (comparison == UNORDERED
4642 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4643 {
4644 true_rtx = const_true_rtx;
4645 false_rtx = const0_rtx;
4646 }
4647 else
4648 {
4649 switch (comparison)
4650 {
4651 case EQ:
4652 true_rtx = const0_rtx;
4653 false_rtx = const_true_rtx;
4654 break;
4655
4656 case NE:
4657 true_rtx = const_true_rtx;
4658 false_rtx = const0_rtx;
4659 break;
4660
4661 case GT:
4662 true_rtx = const1_rtx;
4663 false_rtx = const0_rtx;
4664 break;
4665
4666 case GE:
4667 true_rtx = const0_rtx;
4668 false_rtx = constm1_rtx;
4669 break;
4670
4671 case LT:
4672 true_rtx = constm1_rtx;
4673 false_rtx = const0_rtx;
4674 break;
4675
4676 case LE:
4677 true_rtx = const0_rtx;
4678 false_rtx = const1_rtx;
4679 break;
4680
4681 default:
4682 gcc_unreachable ();
4683 }
4684 }
4685
4686 if (comparison == UNORDERED)
4687 {
4688 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4689 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4690 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4691 temp, const_true_rtx, equiv);
4692 }
4693 else
4694 {
4695 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4696 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4697 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4698 equiv, true_rtx, false_rtx);
4699 }
4700
4701 start_sequence ();
4702 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4703 cmp_mode, x, mode, y, mode);
4704 insns = get_insns ();
4705 end_sequence ();
4706
4707 target = gen_reg_rtx (cmp_mode);
4708 emit_libcall_block (insns, target, value, equiv);
4709
4710 if (comparison == UNORDERED
4711 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4712 || reversed_p)
4713 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4714 else
4715 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4716
4717 *pmode = cmp_mode;
4718 }
4719 \f
4720 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4721
4722 void
4723 emit_indirect_jump (rtx loc)
4724 {
4725 if (!targetm.have_indirect_jump ())
4726 sorry ("indirect jumps are not available on this target");
4727 else
4728 {
4729 class expand_operand ops[1];
4730 create_address_operand (&ops[0], loc);
4731 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4732 emit_barrier ();
4733 }
4734 }
4735 \f
4736
4737 /* Emit a conditional move instruction if the machine supports one for that
4738 condition and machine mode.
4739
4740 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4741 the mode to use should they be constants. If it is VOIDmode, they cannot
4742 both be constants.
4743
4744 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4745 should be stored there. MODE is the mode to use should they be constants.
4746 If it is VOIDmode, they cannot both be constants.
4747
4748 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4749 is not supported. */
4750
4751 rtx
4752 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4753 machine_mode cmode, rtx op2, rtx op3,
4754 machine_mode mode, int unsignedp)
4755 {
4756 rtx comparison;
4757 rtx_insn *last;
4758 enum insn_code icode;
4759 enum rtx_code reversed;
4760
4761 /* If the two source operands are identical, that's just a move. */
4762
4763 if (rtx_equal_p (op2, op3))
4764 {
4765 if (!target)
4766 target = gen_reg_rtx (mode);
4767
4768 emit_move_insn (target, op3);
4769 return target;
4770 }
4771
4772 /* If one operand is constant, make it the second one. Only do this
4773 if the other operand is not constant as well. */
4774
4775 if (swap_commutative_operands_p (op0, op1))
4776 {
4777 std::swap (op0, op1);
4778 code = swap_condition (code);
4779 }
4780
4781 /* get_condition will prefer to generate LT and GT even if the old
4782 comparison was against zero, so undo that canonicalization here since
4783 comparisons against zero are cheaper. */
4784 if (code == LT && op1 == const1_rtx)
4785 code = LE, op1 = const0_rtx;
4786 else if (code == GT && op1 == constm1_rtx)
4787 code = GE, op1 = const0_rtx;
4788
4789 if (cmode == VOIDmode)
4790 cmode = GET_MODE (op0);
4791
4792 enum rtx_code orig_code = code;
4793 bool swapped = false;
4794 if (swap_commutative_operands_p (op2, op3)
4795 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4796 != UNKNOWN))
4797 {
4798 std::swap (op2, op3);
4799 code = reversed;
4800 swapped = true;
4801 }
4802
4803 if (mode == VOIDmode)
4804 mode = GET_MODE (op2);
4805
4806 icode = direct_optab_handler (movcc_optab, mode);
4807
4808 if (icode == CODE_FOR_nothing)
4809 return NULL_RTX;
4810
4811 if (!target)
4812 target = gen_reg_rtx (mode);
4813
4814 for (int pass = 0; ; pass++)
4815 {
4816 code = unsignedp ? unsigned_condition (code) : code;
4817 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4818
4819 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4820 punt and let the caller figure out how best to deal with this
4821 situation. */
4822 if (COMPARISON_P (comparison))
4823 {
4824 saved_pending_stack_adjust save;
4825 save_pending_stack_adjust (&save);
4826 last = get_last_insn ();
4827 do_pending_stack_adjust ();
4828 machine_mode cmpmode = cmode;
4829 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4830 GET_CODE (comparison), NULL_RTX, unsignedp,
4831 OPTAB_WIDEN, &comparison, &cmpmode);
4832 if (comparison)
4833 {
4834 class expand_operand ops[4];
4835
4836 create_output_operand (&ops[0], target, mode);
4837 create_fixed_operand (&ops[1], comparison);
4838 create_input_operand (&ops[2], op2, mode);
4839 create_input_operand (&ops[3], op3, mode);
4840 if (maybe_expand_insn (icode, 4, ops))
4841 {
4842 if (ops[0].value != target)
4843 convert_move (target, ops[0].value, false);
4844 return target;
4845 }
4846 }
4847 delete_insns_since (last);
4848 restore_pending_stack_adjust (&save);
4849 }
4850
4851 if (pass == 1)
4852 return NULL_RTX;
4853
4854 /* If the preferred op2/op3 order is not usable, retry with other
4855 operand order, perhaps it will expand successfully. */
4856 if (swapped)
4857 code = orig_code;
4858 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4859 NULL))
4860 != UNKNOWN)
4861 code = reversed;
4862 else
4863 return NULL_RTX;
4864 std::swap (op2, op3);
4865 }
4866 }
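/* A sketch of a typical call, computing target = (a < b ? c : d) with
   all operands SImode and signed:

     rtx r = emit_conditional_move (target, LT, a, b, SImode,
                                    c, d, SImode, 0);
     if (!r)
       ... fall back to an explicit compare-and-branch sequence ...  */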
4867
4868
4869 /* Emit a conditional negate or bitwise complement using the
4870 negcc or notcc optabs if available. Return NULL_RTX if such operations
4871 are not available. Otherwise return the RTX holding the result.
4872 TARGET is the desired destination of the result. COND is the comparison
4873 on which to negate. If COND is true move into TARGET the negation
4874 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4875 CODE is either NEG or NOT. MODE is the machine mode in which the
4876 operation is performed. */
4877
4878 rtx
4879 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4880 machine_mode mode, rtx cond, rtx op1,
4881 rtx op2)
4882 {
4883 optab op = unknown_optab;
4884 if (code == NEG)
4885 op = negcc_optab;
4886 else if (code == NOT)
4887 op = notcc_optab;
4888 else
4889 gcc_unreachable ();
4890
4891 insn_code icode = direct_optab_handler (op, mode);
4892
4893 if (icode == CODE_FOR_nothing)
4894 return NULL_RTX;
4895
4896 if (!target)
4897 target = gen_reg_rtx (mode);
4898
4899 rtx_insn *last = get_last_insn ();
4900 class expand_operand ops[4];
4901
4902 create_output_operand (&ops[0], target, mode);
4903 create_fixed_operand (&ops[1], cond);
4904 create_input_operand (&ops[2], op1, mode);
4905 create_input_operand (&ops[3], op2, mode);
4906
4907 if (maybe_expand_insn (icode, 4, ops))
4908 {
4909 if (ops[0].value != target)
4910 convert_move (target, ops[0].value, false);
4911
4912 return target;
4913 }
4914 delete_insns_since (last);
4915 return NULL_RTX;
4916 }
4917
4918 /* Emit a conditional addition instruction if the machine supports one for that
4919 condition and machine mode.
4920
4921 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4922 the mode to use should they be constants. If it is VOIDmode, they cannot
4923 both be constants.
4924
4925 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4926 should be stored there. MODE is the mode to use should they be constants.
4927 If it is VOIDmode, they cannot both be constants.
4928
4929 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4930 is not supported. */
4931
4932 rtx
4933 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4934 machine_mode cmode, rtx op2, rtx op3,
4935 machine_mode mode, int unsignedp)
4936 {
4937 rtx comparison;
4938 rtx_insn *last;
4939 enum insn_code icode;
4940
4941 /* If one operand is constant, make it the second one. Only do this
4942 if the other operand is not constant as well. */
4943
4944 if (swap_commutative_operands_p (op0, op1))
4945 {
4946 std::swap (op0, op1);
4947 code = swap_condition (code);
4948 }
4949
4950 /* get_condition will prefer to generate LT and GT even if the old
4951 comparison was against zero, so undo that canonicalization here since
4952 comparisons against zero are cheaper. */
4953 if (code == LT && op1 == const1_rtx)
4954 code = LE, op1 = const0_rtx;
4955 else if (code == GT && op1 == constm1_rtx)
4956 code = GE, op1 = const0_rtx;
4957
4958 if (cmode == VOIDmode)
4959 cmode = GET_MODE (op0);
4960
4961 if (mode == VOIDmode)
4962 mode = GET_MODE (op2);
4963
4964 icode = optab_handler (addcc_optab, mode);
4965
4966 if (icode == CODE_FOR_nothing)
4967 return 0;
4968
4969 if (!target)
4970 target = gen_reg_rtx (mode);
4971
4972 code = unsignedp ? unsigned_condition (code) : code;
4973 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4974
4975 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4976 return NULL and let the caller figure out how best to deal with this
4977 situation. */
4978 if (!COMPARISON_P (comparison))
4979 return NULL_RTX;
4980
4981 do_pending_stack_adjust ();
4982 last = get_last_insn ();
4983 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4984 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4985 &comparison, &cmode);
4986 if (comparison)
4987 {
4988 class expand_operand ops[4];
4989
4990 create_output_operand (&ops[0], target, mode);
4991 create_fixed_operand (&ops[1], comparison);
4992 create_input_operand (&ops[2], op2, mode);
4993 create_input_operand (&ops[3], op3, mode);
4994 if (maybe_expand_insn (icode, 4, ops))
4995 {
4996 if (ops[0].value != target)
4997 convert_move (target, ops[0].value, false);
4998 return target;
4999 }
5000 }
5001 delete_insns_since (last);
5002 return NULL_RTX;
5003 }
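/* A sketch of a typical call, computing target = (a < b ? c + d : c)
   with all operands SImode and signed:

     rtx r = emit_conditional_add (target, LT, a, b, SImode,
                                   c, d, SImode, 0);
     if (!r)
       ... fall back to branches ...  */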
5004 \f
5005 /* These functions attempt to generate an insn body, rather than
5006 emitting the insn, but if the gen function already emits them, we
5007 make no attempt to turn them back into naked patterns. */
5008
5009 /* Generate and return an insn body to add Y to X. */
5010
5011 rtx_insn *
5012 gen_add2_insn (rtx x, rtx y)
5013 {
5014 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
5015
5016 gcc_assert (insn_operand_matches (icode, 0, x));
5017 gcc_assert (insn_operand_matches (icode, 1, x));
5018 gcc_assert (insn_operand_matches (icode, 2, y));
5019
5020 return GEN_FCN (icode) (x, x, y);
5021 }
5022
5023 /* Generate and return an insn body to add r1 and c,
5024 storing the result in r0. */
5025
5026 rtx_insn *
5027 gen_add3_insn (rtx r0, rtx r1, rtx c)
5028 {
5029 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
5030
5031 if (icode == CODE_FOR_nothing
5032 || !insn_operand_matches (icode, 0, r0)
5033 || !insn_operand_matches (icode, 1, r1)
5034 || !insn_operand_matches (icode, 2, c))
5035 return NULL;
5036
5037 return GEN_FCN (icode) (r0, r1, c);
5038 }
5039
5040 int
5041 have_add2_insn (rtx x, rtx y)
5042 {
5043 enum insn_code icode;
5044
5045 gcc_assert (GET_MODE (x) != VOIDmode);
5046
5047 icode = optab_handler (add_optab, GET_MODE (x));
5048
5049 if (icode == CODE_FOR_nothing)
5050 return 0;
5051
5052 if (!insn_operand_matches (icode, 0, x)
5053 || !insn_operand_matches (icode, 1, x)
5054 || !insn_operand_matches (icode, 2, y))
5055 return 0;
5056
5057 return 1;
5058 }
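/* A caller typically pairs the predicate with the generator (sketch):

     if (have_add2_insn (x, y))
       emit_insn (gen_add2_insn (x, y));    // x = x + y  */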
5059
5060 /* Generate and return an insn body to add Y and Z into X using the addptr3 pattern. */
5061
5062 rtx_insn *
5063 gen_addptr3_insn (rtx x, rtx y, rtx z)
5064 {
5065 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
5066
5067 gcc_assert (insn_operand_matches (icode, 0, x));
5068 gcc_assert (insn_operand_matches (icode, 1, y));
5069 gcc_assert (insn_operand_matches (icode, 2, z));
5070
5071 return GEN_FCN (icode) (x, y, z);
5072 }
5073
5074 /* Return true if the target implements an addptr pattern and X, Y,
5075 and Z are valid for the pattern predicates. */
5076
5077 int
5078 have_addptr3_insn (rtx x, rtx y, rtx z)
5079 {
5080 enum insn_code icode;
5081
5082 gcc_assert (GET_MODE (x) != VOIDmode);
5083
5084 icode = optab_handler (addptr3_optab, GET_MODE (x));
5085
5086 if (icode == CODE_FOR_nothing)
5087 return 0;
5088
5089 if (!insn_operand_matches (icode, 0, x)
5090 || !insn_operand_matches (icode, 1, y)
5091 || !insn_operand_matches (icode, 2, z))
5092 return 0;
5093
5094 return 1;
5095 }
5096
5097 /* Generate and return an insn body to subtract Y from X. */
5098
5099 rtx_insn *
5100 gen_sub2_insn (rtx x, rtx y)
5101 {
5102 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
5103
5104 gcc_assert (insn_operand_matches (icode, 0, x));
5105 gcc_assert (insn_operand_matches (icode, 1, x));
5106 gcc_assert (insn_operand_matches (icode, 2, y));
5107
5108 return GEN_FCN (icode) (x, x, y);
5109 }
5110
5111 /* Generate and return an insn body to subtract c from r1,
5112 storing the result in r0. */
5113
5114 rtx_insn *
5115 gen_sub3_insn (rtx r0, rtx r1, rtx c)
5116 {
5117 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
5118
5119 if (icode == CODE_FOR_nothing
5120 || !insn_operand_matches (icode, 0, r0)
5121 || !insn_operand_matches (icode, 1, r1)
5122 || !insn_operand_matches (icode, 2, c))
5123 return NULL;
5124
5125 return GEN_FCN (icode) (r0, r1, c);
5126 }
5127
5128 int
5129 have_sub2_insn (rtx x, rtx y)
5130 {
5131 enum insn_code icode;
5132
5133 gcc_assert (GET_MODE (x) != VOIDmode);
5134
5135 icode = optab_handler (sub_optab, GET_MODE (x));
5136
5137 if (icode == CODE_FOR_nothing)
5138 return 0;
5139
5140 if (!insn_operand_matches (icode, 0, x)
5141 || !insn_operand_matches (icode, 1, x)
5142 || !insn_operand_matches (icode, 2, y))
5143 return 0;
5144
5145 return 1;
5146 }
5147 \f
5148 /* Generate the body of an insn to extend Y (with mode MFROM)
5149 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
5150
5151 rtx_insn *
5152 gen_extend_insn (rtx x, rtx y, machine_mode mto,
5153 machine_mode mfrom, int unsignedp)
5154 {
5155 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
5156 return GEN_FCN (icode) (x, y);
5157 }
5158 \f
5159 /* Generate code to convert FROM to floating point
5160 and store in TO. FROM must be fixed point and not VOIDmode.
5161 UNSIGNEDP nonzero means regard FROM as unsigned.
5162 Normally this is done by correcting the final value
5163 if it is negative. */
5164
5165 void
5166 expand_float (rtx to, rtx from, int unsignedp)
5167 {
5168 enum insn_code icode;
5169 rtx target = to;
5170 scalar_mode from_mode, to_mode;
5171 machine_mode fmode, imode;
5172 bool can_do_signed = false;
5173
5174 /* Crash now, because we won't be able to decide which mode to use. */
5175 gcc_assert (GET_MODE (from) != VOIDmode);
5176
5177 /* Look for an insn to do the conversion. Do it in the specified
5178 modes if possible; otherwise convert either input, output or both to
5179 wider mode. If the integer mode is wider than the mode of FROM,
5180 we can do the conversion signed even if the input is unsigned. */
5181
5182 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
5183 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
5184 {
5185 int doing_unsigned = unsignedp;
5186
5187 if (fmode != GET_MODE (to)
5188 && (significand_size (fmode)
5189 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
5190 continue;
5191
5192 icode = can_float_p (fmode, imode, unsignedp);
5193 if (icode == CODE_FOR_nothing && unsignedp)
5194 {
5195 enum insn_code scode = can_float_p (fmode, imode, 0);
5196 if (scode != CODE_FOR_nothing)
5197 can_do_signed = true;
5198 if (imode != GET_MODE (from))
5199 icode = scode, doing_unsigned = 0;
5200 }
5201
5202 if (icode != CODE_FOR_nothing)
5203 {
5204 if (imode != GET_MODE (from))
5205 from = convert_to_mode (imode, from, unsignedp);
5206
5207 if (fmode != GET_MODE (to))
5208 target = gen_reg_rtx (fmode);
5209
5210 emit_unop_insn (icode, target, from,
5211 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5212
5213 if (target != to)
5214 convert_move (to, target, 0);
5215 return;
5216 }
5217 }
5218
5219 /* Unsigned integer, and no way to convert directly. Convert as signed,
5220 then conditionally adjust the result. */
5221 if (unsignedp
5222 && can_do_signed
5223 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
5224 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
5225 {
5226 opt_scalar_mode fmode_iter;
5227 rtx_code_label *label = gen_label_rtx ();
5228 rtx temp;
5229 REAL_VALUE_TYPE offset;
5230
5231 /* Look for a usable floating mode FMODE wider than the source and at
5232 least as wide as the target. Using FMODE will avoid rounding woes
5233 with unsigned values greater than the signed maximum value. */
5234
5235 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
5236 {
5237 scalar_mode fmode = fmode_iter.require ();
5238 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
5239 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
5240 break;
5241 }
5242
5243 if (!fmode_iter.exists (&fmode))
5244 {
5245 /* There is no such mode. Pretend the target is wide enough. */
5246 fmode = to_mode;
5247
5248 /* Avoid double-rounding when TO is narrower than FROM. */
5249 if ((significand_size (fmode) + 1)
5250 < GET_MODE_PRECISION (from_mode))
5251 {
5252 rtx temp1;
5253 rtx_code_label *neglabel = gen_label_rtx ();
5254
5255 /* Don't use TARGET if it isn't a register, is a hard register,
5256 or is the wrong mode. */
5257 if (!REG_P (target)
5258 || REGNO (target) < FIRST_PSEUDO_REGISTER
5259 || GET_MODE (target) != fmode)
5260 target = gen_reg_rtx (fmode);
5261
5262 imode = from_mode;
5263 do_pending_stack_adjust ();
5264
5265 /* Test whether the sign bit is set. */
5266 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5267 0, neglabel);
5268
5269 /* The sign bit is not set. Convert as signed. */
5270 expand_float (target, from, 0);
5271 emit_jump_insn (targetm.gen_jump (label));
5272 emit_barrier ();
5273
5274 /* The sign bit is set.
5275 Convert to a usable (positive signed) value by shifting right
5276 one bit, while remembering if a nonzero bit was shifted
5277 out; i.e., compute (from & 1) | (from >> 1). */
5278
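	      /* As an illustration, for a 64-bit FROM this branch behaves
		 like the following C sketch (illustrative name, assuming
		 two's complement):

		   double
		   u64_to_double_neg_path (unsigned long long x)
		   {
		     unsigned long long half = (x >> 1) | (x & 1);
		     return 2.0 * (double) (long long) half;
		   }

		 Or-ing the shifted-out bit back in keeps the rounding of
		 HALF's conversion identical to what rounding X directly
		 would produce, and the final doubling is exact in binary
		 floating point.  */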
5279 emit_label (neglabel);
5280 temp = expand_binop (imode, and_optab, from, const1_rtx,
5281 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5282 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
5283 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5284 OPTAB_LIB_WIDEN);
5285 expand_float (target, temp, 0);
5286
5287 /* Multiply by 2 to undo the shift above. */
5288 temp = expand_binop (fmode, add_optab, target, target,
5289 target, 0, OPTAB_LIB_WIDEN);
5290 if (temp != target)
5291 emit_move_insn (target, temp);
5292
5293 do_pending_stack_adjust ();
5294 emit_label (label);
5295 goto done;
5296 }
5297 }
5298
5299 /* If we are about to do some arithmetic to correct for an
5300 unsigned operand, do it in a pseudo-register. */
5301
5302 if (to_mode != fmode
5303 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5304 target = gen_reg_rtx (fmode);
5305
5306 /* Convert as signed integer to floating. */
5307 expand_float (target, from, 0);
5308
5309 /* If FROM is negative (and therefore TO is negative),
5310 correct its value by 2**bitwidth. */
5311
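      /* A minimal C sketch of this correction, for a 32-bit source floated
	 via double (a mode wider than the source, as searched for above;
	 illustrative name):

	   double
	   u32_to_double (unsigned int x)
	   {
	     double d = (double) (int) x;   /* signed conversion */
	     if ((int) x < 0)
	       d += 0x1p32;                 /* correct by 2**bitwidth */
	     return d;
	   }
      */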
5312 do_pending_stack_adjust ();
5313 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
5314 0, label);
5315 
5317 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
5318 temp = expand_binop (fmode, add_optab, target,
5319 const_double_from_real_value (offset, fmode),
5320 target, 0, OPTAB_LIB_WIDEN);
5321 if (temp != target)
5322 emit_move_insn (target, temp);
5323
5324 do_pending_stack_adjust ();
5325 emit_label (label);
5326 goto done;
5327 }
5328
5329 /* No hardware instruction available; call a library routine. */
5330 {
5331 rtx libfunc;
5332 rtx_insn *insns;
5333 rtx value;
5334 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5335
5336 if (is_narrower_int_mode (GET_MODE (from), SImode))
5337 from = convert_to_mode (SImode, from, unsignedp);
5338
5339 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5340 gcc_assert (libfunc);
5341
5342 start_sequence ();
5343
5344 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5345 GET_MODE (to), from, GET_MODE (from));
5346 insns = get_insns ();
5347 end_sequence ();
5348
5349 emit_libcall_block (insns, target, value,
5350 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5351 GET_MODE (to), from));
5352 }
5353
5354 done:
5355
5356 /* Copy result to requested destination
5357 if we have been computing in a temp location. */
5358
5359 if (target != to)
5360 {
5361 if (GET_MODE (target) == GET_MODE (to))
5362 emit_move_insn (to, target);
5363 else
5364 convert_move (to, target, 0);
5365 }
5366 }
5367 \f
5368 /* Generate code to convert FROM to fixed point and store in TO. FROM
5369 must be floating point. */
5370
5371 void
5372 expand_fix (rtx to, rtx from, int unsignedp)
5373 {
5374 enum insn_code icode;
5375 rtx target = to;
5376 machine_mode fmode, imode;
5377 opt_scalar_mode fmode_iter;
5378 bool must_trunc = false;
5379
5380 /* We first try to find a pair of modes, one real and one integer, at
5381 least as wide as FROM and TO, respectively, in which we can open-code
5382 this conversion. If the integer mode is wider than the mode of TO,
5383 we can do the conversion either signed or unsigned. */
5384
5385 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5386 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5387 {
5388 int doing_unsigned = unsignedp;
5389
5390 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5391 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5392 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5393
5394 if (icode != CODE_FOR_nothing)
5395 {
5396 rtx_insn *last = get_last_insn ();
5397 if (fmode != GET_MODE (from))
5398 from = convert_to_mode (fmode, from, 0);
5399
5400 if (must_trunc)
5401 {
5402 rtx temp = gen_reg_rtx (GET_MODE (from));
5403 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5404 temp, 0);
5405 }
5406
5407 if (imode != GET_MODE (to))
5408 target = gen_reg_rtx (imode);
5409
5410 if (maybe_emit_unop_insn (icode, target, from,
5411 doing_unsigned ? UNSIGNED_FIX : FIX))
5412 {
5413 if (target != to)
5414 convert_move (to, target, unsignedp);
5415 return;
5416 }
5417 delete_insns_since (last);
5418 }
5419 }
5420
5421 /* For an unsigned conversion, there is one more way to do it.
5422 If we have a signed conversion, we generate code that compares
5423 the real value to the largest representable positive number. If it
5424 is smaller, the conversion is done normally. Otherwise, subtract
5425 one plus the highest signed number, convert, and add it back.
5426
5427 We only need to check all real modes, since we know we didn't find
5428 anything with a wider integer mode.
5429
5430 This code used to extend the FP value into a mode wider than the
5431 destination. This is needed for decimal float modes, which cannot
5432 accurately represent one plus the highest signed number of the same
5433 size, but not for binary modes. Consider, for instance, conversion
5434 from SFmode into DImode.
5435 
5436 The hot path through the code deals with inputs smaller than 2^63
5437 and does just the conversion, so there are no bits to lose.
5438 
5439 In the other path we know the value is positive and in the range
5440 2^63..2^64-1 inclusive (for any other input, overflow happens and the
5441 result is undefined), so the most significant bit set in the mantissa
5442 corresponds to 2^63. The subtraction of 2^63 should not generate any
5443 rounding, as it simply clears out that bit. The rest is trivial. */
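   As an illustration, for DFmode -> DImode the emitted code behaves like
   this C sketch (illustrative name):

     unsigned long long
     dtou64 (double d)
     {
       if (d < 0x1p63)
	 return (long long) d;	/* plain signed fix */
       /* 2^63 <= d < 2^64: clear the top bit, convert, put it back.  */
       return (unsigned long long) (long long) (d - 0x1p63)
	      ^ (1ULL << 63);
     }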
5444
5445 scalar_int_mode to_mode;
5446 if (unsignedp
5447 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
5448 && HWI_COMPUTABLE_MODE_P (to_mode))
5449 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
5450 {
5451 scalar_mode fmode = fmode_iter.require ();
5452 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
5453 0, &must_trunc)
5454 && (!DECIMAL_FLOAT_MODE_P (fmode)
5455 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
5456 {
5457 int bitsize;
5458 REAL_VALUE_TYPE offset;
5459 rtx limit;
5460 rtx_code_label *lab1, *lab2;
5461 rtx_insn *insn;
5462
5463 bitsize = GET_MODE_PRECISION (to_mode);
5464 real_2expN (&offset, bitsize - 1, fmode);
5465 limit = const_double_from_real_value (offset, fmode);
5466 lab1 = gen_label_rtx ();
5467 lab2 = gen_label_rtx ();
5468
5469 if (fmode != GET_MODE (from))
5470 from = convert_to_mode (fmode, from, 0);
5471
5472 /* See if we need to do the subtraction. */
5473 do_pending_stack_adjust ();
5474 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
5475 GET_MODE (from), 0, lab1);
5476
5477 /* If not, do the signed "fix" and branch around fixup code. */
5478 expand_fix (to, from, 0);
5479 emit_jump_insn (targetm.gen_jump (lab2));
5480 emit_barrier ();
5481
5482 /* Otherwise, subtract 2**(N-1), convert to signed number,
5483 then add 2**(N-1). Do the addition using XOR since this
5484 will often generate better code. */
5485 emit_label (lab1);
5486 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5487 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5488 expand_fix (to, target, 0);
5489 target = expand_binop (to_mode, xor_optab, to,
5490 gen_int_mode
5491 (HOST_WIDE_INT_1 << (bitsize - 1),
5492 to_mode),
5493 to, 1, OPTAB_LIB_WIDEN);
5494
5495 if (target != to)
5496 emit_move_insn (to, target);
5497
5498 emit_label (lab2);
5499
5500 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5501 {
5502 /* Make a place for a REG_NOTE and add it. */
5503 insn = emit_move_insn (to, to);
5504 set_dst_reg_note (insn, REG_EQUAL,
5505 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5506 copy_rtx (from)),
5507 to);
5508 }
5509
5510 return;
5511 }
5512 }
5513
5514 /* We can't do it with an insn, so use a library call. But first ensure
5515 that the mode of TO is at least as wide as SImode, since those are the
5516 only library calls we know about. */
5517
5518 if (is_narrower_int_mode (GET_MODE (to), SImode))
5519 {
5520 target = gen_reg_rtx (SImode);
5521
5522 expand_fix (target, from, unsignedp);
5523 }
5524 else
5525 {
5526 rtx_insn *insns;
5527 rtx value;
5528 rtx libfunc;
5529
5530 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5531 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5532 gcc_assert (libfunc);
5533
5534 start_sequence ();
5535
5536 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5537 GET_MODE (to), from, GET_MODE (from));
5538 insns = get_insns ();
5539 end_sequence ();
5540
5541 emit_libcall_block (insns, target, value,
5542 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5543 GET_MODE (to), from));
5544 }
5545
5546 if (target != to)
5547 {
5548 if (GET_MODE (to) == GET_MODE (target))
5549 emit_move_insn (to, target);
5550 else
5551 convert_move (to, target, 0);
5552 }
5553 }
5554
5555
5556 /* Promote integer arguments for a libcall if necessary.
5557 emit_library_call_value cannot do the promotion because it does not
5558 know if it should do a signed or unsigned promotion. This is because
5559 there are no tree types defined for libcalls. */
5560
5561 static rtx
5562 prepare_libcall_arg (rtx arg, int uintp)
5563 {
5564 scalar_int_mode mode;
5565 machine_mode arg_mode;
5566 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5567 {
5568 /* If we need to promote the integer function argument we need to do
5569 it here instead of inside emit_library_call_value because in
5570 emit_library_call_value we don't know if we should do a signed or
5571 unsigned promotion. */
5572
5573 int unsigned_p = 0;
5574 arg_mode = promote_function_mode (NULL_TREE, mode,
5575 &unsigned_p, NULL_TREE, 0);
5576 if (arg_mode != mode)
5577 return convert_to_mode (arg_mode, arg, uintp);
5578 }
5579 return arg;
5580 }
5581
5582 /* Generate code to convert FROM to TO, where one of them is fixed-point.
5583 If UINTP is true, either TO or FROM is an unsigned integer.
5584 If SATP is true, we need to saturate the result. */
5585
5586 void
5587 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5588 {
5589 machine_mode to_mode = GET_MODE (to);
5590 machine_mode from_mode = GET_MODE (from);
5591 convert_optab tab;
5592 enum rtx_code this_code;
5593 enum insn_code code;
5594 rtx_insn *insns;
5595 rtx value;
5596 rtx libfunc;
5597
5598 if (to_mode == from_mode)
5599 {
5600 emit_move_insn (to, from);
5601 return;
5602 }
5603
5604 if (uintp)
5605 {
5606 tab = satp ? satfractuns_optab : fractuns_optab;
5607 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5608 }
5609 else
5610 {
5611 tab = satp ? satfract_optab : fract_optab;
5612 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5613 }
5614 code = convert_optab_handler (tab, to_mode, from_mode);
5615 if (code != CODE_FOR_nothing)
5616 {
5617 emit_unop_insn (code, to, from, this_code);
5618 return;
5619 }
5620
5621 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5622 gcc_assert (libfunc);
5623
5624 from = prepare_libcall_arg (from, uintp);
5625 from_mode = GET_MODE (from);
5626
5627 start_sequence ();
5628 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5629 from, from_mode);
5630 insns = get_insns ();
5631 end_sequence ();
5632
5633 emit_libcall_block (insns, to, value,
5634 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5635 }
5636
5637 /* Generate code to convert FROM to fixed point and store in TO. FROM
5638 must be floating point, TO must be signed. Use the conversion optab
5639 TAB to do the conversion. */
5640
5641 bool
5642 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5643 {
5644 enum insn_code icode;
5645 rtx target = to;
5646 machine_mode fmode, imode;
5647
5648 /* We first try to find a pair of modes, one real and one integer, at
5649 least as wide as FROM and TO, respectively, in which we can open-code
5650 this conversion. If the integer mode is wider than the mode of TO,
5651 we can do the conversion either signed or unsigned. */
5652
5653 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5654 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5655 {
5656 icode = convert_optab_handler (tab, imode, fmode);
5657 if (icode != CODE_FOR_nothing)
5658 {
5659 rtx_insn *last = get_last_insn ();
5660 if (fmode != GET_MODE (from))
5661 from = convert_to_mode (fmode, from, 0);
5662
5663 if (imode != GET_MODE (to))
5664 target = gen_reg_rtx (imode);
5665
5666 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5667 {
5668 delete_insns_since (last);
5669 continue;
5670 }
5671 if (target != to)
5672 convert_move (to, target, 0);
5673 return true;
5674 }
5675 }
5676
5677 return false;
5678 }
5679 \f
5680 /* Report whether we have an instruction to perform the operation
5681 specified by CODE on operands of mode MODE. */
5682 int
5683 have_insn_for (enum rtx_code code, machine_mode mode)
5684 {
5685 return (code_to_optab (code)
5686 && (optab_handler (code_to_optab (code), mode)
5687 != CODE_FOR_nothing));
5688 }
5689
5690 /* Print information about the current contents of the optabs on
5691 STDERR. */
5692
5693 DEBUG_FUNCTION void
5694 debug_optab_libfuncs (void)
5695 {
5696 int i, j, k;
5697
5698 /* Dump the arithmetic optabs. */
5699 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5700 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5701 {
5702 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5703 if (l)
5704 {
5705 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5706 fprintf (stderr, "%s\t%s:\t%s\n",
5707 GET_RTX_NAME (optab_to_code ((optab) i)),
5708 GET_MODE_NAME (j),
5709 XSTR (l, 0));
5710 }
5711 }
5712
5713 /* Dump the conversion optabs. */
5714 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5715 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5716 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5717 {
5718 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5719 (machine_mode) k);
5720 if (l)
5721 {
5722 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5723 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5724 GET_RTX_NAME (optab_to_code ((optab) i)),
5725 GET_MODE_NAME (j),
5726 GET_MODE_NAME (k),
5727 XSTR (l, 0));
5728 }
5729 }
5730 }
5731
5732 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5733 CODE. Return 0 on failure. */
5734
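/* For example (illustrative use), to trap when X is zero:

     rtx_insn *seq = gen_cond_trap (EQ, x, const0_rtx, const0_rtx);
     if (seq)
       emit_insn (seq);
*/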
5735 rtx_insn *
5736 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5737 {
5738 machine_mode mode = GET_MODE (op1);
5739 enum insn_code icode;
5740 rtx_insn *insn;
5741 rtx trap_rtx;
5742
5743 if (mode == VOIDmode)
5744 return 0;
5745
5746 icode = optab_handler (ctrap_optab, mode);
5747 if (icode == CODE_FOR_nothing)
5748 return 0;
5749
5750 /* Some targets only accept a zero trap code. */
5751 if (!insn_operand_matches (icode, 3, tcode))
5752 return 0;
5753
5754 do_pending_stack_adjust ();
5755 start_sequence ();
5756 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5757 &trap_rtx, &mode);
5758 if (!trap_rtx)
5759 insn = NULL;
5760 else
5761 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5762 tcode);
5763
5764 /* If that failed, then give up. */
5765 if (insn == 0)
5766 {
5767 end_sequence ();
5768 return 0;
5769 }
5770
5771 emit_insn (insn);
5772 insn = get_insns ();
5773 end_sequence ();
5774 return insn;
5775 }
5776
5777 /* Return rtx code for TCODE or UNKNOWN. Use UNSIGNEDP to select signed
5778 or unsigned operation code. */
5779
5780 enum rtx_code
5781 get_rtx_code_1 (enum tree_code tcode, bool unsignedp)
5782 {
5783 enum rtx_code code;
5784 switch (tcode)
5785 {
5786 case EQ_EXPR:
5787 code = EQ;
5788 break;
5789 case NE_EXPR:
5790 code = NE;
5791 break;
5792 case LT_EXPR:
5793 code = unsignedp ? LTU : LT;
5794 break;
5795 case LE_EXPR:
5796 code = unsignedp ? LEU : LE;
5797 break;
5798 case GT_EXPR:
5799 code = unsignedp ? GTU : GT;
5800 break;
5801 case GE_EXPR:
5802 code = unsignedp ? GEU : GE;
5803 break;
5804
5805 case UNORDERED_EXPR:
5806 code = UNORDERED;
5807 break;
5808 case ORDERED_EXPR:
5809 code = ORDERED;
5810 break;
5811 case UNLT_EXPR:
5812 code = UNLT;
5813 break;
5814 case UNLE_EXPR:
5815 code = UNLE;
5816 break;
5817 case UNGT_EXPR:
5818 code = UNGT;
5819 break;
5820 case UNGE_EXPR:
5821 code = UNGE;
5822 break;
5823 case UNEQ_EXPR:
5824 code = UNEQ;
5825 break;
5826 case LTGT_EXPR:
5827 code = LTGT;
5828 break;
5829
5830 case BIT_AND_EXPR:
5831 code = AND;
5832 break;
5833
5834 case BIT_IOR_EXPR:
5835 code = IOR;
5836 break;
5837
5838 default:
5839 code = UNKNOWN;
5840 break;
5841 }
5842 return code;
5843 }
5844
5845 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5846 or unsigned operation code. */
5847
5848 enum rtx_code
5849 get_rtx_code (enum tree_code tcode, bool unsignedp)
5850 {
5851 enum rtx_code code = get_rtx_code_1 (tcode, unsignedp);
5852 gcc_assert (code != UNKNOWN);
5853 return code;
5854 }
5855
5856 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5857 select signed or unsigned operators. OPNO holds the index of the
5858 first comparison operand for insn ICODE. Do not generate the
5859 compare instruction itself. */
5860
5861 rtx
5862 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5863 tree t_op0, tree t_op1, bool unsignedp,
5864 enum insn_code icode, unsigned int opno)
5865 {
5866 class expand_operand ops[2];
5867 rtx rtx_op0, rtx_op1;
5868 machine_mode m0, m1;
5869 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5870
5871 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5872
5873 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5874 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5875 cases, use the original mode. */
5876 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5877 EXPAND_STACK_PARM);
5878 m0 = GET_MODE (rtx_op0);
5879 if (m0 == VOIDmode)
5880 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5881
5882 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5883 EXPAND_STACK_PARM);
5884 m1 = GET_MODE (rtx_op1);
5885 if (m1 == VOIDmode)
5886 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5887
5888 create_input_operand (&ops[0], rtx_op0, m0);
5889 create_input_operand (&ops[1], rtx_op1, m1);
5890 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5891 gcc_unreachable ();
5892 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5893 }
5894
5895 /* Check if vec_perm mask SEL is a constant equivalent to a shift of
5896 the first vec_perm operand (or, for a left shift, of the second operand),
5897 assuming the other operand is a constant vector of zeros. Return the shift distance
5898 in bits if so, or NULL_RTX if the vec_perm is not a shift. MODE is the
5899 mode of the value being shifted. SHIFT_OPTAB is vec_shr_optab for right
5900 shift or vec_shl_optab for left shift. */
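/* For example, in V4SImode with the second operand all zeros, the
   selector { 1, 2, 3, 4 } takes elements 1..3 of the first operand
   followed by one zero element, i.e. a vec_shr by 32 bits (one 32-bit
   element).  */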
5901 static rtx
5902 shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
5903 optab shift_optab)
5904 {
5905 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
5906 poly_int64 first = sel[0];
5907 if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
5908 return NULL_RTX;
5909
5910 if (shift_optab == vec_shl_optab)
5911 {
5912 unsigned int nelt;
5913 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5914 return NULL_RTX;
5915 unsigned firstidx = 0;
5916 for (unsigned int i = 0; i < nelt; i++)
5917 {
5918 if (known_eq (sel[i], nelt))
5919 {
5920 if (i == 0 || firstidx)
5921 return NULL_RTX;
5922 firstidx = i;
5923 }
5924 else if (firstidx
5925 ? maybe_ne (sel[i], nelt + i - firstidx)
5926 : maybe_ge (sel[i], nelt))
5927 return NULL_RTX;
5928 }
5929
5930 if (firstidx == 0)
5931 return NULL_RTX;
5932 first = firstidx;
5933 }
5934 else if (!sel.series_p (0, 1, first, 1))
5935 {
5936 unsigned int nelt;
5937 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5938 return NULL_RTX;
5939 for (unsigned int i = 1; i < nelt; i++)
5940 {
5941 poly_int64 expected = i + first;
5942 /* Indices into the second vector are all equivalent. */
5943 if (maybe_lt (sel[i], nelt)
5944 ? maybe_ne (sel[i], expected)
5945 : maybe_lt (expected, nelt))
5946 return NULL_RTX;
5947 }
5948 }
5949
5950 return gen_int_shift_amount (mode, first * bitsize);
5951 }
5952
5953 /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn. */
5954
5955 static rtx
5956 expand_vec_perm_1 (enum insn_code icode, rtx target,
5957 rtx v0, rtx v1, rtx sel)
5958 {
5959 machine_mode tmode = GET_MODE (target);
5960 machine_mode smode = GET_MODE (sel);
5961 class expand_operand ops[4];
5962
5963 gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
5964 || related_int_vector_mode (tmode).require () == smode);
5965 create_output_operand (&ops[0], target, tmode);
5966 create_input_operand (&ops[3], sel, smode);
5967
5968 /* Make an effort to preserve v0 == v1. The target expander is able to
5969 rely on this to determine if we're permuting a single input operand. */
5970 if (rtx_equal_p (v0, v1))
5971 {
5972 if (!insn_operand_matches (icode, 1, v0))
5973 v0 = force_reg (tmode, v0);
5974 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5975 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5976
5977 create_fixed_operand (&ops[1], v0);
5978 create_fixed_operand (&ops[2], v0);
5979 }
5980 else
5981 {
5982 create_input_operand (&ops[1], v0, tmode);
5983 create_input_operand (&ops[2], v1, tmode);
5984 }
5985
5986 if (maybe_expand_insn (icode, 4, ops))
5987 return ops[0].value;
5988 return NULL_RTX;
5989 }
5990
5991 /* Implement a permutation of vectors v0 and v1 using the permutation
5992 vector in SEL and return the result. Use TARGET to hold the result
5993 if nonnull and convenient.
5994
5995 MODE is the mode of the vectors being permuted (V0 and V1). SEL_MODE
5996 is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
5997 to have a particular mode. */
5998
5999 rtx
6000 expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
6001 const vec_perm_builder &sel, machine_mode sel_mode,
6002 rtx target)
6003 {
6004 if (!target || !register_operand (target, mode))
6005 target = gen_reg_rtx (mode);
6006
6007 /* Set QIMODE to a different vector mode with byte elements.
6008 If no such mode, or if MODE already has byte elements, use VOIDmode. */
6009 machine_mode qimode;
6010 if (!qimode_for_vec_perm (mode).exists (&qimode))
6011 qimode = VOIDmode;
6012
6013 rtx_insn *last = get_last_insn ();
6014
6015 bool single_arg_p = rtx_equal_p (v0, v1);
6016 /* Always specify two input vectors here and leave the target to handle
6017 cases in which the inputs are equal. Not all backends can cope with
6018 the single-input representation when testing for a double-input
6019 target instruction. */
6020 vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));
6021
6022 /* See if this can be handled with a vec_shr or vec_shl. We only do this
6023 if the second (for vec_shr) or first (for vec_shl) vector is all
6024 zeroes. */
6025 insn_code shift_code = CODE_FOR_nothing;
6026 insn_code shift_code_qi = CODE_FOR_nothing;
6027 optab shift_optab = unknown_optab;
6028 rtx v2 = v0;
6029 if (v1 == CONST0_RTX (GET_MODE (v1)))
6030 shift_optab = vec_shr_optab;
6031 else if (v0 == CONST0_RTX (GET_MODE (v0)))
6032 {
6033 shift_optab = vec_shl_optab;
6034 v2 = v1;
6035 }
6036 if (shift_optab != unknown_optab)
6037 {
6038 shift_code = optab_handler (shift_optab, mode);
6039 shift_code_qi = ((qimode != VOIDmode && qimode != mode)
6040 ? optab_handler (shift_optab, qimode)
6041 : CODE_FOR_nothing);
6042 }
6043 if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
6044 {
6045 rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
6046 if (shift_amt)
6047 {
6048 class expand_operand ops[3];
6049 if (shift_amt == const0_rtx)
6050 return v2;
6051 if (shift_code != CODE_FOR_nothing)
6052 {
6053 create_output_operand (&ops[0], target, mode);
6054 create_input_operand (&ops[1], v2, mode);
6055 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
6056 if (maybe_expand_insn (shift_code, 3, ops))
6057 return ops[0].value;
6058 }
6059 if (shift_code_qi != CODE_FOR_nothing)
6060 {
6061 rtx tmp = gen_reg_rtx (qimode);
6062 create_output_operand (&ops[0], tmp, qimode);
6063 create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
6064 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
6065 if (maybe_expand_insn (shift_code_qi, 3, ops))
6066 return gen_lowpart (mode, ops[0].value);
6067 }
6068 }
6069 }
6070
6071 if (targetm.vectorize.vec_perm_const != NULL)
6072 {
6073 if (single_arg_p)
6074 v1 = v0;
6075
6076 if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
6077 return target;
6078 }
6079
6080 /* Fall back to a constant byte-based permutation. */
6081 vec_perm_indices qimode_indices;
6082 rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
6083 if (qimode != VOIDmode)
6084 {
6085 qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
6086 target_qi = gen_reg_rtx (qimode);
6087 v0_qi = gen_lowpart (qimode, v0);
6088 v1_qi = gen_lowpart (qimode, v1);
6089 if (targetm.vectorize.vec_perm_const != NULL
6090 && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
6091 v1_qi, qimode_indices))
6092 return gen_lowpart (mode, target_qi);
6093 }
6094
6095 v0 = force_reg (mode, v0);
6096 if (single_arg_p)
6097 v1 = v0;
6098 v1 = force_reg (mode, v1);
6099
6100 /* Otherwise expand as a fully variable permutation. */
6101
6102 /* The optabs are only defined for selectors with the same width
6103 as the values being permuted. */
6104 machine_mode required_sel_mode;
6105 if (!related_int_vector_mode (mode).exists (&required_sel_mode))
6106 {
6107 delete_insns_since (last);
6108 return NULL_RTX;
6109 }
6110
6111 /* We know that it is semantically valid to treat SEL as having SEL_MODE.
6112 If that isn't the mode we want then we need to prove that using
6113 REQUIRED_SEL_MODE is OK. */
6114 if (sel_mode != required_sel_mode)
6115 {
6116 if (!selector_fits_mode_p (required_sel_mode, indices))
6117 {
6118 delete_insns_since (last);
6119 return NULL_RTX;
6120 }
6121 sel_mode = required_sel_mode;
6122 }
6123
6124 insn_code icode = direct_optab_handler (vec_perm_optab, mode);
6125 if (icode != CODE_FOR_nothing)
6126 {
6127 rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
6128 rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
6129 if (tmp)
6130 return tmp;
6131 }
6132
6133 if (qimode != VOIDmode
6134 && selector_fits_mode_p (qimode, qimode_indices))
6135 {
6136 icode = direct_optab_handler (vec_perm_optab, qimode);
6137 if (icode != CODE_FOR_nothing)
6138 {
6139 rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
6140 rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
6141 if (tmp)
6142 return gen_lowpart (mode, tmp);
6143 }
6144 }
6145
6146 delete_insns_since (last);
6147 return NULL_RTX;
6148 }
6149
6150 /* Implement a permutation of vectors v0 and v1 using the permutation
6151 vector in SEL and return the result. Use TARGET to hold the result
6152 if nonnull and convenient.
6153
6154 MODE is the mode of the vectors being permuted (V0 and V1).
6155 SEL must have the integer equivalent of MODE and is known to be
6156 unsuitable for permutes with a constant permutation vector. */
6157
6158 rtx
6159 expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
6160 {
6161 enum insn_code icode;
6162 unsigned int i, u;
6163 rtx tmp, sel_qi;
6164
6165 u = GET_MODE_UNIT_SIZE (mode);
6166
6167 if (!target || GET_MODE (target) != mode)
6168 target = gen_reg_rtx (mode);
6169
6170 icode = direct_optab_handler (vec_perm_optab, mode);
6171 if (icode != CODE_FOR_nothing)
6172 {
6173 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6174 if (tmp)
6175 return tmp;
6176 }
6177
6178 /* As a special case to aid several targets, lower the element-based
6179 permutation to a byte-based permutation and try again. */
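  /* For example, a V4SImode selector { 2, 3, 0, 1 } becomes the V16QImode
     selector { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }: each index is
     scaled by the 4-byte element size, broadcast into all four byte
     positions of its element, and then the byte offsets 0..3 are added
     (the three steps below).  */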
6180 machine_mode qimode;
6181 if (!qimode_for_vec_perm (mode).exists (&qimode)
6182 || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
6183 return NULL_RTX;
6184 icode = direct_optab_handler (vec_perm_optab, qimode);
6185 if (icode == CODE_FOR_nothing)
6186 return NULL_RTX;
6187
6188 /* Multiply each element by its byte size. */
6189 machine_mode selmode = GET_MODE (sel);
6190 if (u == 2)
6191 sel = expand_simple_binop (selmode, PLUS, sel, sel,
6192 NULL, 0, OPTAB_DIRECT);
6193 else
6194 sel = expand_simple_binop (selmode, ASHIFT, sel,
6195 gen_int_shift_amount (selmode, exact_log2 (u)),
6196 NULL, 0, OPTAB_DIRECT);
6197 gcc_assert (sel != NULL);
6198
6199 /* Broadcast the low byte of each element into each of its bytes.
6200 The encoding has U interleaved stepped patterns, one for each
6201 byte of an element. */
6202 vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
6203 unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
6204 for (i = 0; i < 3; ++i)
6205 for (unsigned int j = 0; j < u; ++j)
6206 const_sel.quick_push (i * u + low_byte_in_u);
6207 sel = gen_lowpart (qimode, sel);
6208 sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
6209 gcc_assert (sel != NULL);
6210
6211 /* Add the byte offset to each byte element. */
6212 /* Note that the definition of the indices here is memory ordering,
6213 so there should be no difference between big and little endian. */
6214 rtx_vector_builder byte_indices (qimode, u, 1);
6215 for (i = 0; i < u; ++i)
6216 byte_indices.quick_push (GEN_INT (i));
6217 tmp = byte_indices.build ();
6218 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
6219 sel, 0, OPTAB_DIRECT);
6220 gcc_assert (sel_qi != NULL);
6221
6222 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
6223 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
6224 gen_lowpart (qimode, v1), sel_qi);
6225 if (tmp)
6226 tmp = gen_lowpart (mode, tmp);
6227 return tmp;
6228 }
6229
6230 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
6231 Use TARGET for the result if nonnull and convenient. */
6232
6233 rtx
6234 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
6235 {
6236 class expand_operand ops[3];
6237 enum insn_code icode;
6238 machine_mode emode = GET_MODE_INNER (vmode);
6239
6240 icode = direct_optab_handler (vec_series_optab, vmode);
6241 gcc_assert (icode != CODE_FOR_nothing);
6242
6243 create_output_operand (&ops[0], target, vmode);
6244 create_input_operand (&ops[1], op0, emode);
6245 create_input_operand (&ops[2], op1, emode);
6246
6247 expand_insn (icode, 3, ops);
6248 return ops[0].value;
6249 }
6250
6251 /* Generate insns for a vector comparison into a mask. */
6252
6253 rtx
6254 expand_vec_cmp_expr (tree type, tree exp, rtx target)
6255 {
6256 class expand_operand ops[4];
6257 enum insn_code icode;
6258 rtx comparison;
6259 machine_mode mask_mode = TYPE_MODE (type);
6260 machine_mode vmode;
6261 bool unsignedp;
6262 tree op0a, op0b;
6263 enum tree_code tcode;
6264
6265 op0a = TREE_OPERAND (exp, 0);
6266 op0b = TREE_OPERAND (exp, 1);
6267 tcode = TREE_CODE (exp);
6268
6269 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
6270 vmode = TYPE_MODE (TREE_TYPE (op0a));
6271
6272 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
6273 if (icode == CODE_FOR_nothing)
6274 {
6275 if (tcode == EQ_EXPR || tcode == NE_EXPR)
6276 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
6277 if (icode == CODE_FOR_nothing)
6278 return 0;
6279 }
6280
6281 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
6282 unsignedp, icode, 2);
6283 create_output_operand (&ops[0], target, mask_mode);
6284 create_fixed_operand (&ops[1], comparison);
6285 create_fixed_operand (&ops[2], XEXP (comparison, 0));
6286 create_fixed_operand (&ops[3], XEXP (comparison, 1));
6287 expand_insn (icode, 4, ops);
6288 return ops[0].value;
6289 }
6290
6291 /* Expand a highpart multiply. */
6292
6293 rtx
6294 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
6295 rtx target, bool uns_p)
6296 {
6297 class expand_operand eops[3];
6298 enum insn_code icode;
6299 int method, i;
6300 machine_mode wmode;
6301 rtx m1, m2;
6302 optab tab1, tab2;
6303
6304 method = can_mult_highpart_p (mode, uns_p);
6305 switch (method)
6306 {
6307 case 0:
6308 return NULL_RTX;
6309 case 1:
6310 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
6311 return expand_binop (mode, tab1, op0, op1, target, uns_p,
6312 OPTAB_LIB_WIDEN);
6313 case 2:
6314 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
6315 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
6316 break;
6317 case 3:
6318 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
6319 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
6320 if (BYTES_BIG_ENDIAN)
6321 std::swap (tab1, tab2);
6322 break;
6323 default:
6324 gcc_unreachable ();
6325 }
6326
6327 icode = optab_handler (tab1, mode);
6328 wmode = insn_data[icode].operand[0].mode;
6329 gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
6330 GET_MODE_NUNITS (mode)));
6331 gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));
6332
6333 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
6334 create_input_operand (&eops[1], op0, mode);
6335 create_input_operand (&eops[2], op1, mode);
6336 expand_insn (icode, 3, eops);
6337 m1 = gen_lowpart (mode, eops[0].value);
6338
6339 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
6340 create_input_operand (&eops[1], op0, mode);
6341 create_input_operand (&eops[2], op1, mode);
6342 expand_insn (optab_handler (tab2, mode), 3, eops);
6343 m2 = gen_lowpart (mode, eops[0].value);
6344
6345 vec_perm_builder sel;
6346 if (method == 2)
6347 {
6348 /* The encoding has 2 interleaved stepped patterns. */
6349 sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
6350 for (i = 0; i < 6; ++i)
6351 sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
6352 + ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
6353 }
6354 else
6355 {
6356 /* The encoding has a single interleaved stepped pattern. */
6357 sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
6358 for (i = 0; i < 3; ++i)
6359 sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
6360 }
6361
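  /* For example, for V4SImode on a little-endian target, method 2 yields
     the selector { 1, 5, 3, 7 }: the high halves of the even products
     (from M1) interleaved with the high halves of the odd products
     (from M2).  */
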
6362 return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
6363 }
6364 \f
6365 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6366 pattern. */
6367
6368 static void
6369 find_cc_set (rtx x, const_rtx pat, void *data)
6370 {
6371 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
6372 && GET_CODE (pat) == SET)
6373 {
6374 rtx *p_cc_reg = (rtx *) data;
6375 gcc_assert (!*p_cc_reg);
6376 *p_cc_reg = x;
6377 }
6378 }
6379
6380 /* This is a helper function for the other atomic operations. This function
6381 emits a loop that contains SEQ that iterates until a compare-and-swap
6382 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6383 a set of instructions that takes a value from OLD_REG as an input and
6384 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6385 set to the current contents of MEM. After SEQ, a compare-and-swap will
6386 attempt to update MEM with NEW_REG. The function returns true when the
6387 loop was generated successfully. */
6388
6389 static bool
6390 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6391 {
6392 machine_mode mode = GET_MODE (mem);
6393 rtx_code_label *label;
6394 rtx cmp_reg, success, oldval;
6395
6396 /* The loop we want to generate looks like
6397
6398 cmp_reg = mem;
6399 label:
6400 old_reg = cmp_reg;
6401 seq;
6402 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
6403 if (!success)
6404 goto label;
6405
6406 Note that we only do the plain load from memory once. Subsequent
6407 iterations use the value loaded by the compare-and-swap pattern. */
6408
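  /* In C11 terms the loop is roughly (sketch only; SEQ stands for the
     caller-supplied instructions):

       T cmp = *mem;
       T old_val, new_val;
       do
	 {
	   old_val = cmp;
	   new_val = SEQ (old_val);
	 }
       while (!atomic_compare_exchange_strong (mem, &cmp, new_val));

     atomic_compare_exchange_strong stores the observed memory value in
     CMP on failure, matching the reuse of CMP_REG below.  */
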
6409 label = gen_label_rtx ();
6410 cmp_reg = gen_reg_rtx (mode);
6411
6412 emit_move_insn (cmp_reg, mem);
6413 emit_label (label);
6414 emit_move_insn (old_reg, cmp_reg);
6415 if (seq)
6416 emit_insn (seq);
6417
6418 success = NULL_RTX;
6419 oldval = cmp_reg;
6420 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
6421 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
6422 MEMMODEL_RELAXED))
6423 return false;
6424
6425 if (oldval != cmp_reg)
6426 emit_move_insn (cmp_reg, oldval);
6427
6428 /* Mark this jump predicted not taken. */
6429 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
6430 GET_MODE (success), 1, label,
6431 profile_probability::guessed_never ());
6432 return true;
6433 }
6434
6435
6436 /* This function tries to emit an atomic_exchange instruction. VAL is written
6437 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
6438 using TARGET if possible. */
6439
6440 static rtx
6441 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6442 {
6443 machine_mode mode = GET_MODE (mem);
6444 enum insn_code icode;
6445
6446 /* If the target supports the exchange directly, great. */
6447 icode = direct_optab_handler (atomic_exchange_optab, mode);
6448 if (icode != CODE_FOR_nothing)
6449 {
6450 class expand_operand ops[4];
6451
6452 create_output_operand (&ops[0], target, mode);
6453 create_fixed_operand (&ops[1], mem);
6454 create_input_operand (&ops[2], val, mode);
6455 create_integer_operand (&ops[3], model);
6456 if (maybe_expand_insn (icode, 4, ops))
6457 return ops[0].value;
6458 }
6459
6460 return NULL_RTX;
6461 }
6462
6463 /* This function tries to implement an atomic exchange operation using
6464 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
6465 The previous contents of *MEM are returned, using TARGET if possible.
6466 Since this instruction is an acquire barrier only, stronger memory
6467 models may require additional barriers to be emitted. */
6468
6469 static rtx
6470 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
6471 enum memmodel model)
6472 {
6473 machine_mode mode = GET_MODE (mem);
6474 enum insn_code icode;
6475 rtx_insn *last_insn = get_last_insn ();
6476
6477 icode = optab_handler (sync_lock_test_and_set_optab, mode);
6478
6479 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
6480 exists, and the memory model is stronger than acquire, add a release
6481 barrier before the instruction. */
6482
6483 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
6484 expand_mem_thread_fence (model);
6485
6486 if (icode != CODE_FOR_nothing)
6487 {
6488 class expand_operand ops[3];
6489 create_output_operand (&ops[0], target, mode);
6490 create_fixed_operand (&ops[1], mem);
6491 create_input_operand (&ops[2], val, mode);
6492 if (maybe_expand_insn (icode, 3, ops))
6493 return ops[0].value;
6494 }
6495
6496 /* If an external test-and-set libcall is provided, use that instead of
6497 any external compare-and-swap that we might get from the compare-and-
6498 swap-loop expansion later. */
6499 if (!can_compare_and_swap_p (mode, false))
6500 {
6501 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
6502 if (libfunc != NULL)
6503 {
6504 rtx addr;
6505
6506 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6507 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6508 mode, addr, ptr_mode,
6509 val, mode);
6510 }
6511 }
6512
6513 /* If the test_and_set can't be emitted, eliminate any barrier that might
6514 have been emitted. */
6515 delete_insns_since (last_insn);
6516 return NULL_RTX;
6517 }
6518
6519 /* This function tries to implement an atomic exchange operation using a
6520 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6521 *MEM are returned, using TARGET if possible. No memory model is required
6522 since a compare_and_swap loop is seq-cst. */
6523
6524 static rtx
6525 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6526 {
6527 machine_mode mode = GET_MODE (mem);
6528
6529 if (can_compare_and_swap_p (mode, true))
6530 {
6531 if (!target || !register_operand (target, mode))
6532 target = gen_reg_rtx (mode);
6533 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6534 return target;
6535 }
6536
6537 return NULL_RTX;
6538 }
6539
6540 /* This function tries to implement an atomic test-and-set operation
6541 using the atomic_test_and_set instruction pattern. A boolean value
6542 is returned from the operation, using TARGET if possible. */
6543
6544 static rtx
6545 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6546 {
6547 machine_mode pat_bool_mode;
6548 class expand_operand ops[3];
6549
6550 if (!targetm.have_atomic_test_and_set ())
6551 return NULL_RTX;
6552
6553 /* While we always get QImode from __atomic_test_and_set, we get
6554 other memory modes from __sync_lock_test_and_set. Note that we
6555 use no endian adjustment here. This matches the 4.6 behavior
6556 in the Sparc backend. */
6557 enum insn_code icode = targetm.code_for_atomic_test_and_set;
6558 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6559 if (GET_MODE (mem) != QImode)
6560 mem = adjust_address_nv (mem, QImode, 0);
6561
6562 pat_bool_mode = insn_data[icode].operand[0].mode;
6563 create_output_operand (&ops[0], target, pat_bool_mode);
6564 create_fixed_operand (&ops[1], mem);
6565 create_integer_operand (&ops[2], model);
6566
6567 if (maybe_expand_insn (icode, 3, ops))
6568 return ops[0].value;
6569 return NULL_RTX;
6570 }
6571
6572 /* This function expands the legacy __sync_lock_test_and_set operation, which
6573 is generally an atomic exchange. Some limited targets only allow the
6574 constant 1 to be stored. This is an ACQUIRE operation.
6575
6576 TARGET is an optional place to stick the return value.
6577 MEM is where VAL is stored. */
6578
6579 rtx
6580 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6581 {
6582 rtx ret;
6583
6584 /* Try an atomic_exchange first. */
6585 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6586 if (ret)
6587 return ret;
6588
6589 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6590 MEMMODEL_SYNC_ACQUIRE);
6591 if (ret)
6592 return ret;
6593
6594 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6595 if (ret)
6596 return ret;
6597
6598 /* If there are no other options, try atomic_test_and_set if the value
6599 being stored is 1. */
6600 if (val == const1_rtx)
6601 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6602
6603 return ret;
6604 }
6605
6606 /* This function expands the atomic test_and_set operation:
6607 atomically store a boolean TRUE into MEM and return the previous value.
6608
6609 MEMMODEL is the memory model variant to use.
6610 TARGET is an optional place to stick the return value. */
6611
6612 rtx
6613 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6614 {
6615 machine_mode mode = GET_MODE (mem);
6616 rtx ret, trueval, subtarget;
6617
6618 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6619 if (ret)
6620 return ret;
6621
6622 /* Be binary compatible with non-default settings of trueval, and different
6623 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6624 another only has atomic-exchange. */
6625 if (targetm.atomic_test_and_set_trueval == 1)
6626 {
6627 trueval = const1_rtx;
6628 subtarget = target ? target : gen_reg_rtx (mode);
6629 }
6630 else
6631 {
6632 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6633 subtarget = gen_reg_rtx (mode);
6634 }
6635
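  /* E.g. a target whose test-and-set stores all-ones into the byte (such
     as SPARC's ldstub, trueval 0xff) exchanges in 0xff here, and the NE
     reduction at the bottom converts the loaded byte into the required
     0/1 boolean.  */
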
6636 /* Try the atomic-exchange optab... */
6637 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6638
6639 /* ... then an atomic-compare-and-swap loop ... */
6640 if (!ret)
6641 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6642
6643 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6644 if (!ret)
6645 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6646
6647 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6648 things with the value 1. Thus we try again without trueval. */
6649 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6650 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6651
6652 /* Failing all else, assume a single threaded environment and simply
6653 perform the operation. */
6654 if (!ret)
6655 {
6656 /* If the result is ignored skip the move to target. */
6657 if (subtarget != const0_rtx)
6658 emit_move_insn (subtarget, mem);
6659
6660 emit_move_insn (mem, trueval);
6661 ret = subtarget;
6662 }
6663
6664 /* Recall that we have to return a boolean value; rectify if trueval
6665 is not exactly one. */
6666 if (targetm.atomic_test_and_set_trueval != 1)
6667 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6668
6669 return ret;
6670 }
6671
6672 /* This function expands the atomic exchange operation:
6673 atomically store VAL in MEM and return the previous value in MEM.
6674
6675 MEMMODEL is the memory model variant to use.
6676 TARGET is an optional place to stick the return value. */
6677
6678 rtx
6679 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6680 {
6681 machine_mode mode = GET_MODE (mem);
6682 rtx ret;
6683
6684 /* If loads are not atomic for the required size and we are not called to
6685 provide a __sync builtin, do not do anything so that we stay consistent
6686 with atomic loads of the same size. */
6687 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6688 return NULL_RTX;
6689
6690 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6691
6692 /* Next try a compare-and-swap loop for the exchange. */
6693 if (!ret)
6694 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6695
6696 return ret;
6697 }
6698
6699 /* This function expands the atomic compare exchange operation:
6700
6701 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6702 *PTARGET_OVAL is an optional place to store the old value from memory.
6703 Both target parameters may be NULL or const0_rtx to indicate that we do
6704 not care about that return value. Both target parameters are updated on
6705 success to the actual location of the corresponding result.
6706
6707 MEMMODEL is the memory model variant to use.
6708
6709 The return value of the function is true for success. */
6710
6711 bool
6712 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6713 rtx mem, rtx expected, rtx desired,
6714 bool is_weak, enum memmodel succ_model,
6715 enum memmodel fail_model)
6716 {
6717 machine_mode mode = GET_MODE (mem);
6718 class expand_operand ops[8];
6719 enum insn_code icode;
6720 rtx target_oval, target_bool = NULL_RTX;
6721 rtx libfunc;
6722
6723 /* If loads are not atomic for the required size and we are not called to
6724 provide a __sync builtin, do not do anything so that we stay consistent
6725 with atomic loads of the same size. */
6726 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6727 return false;
6728
6729 /* Load expected into a register for the compare and swap. */
6730 if (MEM_P (expected))
6731 expected = copy_to_reg (expected);
6732
6733 /* Make sure we always have some place to put the return oldval.
6734 Further, make sure that place is distinct from the input expected,
6735 just in case we need that path down below. */
6736 if (ptarget_oval && *ptarget_oval == const0_rtx)
6737 ptarget_oval = NULL;
6738
6739 if (ptarget_oval == NULL
6740 || (target_oval = *ptarget_oval) == NULL
6741 || reg_overlap_mentioned_p (expected, target_oval))
6742 target_oval = gen_reg_rtx (mode);
6743
6744 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6745 if (icode != CODE_FOR_nothing)
6746 {
6747 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6748
6749 if (ptarget_bool && *ptarget_bool == const0_rtx)
6750 ptarget_bool = NULL;
6751
6752 /* Make sure we always have a place for the bool operand. */
6753 if (ptarget_bool == NULL
6754 || (target_bool = *ptarget_bool) == NULL
6755 || GET_MODE (target_bool) != bool_mode)
6756 target_bool = gen_reg_rtx (bool_mode);
6757
6758 /* Emit the compare_and_swap. */
6759 create_output_operand (&ops[0], target_bool, bool_mode);
6760 create_output_operand (&ops[1], target_oval, mode);
6761 create_fixed_operand (&ops[2], mem);
6762 create_input_operand (&ops[3], expected, mode);
6763 create_input_operand (&ops[4], desired, mode);
6764 create_integer_operand (&ops[5], is_weak);
6765 create_integer_operand (&ops[6], succ_model);
6766 create_integer_operand (&ops[7], fail_model);
6767 if (maybe_expand_insn (icode, 8, ops))
6768 {
6769 /* Return success/failure. */
6770 target_bool = ops[0].value;
6771 target_oval = ops[1].value;
6772 goto success;
6773 }
6774 }
6775
6776 /* Otherwise fall back to the original __sync_val_compare_and_swap
6777 which is always seq-cst. */
6778 icode = optab_handler (sync_compare_and_swap_optab, mode);
6779 if (icode != CODE_FOR_nothing)
6780 {
6781 rtx cc_reg;
6782
6783 create_output_operand (&ops[0], target_oval, mode);
6784 create_fixed_operand (&ops[1], mem);
6785 create_input_operand (&ops[2], expected, mode);
6786 create_input_operand (&ops[3], desired, mode);
6787 if (!maybe_expand_insn (icode, 4, ops))
6788 return false;
6789
6790 target_oval = ops[0].value;
6791
6792 /* If the caller isn't interested in the boolean return value,
6793 skip the computation of it. */
6794 if (ptarget_bool == NULL)
6795 goto success;
6796
6797 /* Otherwise, work out if the compare-and-swap succeeded. */
6798 cc_reg = NULL_RTX;
6799 if (have_insn_for (COMPARE, CCmode))
6800 note_stores (get_last_insn (), find_cc_set, &cc_reg);
6801 if (cc_reg)
6802 {
6803 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6804 const0_rtx, VOIDmode, 0, 1);
6805 goto success;
6806 }
6807 goto success_bool_from_val;
6808 }
6809
6810 /* Also check for library support for __sync_val_compare_and_swap. */
6811 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6812 if (libfunc != NULL)
6813 {
6814 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6815 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6816 mode, addr, ptr_mode,
6817 expected, mode, desired, mode);
6818 emit_move_insn (target_oval, target);
6819
6820 /* Compute the boolean return value only if requested. */
6821 if (ptarget_bool)
6822 goto success_bool_from_val;
6823 else
6824 goto success;
6825 }
6826
6827 /* Failure. */
6828 return false;
6829
6830 success_bool_from_val:
6831 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6832 expected, VOIDmode, 1, 1);
6833 success:
6834 /* Make sure that the oval output winds up where the caller asked. */
6835 if (ptarget_oval)
6836 *ptarget_oval = target_oval;
6837 if (ptarget_bool)
6838 *ptarget_bool = target_bool;
6839 return true;
6840 }
6841
6842 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6843
6844 static void
6845 expand_asm_memory_blockage (void)
6846 {
6847 rtx asm_op, clob;
6848
6849 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6850 rtvec_alloc (0), rtvec_alloc (0),
6851 rtvec_alloc (0), UNKNOWN_LOCATION);
6852 MEM_VOLATILE_P (asm_op) = 1;
6853
6854 clob = gen_rtx_SCRATCH (VOIDmode);
6855 clob = gen_rtx_MEM (BLKmode, clob);
6856 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6857
6858 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6859 }
6860
6861 /* Do not propagate memory accesses across this point. */
6862
6863 static void
6864 expand_memory_blockage (void)
6865 {
6866 if (targetm.have_memory_blockage ())
6867 emit_insn (targetm.gen_memory_blockage ());
6868 else
6869 expand_asm_memory_blockage ();
6870 }
6871
6872 /* Generate asm volatile("" : : : "memory") as a memory blockage, at the
6873 same time clobbering the register set specified by REGS. */
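
   For example, with REGS containing just hard registers 1 and 2, the
   emitted insn looks like (modes are target-dependent; sketch only):

     (parallel [(asm_operands ("") ("") 0 [] [] [])
		(clobber (mem:BLK (scratch)))
		(clobber (reg 1))
		(clobber (reg 2))])  */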
6874
6875 void
6876 expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
6877 {
6878 rtx asm_op, clob_mem;
6879
6880 unsigned int num_of_regs = 0;
6881 for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6882 if (TEST_HARD_REG_BIT (regs, i))
6883 num_of_regs++;
6884
6885 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6886 rtvec_alloc (0), rtvec_alloc (0),
6887 rtvec_alloc (0), UNKNOWN_LOCATION);
6888 MEM_VOLATILE_P (asm_op) = 1;
6889
6890 rtvec v = rtvec_alloc (num_of_regs + 2);
6891
6892 clob_mem = gen_rtx_SCRATCH (VOIDmode);
6893 clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
6894 clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);
6895
6896 RTVEC_ELT (v, 0) = asm_op;
6897 RTVEC_ELT (v, 1) = clob_mem;
6898
6899 if (num_of_regs > 0)
6900 {
6901 unsigned int j = 2;
6902 for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6903 if (TEST_HARD_REG_BIT (regs, i))
6904 {
6905 RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
6906 j++;
6907 }
6908 gcc_assert (j == (num_of_regs + 2));
6909 }
6910
6911 emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
6912 }
6913
6914 /* This routine will either emit the mem_thread_fence pattern or issue a
6915 sync_synchronize to generate a fence for memory model MEMMODEL. */
6916
6917 void
6918 expand_mem_thread_fence (enum memmodel model)
6919 {
6920 if (is_mm_relaxed (model))
6921 return;
6922 if (targetm.have_mem_thread_fence ())
6923 {
6924 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6925 expand_memory_blockage ();
6926 }
6927 else if (targetm.have_memory_barrier ())
6928 emit_insn (targetm.gen_memory_barrier ());
6929 else if (synchronize_libfunc != NULL_RTX)
6930 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
6931 else
6932 expand_memory_blockage ();
6933 }
6934
6935 /* Emit a signal fence with given memory model. */
6936
6937 void
6938 expand_mem_signal_fence (enum memmodel model)
6939 {
6940 /* No machine barrier is required to implement a signal fence, but
6941 a compiler memory barrier must be issued, except for relaxed MM. */
6942 if (!is_mm_relaxed (model))
6943 expand_memory_blockage ();
6944 }
6945
6946 /* This function expands the atomic load operation:
6947 return the atomically loaded value in MEM.
6948
6949 MEMMODEL is the memory model variant to use.
6950 TARGET is an optional place to stick the return value. */
6951
6952 rtx
6953 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6954 {
6955 machine_mode mode = GET_MODE (mem);
6956 enum insn_code icode;
6957
6958 /* If the target supports the load directly, great. */
6959 icode = direct_optab_handler (atomic_load_optab, mode);
6960 if (icode != CODE_FOR_nothing)
6961 {
6962 class expand_operand ops[3];
6963 rtx_insn *last = get_last_insn ();
6964 if (is_mm_seq_cst (model))
6965 expand_memory_blockage ();
6966
6967 create_output_operand (&ops[0], target, mode);
6968 create_fixed_operand (&ops[1], mem);
6969 create_integer_operand (&ops[2], model);
6970 if (maybe_expand_insn (icode, 3, ops))
6971 {
6972 if (!is_mm_relaxed (model))
6973 expand_memory_blockage ();
6974 return ops[0].value;
6975 }
6976 delete_insns_since (last);
6977 }
6978
6979 /* If the size of the object is greater than word size on this target,
6980 then we assume that a load will not be atomic. We could try to
6981 emulate a load with a compare-and-swap operation, but the store that
6982 doing so would entail could be incorrect if this is a volatile atomic
6983 load or one targeting read-only-mapped memory. */
6984 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
6985 /* If there is no atomic load, leave the library call. */
6986 return NULL_RTX;
6987
6988 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6989 if (!target || target == const0_rtx)
6990 target = gen_reg_rtx (mode);
6991
6992 /* For SEQ_CST, emit a barrier before the load. */
6993 if (is_mm_seq_cst (model))
6994 expand_mem_thread_fence (model);
6995
6996 emit_move_insn (target, mem);
6997
6998 /* Emit the appropriate barrier after the load. */
6999 expand_mem_thread_fence (model);
7000
7001 return target;
7002 }
7003
7004 /* This function expands the atomic store operation:
7005 Atomically store VAL in MEM.
7006 MEMMODEL is the memory model variant to use.
7007 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
7008 The function returns const0_rtx if a pattern was emitted. */
7009
7010 rtx
7011 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
7012 {
7013 machine_mode mode = GET_MODE (mem);
7014 enum insn_code icode;
7015 class expand_operand ops[3];
7016
7017 /* If the target supports the store directly, great. */
7018 icode = direct_optab_handler (atomic_store_optab, mode);
7019 if (icode != CODE_FOR_nothing)
7020 {
7021 rtx_insn *last = get_last_insn ();
7022 if (!is_mm_relaxed (model))
7023 expand_memory_blockage ();
7024 create_fixed_operand (&ops[0], mem);
7025 create_input_operand (&ops[1], val, mode);
7026 create_integer_operand (&ops[2], model);
7027 if (maybe_expand_insn (icode, 3, ops))
7028 {
7029 if (is_mm_seq_cst (model))
7030 expand_memory_blockage ();
7031 return const0_rtx;
7032 }
7033 delete_insns_since (last);
7034 }
7035
7036 /* If using __sync_lock_release is a viable alternative, try it.
7037      Note that USE_RELEASE will not be true if we are expanding a generic
7038      __atomic_store_n.  */
7039 if (use_release)
7040 {
7041 icode = direct_optab_handler (sync_lock_release_optab, mode);
7042 if (icode != CODE_FOR_nothing)
7043 {
7044 create_fixed_operand (&ops[0], mem);
7045 create_input_operand (&ops[1], const0_rtx, mode);
7046 if (maybe_expand_insn (icode, 2, ops))
7047 {
7048 /* lock_release is only a release barrier. */
7049 if (is_mm_seq_cst (model))
7050 expand_mem_thread_fence (model);
7051 return const0_rtx;
7052 }
7053 }
7054 }
7055
7056 /* If the size of the object is greater than word size on this target,
7057 a default store will not be atomic. */
7058 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
7059 {
7060 /* If loads are atomic or we are called to provide a __sync builtin,
7061 	 we can try an atomic_exchange and throw away the result.  Otherwise,
7062 don't do anything so that we do not create an inconsistency between
7063 loads and stores. */
7064 if (can_atomic_load_p (mode) || is_mm_sync (model))
7065 {
7066 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
7067 if (!target)
7068 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
7069 val);
7070 if (target)
7071 return const0_rtx;
7072 }
7073 return NULL_RTX;
7074 }
7075
7076 /* Otherwise assume stores are atomic, and emit the proper barriers. */
7077 expand_mem_thread_fence (model);
7078
7079 emit_move_insn (mem, val);
7080
7081 /* For SEQ_CST, also emit a barrier after the store. */
7082 if (is_mm_seq_cst (model))
7083 expand_mem_thread_fence (model);
7084
7085 return const0_rtx;
7086 }
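/* Symmetrically, a __atomic_store_n (&x, v, __ATOMIC_SEQ_CST) with no
   atomic_store<mode> pattern (and, for __sync, no usable
   sync_lock_release) reaches the fallback above and becomes roughly

       expand_mem_thread_fence (SEQ_CST);     barrier before the store
       (set (mem x) v)                        plain move, assumed atomic
       expand_mem_thread_fence (SEQ_CST);     extra barrier, SEQ_CST only

   again relying on the word-size check to rule out multiword stores.  */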
7087
7088
7089 /* Structure containing the pointers and values required to process the
7090 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7091
7092 struct atomic_op_functions
7093 {
7094 direct_optab mem_fetch_before;
7095 direct_optab mem_fetch_after;
7096 direct_optab mem_no_result;
7097 optab fetch_before;
7098 optab fetch_after;
7099 direct_optab no_result;
7100 enum rtx_code reverse_code;
7101 };
7102
7103
7104 /* Fill in structure pointed to by OP with the various optab entries for an
7105 operation of type CODE. */
7106
7107 static void
7108 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
7109 {
7110   gcc_assert (op != NULL);
7111
7112 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
7113 in the source code during compilation, and the optab entries are not
7114 computable until runtime. Fill in the values at runtime. */
7115 switch (code)
7116 {
7117 case PLUS:
7118 op->mem_fetch_before = atomic_fetch_add_optab;
7119 op->mem_fetch_after = atomic_add_fetch_optab;
7120 op->mem_no_result = atomic_add_optab;
7121 op->fetch_before = sync_old_add_optab;
7122 op->fetch_after = sync_new_add_optab;
7123 op->no_result = sync_add_optab;
7124 op->reverse_code = MINUS;
7125 break;
7126 case MINUS:
7127 op->mem_fetch_before = atomic_fetch_sub_optab;
7128 op->mem_fetch_after = atomic_sub_fetch_optab;
7129 op->mem_no_result = atomic_sub_optab;
7130 op->fetch_before = sync_old_sub_optab;
7131 op->fetch_after = sync_new_sub_optab;
7132 op->no_result = sync_sub_optab;
7133 op->reverse_code = PLUS;
7134 break;
7135 case XOR:
7136 op->mem_fetch_before = atomic_fetch_xor_optab;
7137 op->mem_fetch_after = atomic_xor_fetch_optab;
7138 op->mem_no_result = atomic_xor_optab;
7139 op->fetch_before = sync_old_xor_optab;
7140 op->fetch_after = sync_new_xor_optab;
7141 op->no_result = sync_xor_optab;
7142 op->reverse_code = XOR;
7143 break;
7144 case AND:
7145 op->mem_fetch_before = atomic_fetch_and_optab;
7146 op->mem_fetch_after = atomic_and_fetch_optab;
7147 op->mem_no_result = atomic_and_optab;
7148 op->fetch_before = sync_old_and_optab;
7149 op->fetch_after = sync_new_and_optab;
7150 op->no_result = sync_and_optab;
7151 op->reverse_code = UNKNOWN;
7152 break;
7153 case IOR:
7154 op->mem_fetch_before = atomic_fetch_or_optab;
7155 op->mem_fetch_after = atomic_or_fetch_optab;
7156 op->mem_no_result = atomic_or_optab;
7157 op->fetch_before = sync_old_ior_optab;
7158 op->fetch_after = sync_new_ior_optab;
7159 op->no_result = sync_ior_optab;
7160 op->reverse_code = UNKNOWN;
7161 break;
7162 case NOT:
7163 op->mem_fetch_before = atomic_fetch_nand_optab;
7164 op->mem_fetch_after = atomic_nand_fetch_optab;
7165 op->mem_no_result = atomic_nand_optab;
7166 op->fetch_before = sync_old_nand_optab;
7167 op->fetch_after = sync_new_nand_optab;
7168 op->no_result = sync_nand_optab;
7169 op->reverse_code = UNKNOWN;
7170 break;
7171 default:
7172 gcc_unreachable ();
7173 }
7174 }
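/* Illustration: for CODE == PLUS the table above records that
   atomic_fetch_add_optab yields the value before the addition,
   atomic_add_fetch_optab the value after it, and REVERSE_CODE == MINUS,
   so a missing fetch_add can be recovered from add_fetch as

       old = __atomic_add_fetch (&x, v, model) - v;

   XOR is its own inverse, while AND, IOR and NOT (nand) are not
   invertible, hence UNKNOWN.  */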
7175
7176 /* See if there is a more efficient way to implement the operation "*MEM CODE VAL"
7177 using memory order MODEL. If AFTER is true the operation needs to return
7178 the value of *MEM after the operation, otherwise the previous value.
7179    TARGET is an optional place for the result.  The result is unused if
7180 it is const0_rtx.
7181 Return the result if there is a better sequence, otherwise NULL_RTX. */
7182
7183 static rtx
7184 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7185 enum memmodel model, bool after)
7186 {
7187 /* If the value is prefetched, or not used, it may be possible to replace
7188 the sequence with a native exchange operation. */
7189 if (!after || target == const0_rtx)
7190 {
7191 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
7192 if (code == AND && val == const0_rtx)
7193 {
7194 if (target == const0_rtx)
7195 target = gen_reg_rtx (GET_MODE (mem));
7196 return maybe_emit_atomic_exchange (target, mem, val, model);
7197 }
7198
7199 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
7200 if (code == IOR && val == constm1_rtx)
7201 {
7202 if (target == const0_rtx)
7203 target = gen_reg_rtx (GET_MODE (mem));
7204 return maybe_emit_atomic_exchange (target, mem, val, model);
7205 }
7206 }
7207
7208 return NULL_RTX;
7209 }
7210
7211 /* Try to emit an instruction for a specific operation variation.
7212 OPTAB contains the OP functions.
7213 TARGET is an optional place to return the result. const0_rtx means unused.
7214 MEM is the memory location to operate on.
7215 VAL is the value to use in the operation.
7216 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
7217 MODEL is the memory model, if used.
7218 AFTER is true if the returned result is the value after the operation. */
7219
7220 static rtx
7221 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
7222 rtx val, bool use_memmodel, enum memmodel model, bool after)
7223 {
7224 machine_mode mode = GET_MODE (mem);
7225 class expand_operand ops[4];
7226 enum insn_code icode;
7227 int op_counter = 0;
7228 int num_ops;
7229
7230 /* Check to see if there is a result returned. */
7231 if (target == const0_rtx)
7232 {
7233 if (use_memmodel)
7234 {
7235 icode = direct_optab_handler (optab->mem_no_result, mode);
7236 create_integer_operand (&ops[2], model);
7237 num_ops = 3;
7238 }
7239 else
7240 {
7241 icode = direct_optab_handler (optab->no_result, mode);
7242 num_ops = 2;
7243 }
7244 }
7245 /* Otherwise, we need to generate a result. */
7246 else
7247 {
7248 if (use_memmodel)
7249 {
7250 icode = direct_optab_handler (after ? optab->mem_fetch_after
7251 : optab->mem_fetch_before, mode);
7252 create_integer_operand (&ops[3], model);
7253 num_ops = 4;
7254 }
7255 else
7256 {
7257 icode = optab_handler (after ? optab->fetch_after
7258 : optab->fetch_before, mode);
7259 num_ops = 3;
7260 }
7261 create_output_operand (&ops[op_counter++], target, mode);
7262 }
7263 if (icode == CODE_FOR_nothing)
7264 return NULL_RTX;
7265
7266 create_fixed_operand (&ops[op_counter++], mem);
7267 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7268 create_convert_operand_to (&ops[op_counter++], val, mode, true);
7269
7270 if (maybe_expand_insn (icode, num_ops, ops))
7271 return (target == const0_rtx ? const0_rtx : ops[0].value);
7272
7273 return NULL_RTX;
7274 }
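/* Schematically, the operand vectors assembled above are

       result used:    { output, mem, val [, model] }
       result unused:  { mem, val [, model] }

   with the trailing memory-model operand present only when the __atomic
   (memory-model-aware) optab variant is being tried.  */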
7275
7276
7277 /* This function expands an atomic fetch_OP or OP_fetch operation:
7278    TARGET is an optional place to stick the return value.  const0_rtx
7279    indicates the result is unused.
7280    Atomically fetch MEM, perform the operation with VAL, and store the
        result back to MEM.
7281 CODE is the operation being performed (OP)
7282 MEMMODEL is the memory model variant to use.
7283 AFTER is true to return the result of the operation (OP_fetch).
7284 AFTER is false to return the value before the operation (fetch_OP).
7285
7286 This function will *only* generate instructions if there is a direct
7287 optab. No compare and swap loops or libcalls will be generated. */
7288
7289 static rtx
7290 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
7291 enum rtx_code code, enum memmodel model,
7292 bool after)
7293 {
7294 machine_mode mode = GET_MODE (mem);
7295 struct atomic_op_functions optab;
7296 rtx result;
7297 bool unused_result = (target == const0_rtx);
7298
7299 get_atomic_op_for_code (&optab, code);
7300
7301 /* Check to see if there are any better instructions. */
7302 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
7303 if (result)
7304 return result;
7305
7306 /* Check for the case where the result isn't used and try those patterns. */
7307 if (unused_result)
7308 {
7309 /* Try the memory model variant first. */
7310 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
7311 if (result)
7312 return result;
7313
7314       /* Next try the old style without a memory model.  */
7315 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
7316 if (result)
7317 return result;
7318
7319 /* There is no no-result pattern, so try patterns with a result. */
7320 target = NULL_RTX;
7321 }
7322
7323 /* Try the __atomic version. */
7324 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
7325 if (result)
7326 return result;
7327
7328 /* Try the older __sync version. */
7329 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
7330 if (result)
7331 return result;
7332
7333 /* If the fetch value can be calculated from the other variation of fetch,
7334 try that operation. */
7335 if (after || unused_result || optab.reverse_code != UNKNOWN)
7336 {
7337 /* Try the __atomic version, then the older __sync version. */
7338 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
7339 if (!result)
7340 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
7341
7342 if (result)
7343 {
7344 /* If the result isn't used, no need to do compensation code. */
7345 if (unused_result)
7346 return result;
7347
7348 /* Issue compensation code. Fetch_after == fetch_before OP val.
7349 	     Fetch_before == fetch_after REVERSE_OP val.  */
7350 if (!after)
7351 code = optab.reverse_code;
7352 if (code == NOT)
7353 {
7354 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
7355 true, OPTAB_LIB_WIDEN);
7356 result = expand_simple_unop (mode, NOT, result, target, true);
7357 }
7358 else
7359 result = expand_simple_binop (mode, code, result, val, target,
7360 true, OPTAB_LIB_WIDEN);
7361 return result;
7362 }
7363 }
7364
7365 /* No direct opcode can be generated. */
7366 return NULL_RTX;
7367 }
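/* Worked example of the compensation code above: expanding
   __atomic_fetch_add (&x, 5, model) on a target that only provides
   atomic_add_fetch<mode> proceeds as

       new = atomic_add_fetch (&x, 5, model);
       old = new - 5;                        REVERSE_CODE of PLUS is MINUS

   For NOT (nand) only the opposite direction is possible, recomputing
   the after-value from the fetched one as ~(old & val).  */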
7368
7369
7370
7371 /* This function expands an atomic fetch_OP or OP_fetch operation:
7372    TARGET is an optional place to stick the return value.  const0_rtx
7373    indicates the result is unused.
7374    Atomically fetch MEM, perform the operation with VAL, and store the
        result back to MEM.
7375 CODE is the operation being performed (OP)
7376 MEMMODEL is the memory model variant to use.
7377 AFTER is true to return the result of the operation (OP_fetch).
7378 AFTER is false to return the value before the operation (fetch_OP). */
7379 rtx
7380 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7381 enum memmodel model, bool after)
7382 {
7383 machine_mode mode = GET_MODE (mem);
7384 rtx result;
7385 bool unused_result = (target == const0_rtx);
7386
7387 /* If loads are not atomic for the required size and we are not called to
7388 provide a __sync builtin, do not do anything so that we stay consistent
7389 with atomic loads of the same size. */
7390 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
7391 return NULL_RTX;
7392
7393 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
7394 after);
7395
7396 if (result)
7397 return result;
7398
7399 /* Add/sub can be implemented by doing the reverse operation with -(val). */
7400 if (code == PLUS || code == MINUS)
7401 {
7402 rtx tmp;
7403 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
7404
7405 start_sequence ();
7406 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
7407 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
7408 model, after);
7409 if (result)
7410 {
7411 	  /* The reversed operation worked, so emit the insns and return.  */
7412 tmp = get_insns ();
7413 end_sequence ();
7414 emit_insn (tmp);
7415 return result;
7416 }
7417
7418       /* The reversed operation failed, so throw away the negation code
	  and continue.  */
7419 end_sequence ();
7420 }
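  /* E.g. __atomic_fetch_sub (&x, v, model) with no sub patterns at all
     is retried above as __atomic_fetch_add (&x, -v, model); the negation
     is emitted only if the reversed expansion actually succeeds.  */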
7421
7422 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
7423 if (!can_compare_and_swap_p (mode, false))
7424 {
7425 rtx libfunc;
7426 bool fixup = false;
7427 enum rtx_code orig_code = code;
7428 struct atomic_op_functions optab;
7429
7430 get_atomic_op_for_code (&optab, code);
7431 libfunc = optab_libfunc (after ? optab.fetch_after
7432 : optab.fetch_before, mode);
7433 if (libfunc == NULL
7434 && (after || unused_result || optab.reverse_code != UNKNOWN))
7435 {
7436 fixup = true;
7437 if (!after)
7438 code = optab.reverse_code;
7439 libfunc = optab_libfunc (after ? optab.fetch_before
7440 : optab.fetch_after, mode);
7441 }
7442 if (libfunc != NULL)
7443 {
7444 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7445 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
7446 addr, ptr_mode, val, mode);
7447
7448 if (!unused_result && fixup)
7449 result = expand_simple_binop (mode, code, result, val, target,
7450 true, OPTAB_LIB_WIDEN);
7451 return result;
7452 }
7453
7454 /* We need the original code for any further attempts. */
7455 code = orig_code;
7456 }
7457
7458 /* If nothing else has succeeded, default to a compare and swap loop. */
7459 if (can_compare_and_swap_p (mode, true))
7460 {
7461 rtx_insn *insn;
7462 rtx t0 = gen_reg_rtx (mode), t1;
7463
7464 start_sequence ();
7465
7466 /* If the result is used, get a register for it. */
7467 if (!unused_result)
7468 {
7469 if (!target || !register_operand (target, mode))
7470 target = gen_reg_rtx (mode);
7471 /* If fetch_before, copy the value now. */
7472 if (!after)
7473 emit_move_insn (target, t0);
7474 }
7475 else
7476 target = const0_rtx;
7477
7478 t1 = t0;
7479 if (code == NOT)
7480 {
7481 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7482 true, OPTAB_LIB_WIDEN);
7483 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7484 }
7485 else
7486 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
7487 OPTAB_LIB_WIDEN);
7488
7489 /* For after, copy the value now. */
7490 if (!unused_result && after)
7491 emit_move_insn (target, t1);
7492 insn = get_insns ();
7493 end_sequence ();
7494
7495 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7496 return target;
7497 }
7498
7499 return NULL_RTX;
7500 }
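/* The final fallback hands the sequence computing T1 to
   expand_compare_and_swap_loop, which wraps it in a loop of roughly
   this shape:

       t0 = *mem;
     retry:
       t1 = t0 OP val;                         the sequence built above
       if (!compare_and_swap (mem, &t0, t1))   t0 reloaded on failure
         goto retry;

   TARGET was filled in before or after the OP inside the sequence, so
   it ends up holding the fetch_before or fetch_after value.  */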
7501 \f
7502 /* Return true if OPERAND is suitable for operand number OPNO of
7503 instruction ICODE. */
7504
7505 bool
7506 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7507 {
7508 return (!insn_data[(int) icode].operand[opno].predicate
7509 || (insn_data[(int) icode].operand[opno].predicate
7510 (operand, insn_data[(int) icode].operand[opno].mode)));
7511 }
7512 \f
7513 /* TARGET is a target of a multiword operation that we are going to
7514 implement as a series of word-mode operations. Return true if
7515 TARGET is suitable for this purpose. */
7516
7517 bool
7518 valid_multiword_target_p (rtx target)
7519 {
7520 machine_mode mode;
7521 int i, size;
7522
7523 mode = GET_MODE (target);
7524 if (!GET_MODE_SIZE (mode).is_constant (&size))
7525 return false;
7526 for (i = 0; i < size; i += UNITS_PER_WORD)
7527 if (!validate_subreg (word_mode, mode, target, i))
7528 return false;
7529 return true;
7530 }
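/* E.g. on a 32-bit target, a DImode TARGET is valid here only if both
   of its word-sized halves are accessible as SImode subregs.  */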
7531
7532 /* Make OP describe an input operand that has value INTVAL and that has
7533 no inherent mode. This function should only be used for operands that
7534 are always expand-time constants. The backend may request that INTVAL
7535 be copied into a different kind of rtx, but it must specify the mode
7536 of that rtx if so. */
7537
7538 void
7539 create_integer_operand (class expand_operand *op, poly_int64 intval)
7540 {
7541 create_expand_operand (op, EXPAND_INTEGER,
7542 gen_int_mode (intval, MAX_MODE_INT),
7543 VOIDmode, false, intval);
7544 }
7545
7546 /* Like maybe_legitimize_operand, but do not change the code of the
7547 current rtx value. */
7548
7549 static bool
7550 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
7551 class expand_operand *op)
7552 {
7553 /* See if the operand matches in its current form. */
7554 if (insn_operand_matches (icode, opno, op->value))
7555 return true;
7556
7557 /* If the operand is a memory whose address has no side effects,
7558 try forcing the address into a non-virtual pseudo register.
7559 The check for side effects is important because copy_to_mode_reg
7560 cannot handle things like auto-modified addresses. */
7561 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
7562 {
7563 rtx addr, mem;
7564
7565 mem = op->value;
7566 addr = XEXP (mem, 0);
7567 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
7568 && !side_effects_p (addr))
7569 {
7570 rtx_insn *last;
7571 machine_mode mode;
7572
7573 last = get_last_insn ();
7574 mode = get_address_mode (mem);
7575 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7576 if (insn_operand_matches (icode, opno, mem))
7577 {
7578 op->value = mem;
7579 return true;
7580 }
7581 delete_insns_since (last);
7582 }
7583 }
7584
7585 return false;
7586 }
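/* For instance, a MEM whose address is (plus (reg X) (const_int 4096))
   may fail a target predicate that accepts only small offsets; the code
   above retries with the whole address forced into a fresh pseudo, and
   deletes the copy again if even that form is rejected.  */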
7587
7588 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7589 on success, storing the new operand value back in OP. */
7590
7591 static bool
7592 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7593 class expand_operand *op)
7594 {
7595 machine_mode mode, imode, tmode;
7596
7597 mode = op->mode;
7598 switch (op->type)
7599 {
7600 case EXPAND_FIXED:
7601 {
7602 temporary_volatile_ok v (true);
7603 return maybe_legitimize_operand_same_code (icode, opno, op);
7604 }
7605
7606 case EXPAND_OUTPUT:
7607 gcc_assert (mode != VOIDmode);
7608 if (op->value
7609 && op->value != const0_rtx
7610 && GET_MODE (op->value) == mode
7611 && maybe_legitimize_operand_same_code (icode, opno, op))
7612 return true;
7613
7614 op->value = gen_reg_rtx (mode);
7615 op->target = 0;
7616 break;
7617
7618 case EXPAND_INPUT:
7619 input:
7620 gcc_assert (mode != VOIDmode);
7621 gcc_assert (GET_MODE (op->value) == VOIDmode
7622 || GET_MODE (op->value) == mode);
7623 if (maybe_legitimize_operand_same_code (icode, opno, op))
7624 return true;
7625
7626 op->value = copy_to_mode_reg (mode, op->value);
7627 break;
7628
7629 case EXPAND_CONVERT_TO:
7630 gcc_assert (mode != VOIDmode);
7631 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7632 goto input;
7633
7634 case EXPAND_CONVERT_FROM:
7635 if (GET_MODE (op->value) != VOIDmode)
7636 mode = GET_MODE (op->value);
7637 else
7638 /* The caller must tell us what mode this value has. */
7639 gcc_assert (mode != VOIDmode);
7640
7641 imode = insn_data[(int) icode].operand[opno].mode;
7642 tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
7643 ? GET_MODE_INNER (imode) : imode);
7644 if (tmode != VOIDmode && tmode != mode)
7645 {
7646 op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
7647 mode = tmode;
7648 }
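      /* A scalar value feeding a vector operand was converted to the
	 element mode above; here it is duplicated into every lane,
	 e.g. SImode 5 -> V4SImode {5, 5, 5, 5}.  */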
7649 if (imode != VOIDmode && imode != mode)
7650 {
7651 gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
7652 op->value = expand_vector_broadcast (imode, op->value);
7653 mode = imode;
7654 }
7655 goto input;
7656
7657 case EXPAND_ADDRESS:
7658 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7659 op->value);
7660 goto input;
7661
7662 case EXPAND_INTEGER:
7663 mode = insn_data[(int) icode].operand[opno].mode;
7664 if (mode != VOIDmode
7665 && known_eq (trunc_int_for_mode (op->int_value, mode),
7666 op->int_value))
7667 {
7668 op->value = gen_int_mode (op->int_value, mode);
7669 goto input;
7670 }
7671 break;
7672 }
7673 return insn_operand_matches (icode, opno, op->value);
7674 }
7675
7676 /* Make OP describe an input operand that should have the same value
7677 as VALUE, after any mode conversion that the target might request.
7678 TYPE is the type of VALUE. */
7679
7680 void
7681 create_convert_operand_from_type (class expand_operand *op,
7682 rtx value, tree type)
7683 {
7684 create_convert_operand_from (op, value, TYPE_MODE (type),
7685 TYPE_UNSIGNED (type));
7686 }
7687
7688 /* Return true if the requirements on operands OP1 and OP2 of instruction
7689 ICODE are similar enough for the result of legitimizing OP1 to be
7690 reusable for OP2. OPNO1 and OPNO2 are the operand numbers associated
7691 with OP1 and OP2 respectively. */
7692
7693 static inline bool
7694 can_reuse_operands_p (enum insn_code icode,
7695 unsigned int opno1, unsigned int opno2,
7696 const class expand_operand *op1,
7697 const class expand_operand *op2)
7698 {
7699 /* Check requirements that are common to all types. */
7700 if (op1->type != op2->type
7701 || op1->mode != op2->mode
7702 || (insn_data[(int) icode].operand[opno1].mode
7703 != insn_data[(int) icode].operand[opno2].mode))
7704 return false;
7705
7706 /* Check the requirements for specific types. */
7707 switch (op1->type)
7708 {
7709 case EXPAND_OUTPUT:
7710 /* Outputs must remain distinct. */
7711 return false;
7712
7713 case EXPAND_FIXED:
7714 case EXPAND_INPUT:
7715 case EXPAND_ADDRESS:
7716 case EXPAND_INTEGER:
7717 return true;
7718
7719 case EXPAND_CONVERT_TO:
7720 case EXPAND_CONVERT_FROM:
7721 return op1->unsigned_p == op2->unsigned_p;
7722 }
7723 gcc_unreachable ();
7724 }
7725
7726 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7727 of instruction ICODE. Return true on success, leaving the new operand
7728 values in the OPS themselves. Emit no code on failure. */
7729
7730 bool
7731 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7732 unsigned int nops, class expand_operand *ops)
7733 {
7734 rtx_insn *last = get_last_insn ();
7735 rtx *orig_values = XALLOCAVEC (rtx, nops);
7736 for (unsigned int i = 0; i < nops; i++)
7737 {
7738 orig_values[i] = ops[i].value;
7739
7740 /* First try reusing the result of an earlier legitimization.
7741 This avoids duplicate rtl and ensures that tied operands
7742 remain tied.
7743
7744 This search is linear, but NOPS is bounded at compile time
7745 	 to a small number (currently a single digit).  */
7746 unsigned int j = 0;
7747 for (; j < i; ++j)
7748 if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
7749 && rtx_equal_p (orig_values[j], orig_values[i])
7750 && ops[j].value
7751 && insn_operand_matches (icode, opno + i, ops[j].value))
7752 {
7753 ops[i].value = copy_rtx (ops[j].value);
7754 break;
7755 }
7756
7757 /* Otherwise try legitimizing the operand on its own. */
7758 if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
7759 {
7760 delete_insns_since (last);
7761 return false;
7762 }
7763 }
7764 return true;
7765 }
7766
7767 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7768 as its operands. Return the instruction pattern on success,
7769 and emit any necessary set-up code. Return null and emit no
7770 code on failure. */
7771
7772 rtx_insn *
7773 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7774 class expand_operand *ops)
7775 {
7776 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7777 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7778 return NULL;
7779
7780 switch (nops)
7781 {
7782 case 1:
7783 return GEN_FCN (icode) (ops[0].value);
7784 case 2:
7785 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7786 case 3:
7787 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7788 case 4:
7789 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7790 ops[3].value);
7791 case 5:
7792 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7793 ops[3].value, ops[4].value);
7794 case 6:
7795 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7796 ops[3].value, ops[4].value, ops[5].value);
7797 case 7:
7798 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7799 ops[3].value, ops[4].value, ops[5].value,
7800 ops[6].value);
7801 case 8:
7802 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7803 ops[3].value, ops[4].value, ops[5].value,
7804 ops[6].value, ops[7].value);
7805 case 9:
7806 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7807 ops[3].value, ops[4].value, ops[5].value,
7808 ops[6].value, ops[7].value, ops[8].value);
7809 }
7810 gcc_unreachable ();
7811 }
7812
7813 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7814 as its operands. Return true on success and emit no code on failure. */
7815
7816 bool
7817 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7818 class expand_operand *ops)
7819 {
7820 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7821 if (pat)
7822 {
7823 emit_insn (pat);
7824 return true;
7825 }
7826 return false;
7827 }
7828
7829 /* Like maybe_expand_insn, but for jumps. */
7830
7831 bool
7832 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7833 class expand_operand *ops)
7834 {
7835 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7836 if (pat)
7837 {
7838 emit_jump_insn (pat);
7839 return true;
7840 }
7841 return false;
7842 }
7843
7844 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7845 as its operands. */
7846
7847 void
7848 expand_insn (enum insn_code icode, unsigned int nops,
7849 class expand_operand *ops)
7850 {
7851 if (!maybe_expand_insn (icode, nops, ops))
7852 gcc_unreachable ();
7853 }
7854
7855 /* Like expand_insn, but for jumps. */
7856
7857 void
7858 expand_jump_insn (enum insn_code icode, unsigned int nops,
7859 class expand_operand *ops)
7860 {
7861 if (!maybe_expand_jump_insn (icode, nops, ops))
7862 gcc_unreachable ();
7863 }
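/* Typical use of the expand_operand machinery, mirroring callers earlier
   in this file such as expand_atomic_load:

       class expand_operand ops[3];
       create_output_operand (&ops[0], target, mode);
       create_fixed_operand (&ops[1], mem);
       create_integer_operand (&ops[2], model);
       if (maybe_expand_insn (icode, 3, ops))
	 ... use ops[0].value, which need not be TARGET itself ...

   The maybe_* entry points emit nothing on failure; expand_insn and
   expand_jump_insn assert that expansion cannot fail.  */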