/* memset with unaligned store and rep stosb
   Copyright (C) 2016-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
/* memset is implemented as:
   1. Use overlapping store to avoid branch.
   2. If size is less than VEC, use integer register stores.
   3. If size is from VEC_SIZE to 2 * VEC_SIZE, use 2 VEC stores.
   4. If size is from 2 * VEC_SIZE to 4 * VEC_SIZE, use 4 VEC stores.
   5. If size is more than 4 * VEC_SIZE, align to 4 * VEC_SIZE with
      4 VEC stores and store 4 * VEC at a time until done.  */
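/* The C sketch below is illustrative only: it shows the size dispatch
   described above for the VEC_SIZE == 32 (AVX2) flavor, using intrinsics
   instead of the macros in this file, and it omits the ERMS/rep-stosb
   cutoff that the unaligned_erms entry adds further down.

       #include <immintrin.h>
       #include <stddef.h>

       void *memset_sketch (void *dst, int c, size_t len)
       {
         unsigned char *p = dst;
         __m256i v = _mm256_set1_epi8 ((char) c);

         if (len < 32)
           {
             // Integer-register stores in the real code; byte loop here.
             for (size_t i = 0; i < len; i++)
               p[i] = (unsigned char) c;
           }
         else if (len <= 2 * 32)
           {
             // Two overlapping unaligned stores, no branch on exact size.
             _mm256_storeu_si256 ((__m256i *) p, v);
             _mm256_storeu_si256 ((__m256i *) (p + len - 32), v);
           }
         else if (len <= 4 * 32)
           {
             // Four overlapping unaligned stores.
             _mm256_storeu_si256 ((__m256i *) p, v);
             _mm256_storeu_si256 ((__m256i *) (p + 32), v);
             _mm256_storeu_si256 ((__m256i *) (p + len - 64), v);
             _mm256_storeu_si256 ((__m256i *) (p + len - 32), v);
           }
         else
           memset_loop_4x (p, c, len);  // hypothetical; see loop sketch below
         return dst;
       }  */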
#ifndef MEMSET_CHK_SYMBOL
# define MEMSET_CHK_SYMBOL(p,s) MEMSET_SYMBOL(p, s)
#ifndef WMEMSET_CHK_SYMBOL
# define WMEMSET_CHK_SYMBOL(p,s) WMEMSET_SYMBOL(p, s)
# define VZEROUPPER vzeroupper
# define VZEROUPPER_SHORT_RETURN vzeroupper; ret
#ifndef VZEROUPPER_SHORT_RETURN
# define VZEROUPPER_SHORT_RETURN rep; ret
# define LOOP_4X_OFFSET (VEC_SIZE * 4)
# define LOOP_4X_OFFSET (0)
#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
# define LESS_VEC_REG rax
# define LESS_VEC_REG rdi
#ifdef USE_XMM_LESS_VEC
#ifdef USE_LESS_VEC_MASK_STORE
# define SET_REG64 rcx
# define SET_REG32 ecx
# define SET_REG64 rsi
# define SET_REG32 esi
#define PAGE_SIZE 4096
/* Macro to calculate size of small memset block for aligning
   purposes.  */
#define SMALL_MEMSET_ALIGN(mov_sz, ret_sz) (2 * (mov_sz) + (ret_sz) + 1)
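/* Worked example (an interpretation, not taken from the original comments):
   a 4-to-7-byte memset block built from two 4-byte movs plus a 1-byte ret
   gives SMALL_MEMSET_ALIGN (4, 1) == 2 * 4 + 1 + 1 == 10.  Used as the
   max-skip argument of .p2align below, this only pays for alignment padding
   that is no larger than the small block itself, which is roughly the case
   where the unaligned block would straddle a fetch line.  */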
# error SECTION is not defined!
	.section SECTION(.text), "ax", @progbits
ENTRY_CHK (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned))
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END_CHK (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned))
ENTRY (WMEMSET_SYMBOL (__wmemset, unaligned))
	WMEMSET_SET_VEC0_AND_SET_RETURN (%esi, %rdi)
	WMEMSET_VDUP_TO_VEC0_LOW()
	jb	L(less_vec_from_wmemset)
	WMEMSET_VDUP_TO_VEC0_HIGH()
	jmp	L(entry_from_wmemset)
END (WMEMSET_SYMBOL (__wmemset, unaligned))
#if defined SHARED && IS_IN (libc)
ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned))
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned))
ENTRY (MEMSET_SYMBOL (__memset, unaligned))
	MEMSET_SET_VEC0_AND_SET_RETURN (%esi, %rdi)
	/* Clear the upper 32 bits.  */
	MEMSET_VDUP_TO_VEC0_HIGH()
L(entry_from_wmemset):
	cmpq	$(VEC_SIZE * 2), %rdx
	/* From VEC_SIZE to 2 * VEC_SIZE.  No branch when size == VEC_SIZE.  */
	VMOVU	%VEC(0), -VEC_SIZE(%rdi,%rdx)
	VMOVU	%VEC(0), (%rdi)
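/* The two stores above overlap whenever size < 2 * VEC_SIZE, so the whole
   [VEC_SIZE, 2 * VEC_SIZE] range is handled without a branch on the exact
   length.  A minimal C sketch of the same trick, assuming VEC_SIZE == 32
   (AVX2) and 32 <= len <= 64; the function name is made up for
   illustration:

       #include <immintrin.h>
       #include <stddef.h>

       static void memset_vec_2x (void *dst, int c, size_t len)
       {
         __m256i v = _mm256_set1_epi8 ((char) c);
         // Tail store first, then head store; they overlap for len < 64.
         _mm256_storeu_si256 ((__m256i *) ((char *) dst + len - 32), v);
         _mm256_storeu_si256 ((__m256i *) dst, v);
       }  */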
#if defined USE_MULTIARCH && IS_IN (libc)
END (MEMSET_SYMBOL (__memset, unaligned))
# if defined SHARED && IS_IN (libc)
ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
ENTRY_P2ALIGN (MEMSET_SYMBOL (__memset, unaligned_erms), 6)
	MEMSET_SET_VEC0_AND_SET_RETURN (%esi, %rdi)
	/* Clear the upper 32 bits.  */
	cmp	$VEC_SIZE, %RDX_LP
	MEMSET_VDUP_TO_VEC0_HIGH ()
	cmp	$(VEC_SIZE * 2), %RDX_LP
	ja	L(stosb_more_2x_vec)
	/* From VEC_SIZE to 2 * VEC_SIZE.  No branch when size == VEC_SIZE.  */
	VMOVU	%VEC(0), (%rdi)
	VMOVU	%VEC(0), (VEC_SIZE * -1)(%rdi, %rdx)
#ifdef USE_LESS_VEC_MASK_STORE
	VMOVU	%VEC(0), (VEC_SIZE * -2)(%rdi, %rdx)
	VMOVU	%VEC(0), (VEC_SIZE * -1)(%rdi, %rdx)
	VMOVU	%VEC(0), (VEC_SIZE * -2)(%rdi)
	VMOVU	%VEC(0), (VEC_SIZE * -1)(%rdi)
/* If we have AVX512 mask instructions, put L(less_vec) close to the entry,
   as it doesn't take much space and is likely a hot target.  */
#ifdef USE_LESS_VEC_MASK_STORE
L(less_vec_from_wmemset):
	/* Less than 1 VEC.  */
# if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
# error Unsupported VEC_SIZE!
	/* Clear high bits from edi.  Only keep the bits relevant to the page
	   cross check.  Note that rax, set up in
	   MEMSET_SET_VEC0_AND_SET_RETURN, is used as the pointer from here
	   on out.  */
	andl	$(PAGE_SIZE - 1), %edi
	/* Check if the VEC_SIZE store would cross a page.  Mask stores
	   suffer serious performance degradation when they have to
	   fault-suppress.  */
	cmpl	$(PAGE_SIZE - VEC_SIZE), %edi
	/* This is generally considered a cold target.  */
	bzhiq	%rdx, %rcx, %rcx
	bzhil	%edx, %ecx, %ecx
	vmovdqu8 %VEC(0), (%rax){%k1}
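/* A minimal C sketch of the masked store path above, assuming VEC_SIZE == 64
   (AVX512BW + BMI2, e.g. compiled with -mavx512bw -mbmi2) and len < 64; the
   function name is made up for illustration.  As the comments above note,
   the real code only takes this path when the full-width store cannot cross
   a page, since fault suppression on a masked store is very slow:

       #include <immintrin.h>
       #include <stddef.h>
       #include <stdint.h>

       static void memset_less_vec_mask (void *dst, int c, size_t len)
       {
         __m512i v = _mm512_set1_epi8 ((char) c);
         // Keep only the low LEN mask bits: same effect as the bzhiq above.
         __mmask64 k = _bzhi_u64 ((uint64_t) -1, (unsigned int) len);
         _mm512_mask_storeu_epi8 (dst, k, v);
       }  */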
# if defined USE_MULTIARCH && IS_IN (libc)
	/* Include L(stosb_local) here if including L(less_vec) between
	   L(stosb_more_2x_vec) and ENTRY.  This is to cache-align the
	   L(stosb_more_2x_vec) target.  */
#if defined USE_MULTIARCH && IS_IN (libc)
L(stosb_more_2x_vec):
	cmp	__x86_rep_stosb_threshold(%rip), %RDX_LP
	/* Fallthrough goes to L(loop_4x_vec).  Tests for memset (2x, 4x]
	   and (4x, 8x] jump to target.  */
	/* Store next 2x vec regardless.  */
	VMOVU	%VEC(0), (%rdi)
	VMOVU	%VEC(0), (VEC_SIZE * 1)(%rdi)
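/* Sizes above __x86_rep_stosb_threshold (a per-CPU tunable set up by glibc)
   are handed to `rep stosb' instead of the vector loop.  A sketch of what
   that path boils down to, using GCC/Clang inline asm; the helper name is
   made up for illustration:

       #include <stddef.h>

       static void rep_stosb (void *dst, int c, size_t len)
       {
         void *d = dst;
         size_t n = len;
         __asm__ __volatile__ ("rep stosb"
                               : "+D" (d), "+c" (n)  // rdi = dst, rcx = count
                               : "a" (c)             // al = byte value
                               : "memory");
       }  */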
/* Two different methods of setting up pointers / compare.  The two methods
   are based on the fact that EVEX/AVX512 mov instructions take more bytes
   than AVX2/SSE2 mov instructions, and that EVEX/AVX512 machines also have
   fast LEA_BID.  Both methods set up END_REG to avoid a complex address
   mode.  For EVEX/AVX512 this saves code size and keeps a few targets in
   one fetch block.  For AVX2/SSE2 this helps prevent AGU bottlenecks.  */
#if !(defined USE_WITH_EVEX || defined USE_WITH_AVX512)
	/* If AVX2/SSE2 compute END_REG (rdi) with ALU.  */
	cmpq	$(VEC_SIZE * 4), %rdx
#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
	/* If EVEX/AVX512 compute END_REG - (VEC_SIZE * 4 + LOOP_4X_OFFSET)
	   with LEA_BID.  */
	/* END_REG is rcx for EVEX/AVX512.  */
	leaq	-(VEC_SIZE * 4 + LOOP_4X_OFFSET)(%rdi, %rdx), %END_REG
	/* Store next 2x vec regardless.  */
	VMOVU	%VEC(0), (VEC_SIZE * 2)(%rax)
	VMOVU	%VEC(0), (VEC_SIZE * 3)(%rax)
#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
	/* If LOOP_4X_OFFSET is nonzero, don't readjust LOOP_REG (rdi); just
	   add the extra offset to the addresses in the loop.  Used for AVX512
	   to save space, as there is no way to encode (VEC_SIZE * 4) in an
	   imm8.  */
# if LOOP_4X_OFFSET == 0
	subq	$-(VEC_SIZE * 4), %LOOP_REG
	/* Avoid imm32 compare here to save code size.  */
	addq	$-(VEC_SIZE * 4), %END_REG
	cmpq	$(VEC_SIZE * 8), %rdx
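/* Note on the `subq $-(VEC_SIZE * 4)' / `addq $-(VEC_SIZE * 4)' idiom (an
   observation, not from the original comments): with VEC_SIZE == 32 the
   positive constant 128 is just outside the signed 8-bit immediate range,
   so `addq $128, %reg' needs a 4-byte immediate, while the equivalent
   `subq $-128, %reg' encodes -128 as a sign-extended imm8 and is three
   bytes shorter.  */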
#if !(defined USE_WITH_EVEX || defined USE_WITH_AVX512)
	/* Set LOOP_REG (rdx).  */
	leaq	(VEC_SIZE * 4)(%rax), %LOOP_REG
	/* Align dst for loop.  */
	andq	$(VEC_SIZE * -2), %LOOP_REG
	VMOVA	%VEC(0), LOOP_4X_OFFSET(%LOOP_REG)
	VMOVA	%VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
	VMOVA	%VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
	VMOVA	%VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
	subq	$-(VEC_SIZE * 4), %LOOP_REG
	cmpq	%END_REG, %LOOP_REG
	.p2align 4,, MOV_SIZE
	VMOVU	%VEC(0), LOOP_4X_OFFSET(%END_REG)
	VMOVU	%VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%END_REG)
	VMOVU	%VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%END_REG)
	VMOVU	%VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%END_REG)
	ZERO_UPPER_VEC_REGISTERS_RETURN
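/* A self-contained C sketch of the large-size strategy above, assuming
   VEC_SIZE == 32 (AVX2) and len > 4 * VEC_SIZE; the function name is made
   up for illustration.  Four unaligned head stores, an aligned 4-vector
   loop, then four unaligned (possibly overlapping) tail stores:

       #include <immintrin.h>
       #include <stddef.h>
       #include <stdint.h>

       static void memset_loop_4x (void *dst, int c, size_t len)
       {
         char *p = dst;
         char *end = p + len;
         __m256i v = _mm256_set1_epi8 ((char) c);

         // Head: 4 unaligned stores covering the first 128 bytes.
         _mm256_storeu_si256 ((__m256i *) p, v);
         _mm256_storeu_si256 ((__m256i *) (p + 32), v);
         _mm256_storeu_si256 ((__m256i *) (p + 64), v);
         _mm256_storeu_si256 ((__m256i *) (p + 96), v);

         // Round down to a 2 * VEC_SIZE boundary, like the andq above.
         char *loop = (char *) (((uintptr_t) p + 128) & ~(uintptr_t) 63);
         while (loop < end - 128)
           {
             _mm256_store_si256 ((__m256i *) loop, v);
             _mm256_store_si256 ((__m256i *) (loop + 32), v);
             _mm256_store_si256 ((__m256i *) (loop + 64), v);
             _mm256_store_si256 ((__m256i *) (loop + 96), v);
             loop += 128;
           }

         // Tail: 4 unaligned stores ending exactly at END.
         _mm256_storeu_si256 ((__m256i *) (end - 128), v);
         _mm256_storeu_si256 ((__m256i *) (end - 96), v);
         _mm256_storeu_si256 ((__m256i *) (end - 64), v);
         _mm256_storeu_si256 ((__m256i *) (end - 32), v);
       }  */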
#ifndef USE_LESS_VEC_MASK_STORE
# if defined USE_MULTIARCH && IS_IN (libc)
	/* If USE_LESS_VEC_MASK_STORE is not defined, put L(stosb_local) here.
	   It will be in range for a 2-byte jump encoding.  */
	/* Define L(less_vec) only if not otherwise defined.  */
	/* Broadcast esi to a partial register (i.e. for VEC_SIZE == 32,
	   broadcast to xmm).  This only does anything for AVX2.  */
	MEMSET_VDUP_TO_VEC0_LOW ()
L(less_vec_from_wmemset):
#ifndef USE_XMM_LESS_VEC
	MOVQ	%XMM0, %SET_REG64
	movb	%SET_REG8, (%LESS_VEC_REG)
	/* Align small targets only if not doing so would cross a fetch
	   line.  */
	.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, RET_SIZE)
	/* From 32 to 63.  No branch when size == 32.  */
	VMOVU	%YMM0, (%LESS_VEC_REG)
	VMOVU	%YMM0, -32(%LESS_VEC_REG, %rdx)
	.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, 1)
	/* From 16 to 31.  No branch when size == 16.  */
	VMOVU	%XMM0, (%LESS_VEC_REG)
	VMOVU	%XMM0, -16(%LESS_VEC_REG, %rdx)
	/* Move size is 3 for SSE2, EVEX, and AVX512.  Move size is 4 for
	   AVX2.  */
	.p2align 4,, SMALL_MEMSET_ALIGN(3 + XMM_SMALL, 1)
	/* From 8 to 15.  No branch when size == 8.  */
#ifdef USE_XMM_LESS_VEC
	MOVQ	%XMM0, -8(%rdi, %rdx)
	movq	%SET_REG64, (%LESS_VEC_REG)
	movq	%SET_REG64, -8(%LESS_VEC_REG, %rdx)
	/* Move size is 2 for SSE2, EVEX, and AVX512.  Move size is 4 for
	   AVX2.  */
	.p2align 4,, SMALL_MEMSET_ALIGN(2 << XMM_SMALL, 1)
	/* From 4 to 7.  No branch when size == 4.  */
#ifdef USE_XMM_LESS_VEC
	MOVD	%XMM0, -4(%rdi, %rdx)
	movl	%SET_REG32, (%LESS_VEC_REG)
	movl	%SET_REG32, -4(%LESS_VEC_REG, %rdx)
	/* 4 * XMM_SMALL for the third mov for AVX2.  */
	.p2align 4,, 4 * XMM_SMALL + SMALL_MEMSET_ALIGN(3, 1)
	/* From 2 to 3.  No branch when size == 2.  */
#ifdef USE_XMM_LESS_VEC
	movb	%SET_REG8, (%rdi)
	movb	%SET_REG8, 1(%rdi)
	movb	%SET_REG8, -1(%rdi, %rdx)
	movw	%SET_REG16, (%LESS_VEC_REG)
	movb	%SET_REG8, -1(%LESS_VEC_REG, %rdx)
END (MEMSET_SYMBOL (__memset, unaligned_erms))
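/* A scalar C sketch of the sub-VEC_SIZE ladder above for the non-XMM branch
   and len < 16 (the 16-to-31 and 32-to-63 cases use overlapping XMM/YMM
   stores in the same way); the helper name is made up for illustration.
   Each range is handled with at most two stores, the second taken from the
   end of the buffer so it overlaps the first whenever the size is less than
   twice the store width:

       #include <stddef.h>
       #include <string.h>

       static void memset_less_vec (unsigned char *p, int c, size_t len)
       {
         unsigned long long v8 = 0x0101010101010101ULL * (unsigned char) c;
         if (len >= 8)
           {   // 8 to 15: two overlapping 8-byte stores.
             memcpy (p, &v8, 8);
             memcpy (p + len - 8, &v8, 8);
           }
         else if (len >= 4)
           {   // 4 to 7: two overlapping 4-byte stores.
             unsigned int v4 = (unsigned int) v8;
             memcpy (p, &v4, 4);
             memcpy (p + len - 4, &v4, 4);
           }
         else if (len >= 2)
           {   // 2 to 3: a 2-byte store plus the last byte.
             unsigned short v2 = (unsigned short) v8;
             memcpy (p, &v2, 2);
             p[len - 1] = (unsigned char) c;
           }
         else if (len == 1)
           p[0] = (unsigned char) c;
       }  */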