/* Copy SIZE bytes from SRC to DEST.
   For SPARC v7.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>

/* Both of these macros have to start with exactly the same insn:
   the "be 82f + 4" branch below executes the first load of
   MOVE_BIGCHUNK in its delay slot and then enters MOVE_BIGALIGNCHUNK
   just past its (identical) first instruction.  Each expansion
   copies 32 bytes.  */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + offset + 0x00], %t0; \
	ldd	[%src + offset + 0x08], %t2; \
	ldd	[%src + offset + 0x10], %t4; \
	ldd	[%src + offset + 0x18], %t6; \
	st	%t0, [%dst + offset + 0x00]; \
	st	%t1, [%dst + offset + 0x04]; \
	st	%t2, [%dst + offset + 0x08]; \
	st	%t3, [%dst + offset + 0x0c]; \
	st	%t4, [%dst + offset + 0x10]; \
	st	%t5, [%dst + offset + 0x14]; \
	st	%t6, [%dst + offset + 0x18]; \
	st	%t7, [%dst + offset + 0x1c];

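/* The same 32-byte step with doubleword stores; only usable when the
   destination is doubleword aligned.  */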
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + offset + 0x00], %t0; \
	ldd	[%src + offset + 0x08], %t2; \
	ldd	[%src + offset + 0x10], %t4; \
	ldd	[%src + offset + 0x18], %t6; \
	std	%t0, [%dst + offset + 0x00]; \
	std	%t2, [%dst + offset + 0x08]; \
	std	%t4, [%dst + offset + 0x10]; \
	std	%t6, [%dst + offset + 0x18];

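/* Copy the 16 bytes at [src - offset - 16, src - offset).  The
   unrolled runs of these below are entered through a computed jump
   (see 79 and 104), so each expansion must stay exactly six
   instructions (24 bytes) long.  */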
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - offset - 0x10], %t0; \
	ldd	[%src - offset - 0x08], %t2; \
	st	%t0, [%dst - offset - 0x10]; \
	st	%t1, [%dst - offset - 0x0c]; \
	st	%t2, [%dst - offset - 0x08]; \
	st	%t3, [%dst - offset - 0x04];

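/* Doubleword-store variant of MOVE_LASTCHUNK, four instructions
   (16 bytes) per expansion; the computed jump after label 111
   depends on that size.  */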
#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - offset - 0x10], %t0; \
	ldd	[%src - offset - 0x08], %t2; \
	std	%t0, [%dst - offset - 0x10]; \
	std	%t2, [%dst - offset - 0x08];

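/* Copy the two bytes just below src - offset with byte loads and
   stores.  Four instructions (16 bytes) per expansion: the computed
   jump after label 106 uses 8 bytes of code per byte copied.  */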
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src - offset - 0x02], %t0; \
	ldub	[%src - offset - 0x01], %t1; \
	stb	%t0, [%dst - offset - 0x02]; \
	stb	%t1, [%dst - offset - 0x01];

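/* Shift-merge copy step for mutually misaligned pointers: four
   source words are loaded, each output word is built by OR-ing the
   tail of one source word (shifted by shil) with the head of the
   next (shifted by shir), and %prev carries the leftover bits from
   one expansion into the next.  Results go out as doubleword
   stores.  */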
#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + offset + 0x00], %t0; \
	ldd	[%src + offset + 0x08], %t2; \
	srl	%t0, shir, %t5; \
	srl	%t1, shir, %t6; \
	sll	%t0, shil, %t0; \
	or	%t5, %prev, %t5; \
	sll	%t1, shil, %prev; \
	or	%t6, %t0, %t0; \
	srl	%t2, shir, %t1; \
	srl	%t3, shir, %t6; \
	sll	%t2, shil, %t2; \
	or	%t1, %prev, %t1; \
	std	%t4, [%dst + offset + offset2 - 0x04]; \
	std	%t0, [%dst + offset + offset2 + 0x04]; \
	sll	%t3, shil, %prev; \
	or	%t6, %t2, %t4;

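/* The same shift-merge step, arranged so that both std stores fall
   on doubleword-aligned destination addresses.  */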
#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + offset + 0x00], %t0; \
	ldd	[%src + offset + 0x08], %t2; \
	srl	%t0, shir, %t4; \
	srl	%t1, shir, %t5; \
	sll	%t0, shil, %t6; \
	or	%t4, %prev, %t0; \
	sll	%t1, shil, %prev; \
	or	%t5, %t6, %t1; \
	srl	%t2, shir, %t4; \
	srl	%t3, shir, %t5; \
	sll	%t2, shil, %t6; \
	or	%t4, %prev, %t2; \
	sll	%t3, shil, %prev; \
	or	%t5, %t6, %t3; \
	std	%t0, [%dst + offset + offset2 + 0x00]; \
	std	%t2, [%dst + offset + offset2 + 0x08];

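/* memcpy must return dst, __mempcpy dst + len.  Each entry point
   parks its return value in the (otherwise unused) aggregate-return
   word of the frame at [%sp + 64]; the copy code is shared from
   label 101 on, and every exit path reloads %o0 from that slot in
   the delay slot of its retl.  */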
	.text
ENTRY(__mempcpy)
	add	%o0, %o2, %g1
	ba	101f
	st	%g1, [%sp + 64]
END(__mempcpy)

	.align 4
ENTRY(memcpy)	/* %o0=dst %o1=src %o2=len */
	st	%o0, [%sp + 64]
101:
	sub	%o0, %o1, %o4
9:	andcc	%o4, 3, %o5
0:	bne	86f
	cmp	%o2, 15

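/* Here dst - src is a multiple of 4, so the two pointers can be
   brought to the same word alignment.  Lengths of at most 15 bytes
   are handled at 90; longer copies first advance src (and dst in
   step) to a word and then a doubleword boundary.  */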
	bleu	90f
	andcc	%o1, 3, %g0

	be	78f
	andcc	%o1, 4, %g0

	andcc	%o1, 1, %g0
	be	4f
	andcc	%o1, 2, %g0

	ldub	[%o1], %g2
	add	%o1, 1, %o1
	stb	%g2, [%o0]
	sub	%o2, 1, %o2
	bne	77f
	add	%o0, 1, %o0
4:	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	sub	%o2, 2, %o2
	add	%o0, 2, %o0

77:	andcc	%o1, 4, %g0
78:	be	2f
	mov	%o2, %g1

	ld	[%o1], %o4
	sub	%g1, 4, %g1
	st	%o4, [%o0]
	add	%o1, 4, %o1
	add	%o0, 4, %o0
2:	andcc	%g1, 0xffffff80, %g6
	be	3f
	andcc	%o0, 4, %g0

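/* dst is doubleword aligned as well: use the ldd/std loop at 82.
   The target is 82f + 4 because the delay slot below already
   executes the identical first load; see the comment on the macros
   above.  */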
	be	82f + 4
5:	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g6, 128, %g6
	add	%o1, 128, %o1
	bne	5b
	add	%o0, 128, %o0
3:	andcc	%g1, 0x70, %g6
	be	80f
	andcc	%g1, 8, %g0

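/* Copy the remaining 16-byte chunks (%g6 = residual & 0x70) by
   jumping into the unrolled MOVE_LASTCHUNK run at 79: the helper at
   100 returns the address of the call minus %o4 in %o5, and since
   each chunk is 24 bytes of code, %o4 = %g6 + %g6 / 2 makes the
   jmpl land exactly %g6 copied bytes before 80.  */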
	srl	%g6, 1, %o4
	mov	%o7, %g2
	add	%g6, %o4, %o4
	add	%o1, %g6, %o1
104:	call	100f
	add	%o0, %g6, %o0
	jmpl	%o5 + (80f - 104b), %g0
	mov	%g2, %o7

79:	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80:	be	81f
	andcc	%g1, 4, %g0

	ldd	[%o1], %g2
	add	%o0, 8, %o0
	st	%g2, [%o0 - 0x08]
	add	%o1, 8, %o1
	st	%g3, [%o0 - 0x04]

81:	be	1f
	andcc	%g1, 2, %g0

	ld	[%o1], %g2
	add	%o1, 4, %o1
	st	%g2, [%o0]
	add	%o0, 4, %o0
1:	be	1f
	andcc	%g1, 1, %g0

	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	add	%o0, 2, %o0
1:	be	1f
	nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:	retl
	ld	[%sp + 64], %o0

82:	/* ldd_std */
	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g6, 128, %g6
	add	%o1, 128, %o1
	bne	82b
	add	%o0, 128, %o0

	andcc	%g1, 0x70, %g6
	be	84f
	andcc	%g1, 8, %g0

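/* The same computed-jump trick for the doubleword tail:
   MOVE_LASTALIGNCHUNK is 16 bytes of code per 16 data bytes, so %g6
   itself is the code offset and the helper at 110 subtracts it
   directly.  */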
	mov	%o7, %g2
111:	call	110f
	add	%o1, %g6, %o1
	mov	%g2, %o7
	jmpl	%o5 + (84f - 111b), %g0
	add	%o0, %g6, %o0

83:	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84:	be	85f
	andcc	%g1, 4, %g0

	ldd	[%o1], %g2
	add	%o0, 8, %o0
	std	%g2, [%o0 - 0x08]
	add	%o1, 8, %o1
85:	be	1f
	andcc	%g1, 2, %g0

	ld	[%o1], %g2
	add	%o1, 4, %o1
	st	%g2, [%o0]
	add	%o0, 4, %o0
1:	be	1f
	andcc	%g1, 1, %g0

	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	add	%o0, 2, %o0
1:	be	1f
	nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:	retl
	ld	[%sp + 64], %o0

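/* dst - src is not a multiple of 4, so the two can never both be
   word aligned.  Copies of at most 6 bytes go to the byte code at
   88, copies of 256 bytes or more to the block code at 87; the rest
   are realigned a word at a time below.  */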
86:	cmp	%o2, 6
	bleu	88f

	cmp	%o2, 256
	bcc	87f

	andcc	%o0, 3, %g0
	be	61f
	andcc	%o0, 1, %g0
	be	60f
	andcc	%o0, 2, %g0

	ldub	[%o1], %g5
	add	%o1, 1, %o1
	stb	%g5, [%o0]
	sub	%o2, 1, %o2
	bne	61f
	add	%o0, 1, %o0
60:	ldub	[%o1], %g3
	add	%o1, 2, %o1
	stb	%g3, [%o0]
	sub	%o2, 2, %o2
	ldub	[%o1 - 1], %g3
	add	%o0, 2, %o0
	stb	%g3, [%o0 - 1]
61:	and	%o1, 3, %g2
	and	%o2, 0xc, %g3
	and	%o1, -4, %o1
	cmp	%g3, 4
	sll	%g2, 3, %g4
	mov	32, %g2
	be	4f
	sub	%g2, %g4, %g6

	blu	3f
	cmp	%g3, 0x8

	be	2f
	srl	%o2, 2, %g3

	ld	[%o1], %o3
	add	%o0, -8, %o0
	ld	[%o1 + 4], %o4
	b	8f
	add	%g3, 1, %g3
2:	ld	[%o1], %o4
	add	%o0, -12, %o0
	ld	[%o1 + 4], %o5
	add	%g3, 2, %g3
	b	9f
	add	%o1, -4, %o1
3:	ld	[%o1], %g1
	add	%o0, -4, %o0
	ld	[%o1 + 4], %o3
	srl	%o2, 2, %g3
	b	7f
	add	%o1, 4, %o1
4:	ld	[%o1], %o5
	cmp	%o2, 7
	ld	[%o1 + 4], %g1
	srl	%o2, 2, %g3
	bleu	10f
	add	%o1, 8, %o1

	ld	[%o1], %o3
	add	%g3, -1, %g3
5:	sll	%o5, %g4, %g2
	srl	%g1, %g6, %g5
	or	%g2, %g5, %g2
	st	%g2, [%o0]
7:	ld	[%o1 + 4], %o4
	sll	%g1, %g4, %g2
	srl	%o3, %g6, %g5
	or	%g2, %g5, %g2
	st	%g2, [%o0 + 4]
8:	ld	[%o1 + 8], %o5
	sll	%o3, %g4, %g2
	srl	%o4, %g6, %g5
	or	%g2, %g5, %g2
	st	%g2, [%o0 + 8]
9:	ld	[%o1 + 12], %g1
	sll	%o4, %g4, %g2
	srl	%o5, %g6, %g5
	addcc	%g3, -4, %g3
	or	%g2, %g5, %g2
	add	%o1, 16, %o1
	st	%g2, [%o0 + 12]
	add	%o0, 16, %o0
	bne,a	5b
	ld	[%o1], %o3
10:	sll	%o5, %g4, %g2
	srl	%g1, %g6, %g5
	srl	%g6, 3, %g3
	or	%g2, %g5, %g2
	sub	%o1, %g3, %o1
	andcc	%o2, 2, %g0
	st	%g2, [%o0]
	be	1f
	andcc	%o2, 1, %g0

	ldub	[%o1], %g2
	add	%o1, 2, %o1
	stb	%g2, [%o0 + 4]
	add	%o0, 2, %o0
	ldub	[%o1 - 1], %g2
	stb	%g2, [%o0 + 3]
1:	be	1f
	nop
	ldub	[%o1], %g2
	stb	%g2, [%o0 + 4]
1:	retl
	ld	[%sp + 64], %o0

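/* Large (>= 256 bytes) mutually misaligned copy: advance src to a
   doubleword boundary, copying bytes as we go, then dispatch on
   %o5 = (dst - src) & 3 to one of three shift-merge loops (31, 32,
   33); each switches to its SMOVE_ALIGNCHUNK variant (41, 42, 43)
   when the destination stores line up on doublewords.  */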
87:	andcc	%o1, 3, %g0
	be	3f
	andcc	%o1, 1, %g0

	be	4f
	andcc	%o1, 2, %g0

	ldub	[%o1], %g2
	add	%o1, 1, %o1
	stb	%g2, [%o0]
	sub	%o2, 1, %o2
	bne	3f
	add	%o0, 1, %o0
4:	lduh	[%o1], %g2
	add	%o1, 2, %o1
	srl	%g2, 8, %g3
	sub	%o2, 2, %o2
	stb	%g3, [%o0]
	add	%o0, 2, %o0
	stb	%g2, [%o0 - 1]
3:	andcc	%o1, 4, %g0

	bne	2f
	cmp	%o5, 1

	ld	[%o1], %o4
	srl	%o4, 24, %g2
	stb	%g2, [%o0]
	srl	%o4, 16, %g3
	stb	%g3, [%o0 + 1]
	srl	%o4, 8, %g2
	stb	%g2, [%o0 + 2]
	sub	%o2, 4, %o2
	stb	%o4, [%o0 + 3]
	add	%o1, 4, %o1
	add	%o0, 4, %o0
2:	be	33f
	cmp	%o5, 2
	be	32f
	sub	%o2, 4, %o2
31:	ld	[%o1], %g2
	add	%o1, 4, %o1
	srl	%g2, 24, %g3
	and	%o0, 7, %g5
	stb	%g3, [%o0]
	cmp	%g5, 7
	sll	%g2, 8, %g1
	add	%o0, 4, %o0
	be	41f
	and	%o2, 0xffffffc0, %o3
	ld	[%o0 - 7], %o4
4:	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	subcc	%o3, 64, %o3
	add	%o1, 64, %o1
	bne	4b
	add	%o0, 64, %o0

	andcc	%o2, 0x30, %o3
	be,a	1f
	srl	%g1, 16, %g2
4:	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	subcc	%o3, 16, %o3
	add	%o1, 16, %o1
	bne	4b
	add	%o0, 16, %o0

	srl	%g1, 16, %g2
1:	st	%o4, [%o0 - 7]
	sth	%g2, [%o0 - 3]
	srl	%g1, 8, %g4
	b	88f
	stb	%g4, [%o0 - 1]
32:	ld	[%o1], %g2
	add	%o1, 4, %o1
	srl	%g2, 16, %g3
	and	%o0, 7, %g5
	sth	%g3, [%o0]
	cmp	%g5, 6
	sll	%g2, 16, %g1
	add	%o0, 4, %o0
	be	42f
	and	%o2, 0xffffffc0, %o3
	ld	[%o0 - 6], %o4
4:	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	subcc	%o3, 64, %o3
	add	%o1, 64, %o1
	bne	4b
	add	%o0, 64, %o0

	andcc	%o2, 0x30, %o3
	be,a	1f
	srl	%g1, 16, %g2
4:	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	subcc	%o3, 16, %o3
	add	%o1, 16, %o1
	bne	4b
	add	%o0, 16, %o0

	srl	%g1, 16, %g2
1:	st	%o4, [%o0 - 6]
	b	88f
	sth	%g2, [%o0 - 2]
33:	ld	[%o1], %g2
	sub	%o2, 4, %o2
	srl	%g2, 24, %g3
	and	%o0, 7, %g5
	stb	%g3, [%o0]
	cmp	%g5, 5
	srl	%g2, 8, %g4
	sll	%g2, 24, %g1
	sth	%g4, [%o0 + 1]
	add	%o1, 4, %o1
	be	43f
	and	%o2, 0xffffffc0, %o3

	ld	[%o0 - 1], %o4
	add	%o0, 4, %o0
4:	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
	subcc	%o3, 64, %o3
	add	%o1, 64, %o1
	bne	4b
	add	%o0, 64, %o0

	andcc	%o2, 0x30, %o3
	be,a	1f
	srl	%g1, 24, %g2
4:	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
	subcc	%o3, 16, %o3
	add	%o1, 16, %o1
	bne	4b
	add	%o0, 16, %o0

	srl	%g1, 24, %g2
1:	st	%o4, [%o0 - 5]
	b	88f
	stb	%g2, [%o0 - 1]
41:	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	subcc	%o3, 64, %o3
	add	%o1, 64, %o1
	bne	41b
	add	%o0, 64, %o0

	andcc	%o2, 0x30, %o3
	be,a	1f
	srl	%g1, 16, %g2
4:	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
	subcc	%o3, 16, %o3
	add	%o1, 16, %o1
	bne	4b
	add	%o0, 16, %o0

	srl	%g1, 16, %g2
1:	sth	%g2, [%o0 - 3]
	srl	%g1, 8, %g4
	b	88f
	stb	%g4, [%o0 - 1]
43:	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
	subcc	%o3, 64, %o3
	add	%o1, 64, %o1
	bne	43b
	add	%o0, 64, %o0

	andcc	%o2, 0x30, %o3
	be,a	1f
	srl	%g1, 24, %g2
4:	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
	subcc	%o3, 16, %o3
	add	%o1, 16, %o1
	bne	4b
	add	%o0, 16, %o0

	srl	%g1, 24, %g2
1:	stb	%g2, [%o0 + 3]
	b	88f
	add	%o0, 4, %o0
42:	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	subcc	%o3, 64, %o3
	add	%o1, 64, %o1
	bne	42b
	add	%o0, 64, %o0

	andcc	%o2, 0x30, %o3
	be,a	1f
	srl	%g1, 16, %g2
4:	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
	subcc	%o3, 16, %o3
	add	%o1, 16, %o1
	bne	4b
	add	%o0, 16, %o0

	srl	%g1, 16, %g2
1:	sth	%g2, [%o0 - 2]

	/* Fall through */

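/* Copy the final 0-15 bytes.  One more computed jump:
   MOVE_SHORTCHUNK costs 16 bytes of code per 2 data bytes, hence
   %o4 = (%o2 & 0xe) * 8; a trailing odd byte is handled at 89.  */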
88:	and	%o2, 0xe, %o3
	mov	%o7, %g2
	sll	%o3, 3, %o4
	add	%o0, %o3, %o0
106:	call	100f
	add	%o1, %o3, %o1
	mov	%g2, %o7
	jmpl	%o5 + (89f - 106b), %g0
	andcc	%o2, 1, %g0

	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

89:	be	1f
	nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:	retl
	ld	[%sp + 64], %o0

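/* Short copy (len <= 15) with dst - src a multiple of 4: if src is
   not word aligned, fall back to the byte code at 88; otherwise move
   an 8-byte chunk here and let the code at 81 finish the 4/2/1
   tail.  */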
90:	bne	88b
	andcc	%o2, 8, %g0

	be	1f
	andcc	%o2, 4, %g0

	ld	[%o1 + 0x00], %g2
	ld	[%o1 + 0x04], %g3
	add	%o1, 8, %o1
	st	%g2, [%o0 + 0x00]
	st	%g3, [%o0 + 0x04]
	add	%o0, 8, %o0
1:	b	81b
	mov	%o2, %g1

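/* Helpers for the computed jumps: a call instruction leaves its own
   address in %o7, so each of these returns that address minus a
   scaled remaining-byte count in %o5 for the jmpl that follows the
   call (100 subtracts the precomputed %o4, 110 subtracts %g6
   directly).  */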
100:	retl
	sub	%o7, %o4, %o5
110:	retl
	sub	%o7, %g6, %o5
END(memcpy)

libc_hidden_builtin_def (memcpy)

libc_hidden_def (__mempcpy)
weak_alias (__mempcpy, mempcpy)
libc_hidden_builtin_def (mempcpy)