# C code

```c
void daxpy(size_t n, double a, const double x[], double y[])
{
    for (size_t i = 0; i < n; i++)
        y[i] = a * x[i] + y[i];
}
```

# SVP64 Power ISA version

Summary: 9 instructions, 5 of which are 64-bit, for a total of 14 "words".

Relies on post-increment, relies on no overlap between x and y in memory, and
critically relies on y overwrite. x is post-incremented when read, but y is
post-incremented on write. Element-Strided ensures the Immediate (8) results
in a contiguous LD (or store) despite RA being marked Scalar. RA is Scalar so
that only one LD/ST Update "wins": the last write to RA is the address for the
next block.

```
# r5: n count; r6: x ptr; r7: y ptr; fp1: a
1  addi r3,r7,0            # return result
2  mtctr 5                 # move n to CTR
3  .L2
4  setvl MAXVL=32,VL=CTR   # actually VL=MIN(MAXVL,CTR)
5  sv.lfdup/els *32,8(6)   # load x into fp32-63, incr x
6  sv.lfd/els *64,8(7)     # load y into fp64-95, NO INC
7  sv.fmadd *64,*64,1,*32  # (*y) = a * (*x) + (*y)
8  sv.stfdup/els *64,8(7)  # store at y, incr y
9  sv.bc/ctr .L2           # decr CTR by VL, jump !zero
10 blr                     # return
```

# RVV version

Summary: 12 instructions, 7 32-bit and 5 16-bit, for a total of 9.5 "words".

```
# a0 is n, a1 is pointer to x[0], a2 is pointer to y[0], fa0 is a
    li t0, 2<<25
    vsetdcfg t0             # enable 2 64b Fl.Pt. registers
loop:
    setvl t0, a0            # vl = t0 = min(mvl, n)
    vld v0, a1              # load vector x
    c.slli t1, t0, 3        # t1 = vl * 8 (in bytes)
    vld v1, a2              # load vector y
    c.add a1, a1, t1        # increment pointer to x by vl*8
    vfmadd v1, v0, fa0, v1  # v1 += v0 * fa0 (y = a * x + y)
    c.sub a0, a0, t0        # n -= vl (t0)
    vst v1, a2              # store Y
    c.add a2, a2, t1        # increment pointer to y by vl*8
    c.bnez a0, loop         # repeat if n != 0
    c.ret                   # return
```

# SVE Version

Summary: 12 instructions, all 32-bit, for a total of 12 "words".

```
1 // x0 = &x[0], x1 = &y[0], x2 = &a, x3 = &n
2 daxpy_:
3   ldrsw x3, [x3]          // x3=*n
4   mov x4, #0              // x4=i=0
5   whilelt p0.d, x4, x3    // p0=while(i++ < n)
```