\documentclass[slidestop]{beamer}
\usepackage{beamerthemesplit}
\usepackage{graphics}
\usepackage{pstricks}

\title{Simple-V RISC-V Extension for Vectorisation and SIMD}
\author{Luke Kenneth Casson Leighton}


\begin{document}

\frame{
\begin{center}
\huge{Simple-V RISC-V Extension for Vectors and SIMD}\\
\vspace{32pt}
\Large{Flexible Vectorisation}\\
\Large{(aka not so Simple-V?)}\\
\Large{(aka How to Parallelise the RISC-V ISA)}\\
\vspace{24pt}
\Large{[proposed for] Chennai 9th RISC-V Workshop}\\
\vspace{16pt}
\large{\today}
\end{center}
}


\frame{\frametitle{Credits and Acknowledgements}

\begin{itemize}
\item The Designers of RISC-V\vspace{15pt}
\item The RVV Working Group and contributors\vspace{15pt}
\item Allen Baum, Jacob Bachmeyer, Xan Phung, Chuanhua Chang,\\
Guy Lemurieux, Jonathan Neuschafer, Roger Brussee,
and others\vspace{15pt}
\item ISA-Dev Group Members\vspace{10pt}
\end{itemize}
}
\frame{\frametitle{Quick refresher on SIMD}

\begin{itemize}
\item SIMD very easy to implement (and very seductive)\vspace{8pt}
\item Parallelism is in the ALU\vspace{8pt}
\item Zero-to-negligible impact for rest of core\vspace{8pt}
\end{itemize}
Where SIMD Goes Wrong:\vspace{10pt}
\begin{itemize}
\item See "SIMD instructions considered harmful"\\
https://sigarch.org/simd-instructions-considered-harmful
\item Setup and corner-cases alone are extremely complex.\\
Hardware is easy, but software is hell.
\item O($N^{6}$) ISA opcode proliferation!\\
opcode, elwidth, veclen, src1-src2-dest hi/lo
\end{itemize}
}

\frame{\frametitle{Quick refresher on RVV}

\begin{itemize}
\item Extremely powerful (extensible to 256 registers)\vspace{10pt}
\item Supports polymorphism, several datatypes (inc. FP16)\vspace{10pt}
\item Requires a separate Register File (16 w/ext to 256)\vspace{10pt}
\item Implemented as a separate pipeline (no impact on scalar)\vspace{10pt}
\end{itemize}
However...\vspace{10pt}
\begin{itemize}
\item 98 percent opcode duplication with rest of RV (CLIP)
\item Extending RVV requires customisation not just of h/w:\\
gcc, binutils also need customisation (and maintenance)
\end{itemize}
}

\frame{\frametitle{The Simon Sinek lowdown (Why, How, What)}

\begin{itemize}
\item Why?
Implementors need flexibility in vectorisation to optimise for
area or performance depending on the scope:
embedded DSP, Mobile GPUs, Server CPUs and more.\vspace{4pt}\\
Compilers also need flexibility in vectorisation to optimise for cost
of pipeline setup, amount of state to context switch
and software portability\vspace{4pt}
\item How?
By marking INT/FP regs as "Vectorised" and
adding a level of indirection,
SV expresses how existing instructions should act
on [contiguous] blocks of registers, in parallel.\vspace{4pt}
\item What?
Simple-V is an "API" that implicitly extends
existing (scalar) instructions with explicit parallelisation\\
(i.e. SV is actually about parallelism NOT vectors per se)
\end{itemize}
}

\frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}

\begin{itemize}
\item memcpy becomes much smaller (higher bang-per-buck):\\
see the sketch on the next slide
\item context-switch (LOAD/STORE multiple): 1-2 instructions
\item Compressed instrs further reduce I-cache usage (etc.)
\item Greatly-reduced I-cache load (and fewer reads)
\item Amazingly, SIMD becomes (more) tolerable\\
(corner-cases for setup and teardown are gone)
\item Modularity/Abstraction in both the h/w and the toolchain.
\end{itemize}
Note:
\begin{itemize}
\item It's not just about Vectors: it's about instruction effectiveness
\item Anything that makes SIMD tolerable has to be a good thing
\item Anything the implementor is not interested in HW-optimising,\\
let it fall through to exceptions (implement as a trap).
\end{itemize}
}

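\begin{frame}[fragile]
\frametitle{Sketch: memcpy under SV}

A rough sketch of a memcpy loop under SV, to illustrate the
"higher bang-per-buck" bullet on the previous slide. The CSR setup
notation, register choices and setvl form are illustrative
assumptions only, following the style of the DAXPY example later
in these slides:

\begin{semiverbatim}
# a0 is n (doublewords), a1 is ptr to src, a2 is ptr to dest
CSRvect1 = \{type: I, key: a4, val: a4, elwidth: dflt\}
loop:
    setvl t0, a0, 4    # vl = t0 = min(4, n)
    ld    a4, a1       # load t0 registers a4+ from src
    slli  t1, t0, 3    # t1 = vl * 8 (in bytes)
    st    a4, a2       # store t0 registers a4+ to dest
    add   a1, a1, t1   # increment src pointer by vl*8
    sub   a0, a0, t0   # n -= vl (t0)
    add   a2, a2, t1   # increment dest pointer by vl*8
    bnez  a0, loop     # repeat if n != 0
\end{semiverbatim}
\end{frame}
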

\frame{\frametitle{How does Simple-V relate to RVV? What's different?}

\begin{itemize}
\item RVV very heavy-duty (excellent for supercomputing)\vspace{10pt}
\item Simple-V abstracts parallelism (based on best of RVV)\vspace{10pt}
\item Graded levels: hardware, hybrid or traps (fit impl. need)\vspace{10pt}
\item Even Compressed instructions become vectorised (RVV can't)\vspace{10pt}
\end{itemize}
What Simple-V is not:\vspace{10pt}
\begin{itemize}
\item A full supercomputer-level Vector Proposal
\item A replacement for RVV (SV is designed to be over-ridden\\
by - or augmented to become - RVV)
\end{itemize}
}

\frame{\frametitle{How is Parallelism abstracted in Simple-V?}

\begin{itemize}
\item Register "typing" turns any op into an implicit Vector op:\\
registers are reinterpreted through a level of indirection
\item Primarily at the Instruction issue phase (except SIMD)\\
Note: it's ok to pass predication through to ALU (like SIMD)
\item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
\end{itemize}
Note: EVERYTHING is parallelised:
\begin{itemize}
\item All LOAD/STORE (inc. Compressed, Int/FP versions)
\item All ALU ops (soft / hybrid / full HW, on per-op basis)
\item All branches become predication targets (C.FNE added?)
\item C.MV of particular interest (s/v, v/v, v/s)
\item FCVT, FMV, FSGNJ etc. very similar to C.MV
\end{itemize}
}

\frame{\frametitle{Implementation Options}

\begin{itemize}
\item Absolute minimum: Exceptions: if CSRs indicate "V", trap.\\
(Requires as absolute minimum that CSRs be in H/W)
\item Hardware loop, single-instruction issue\\
(Do / Don't send through predication to ALU)
\item Hardware loop, parallel (multi-instruction) issue\\
(Do / Don't send through predication to ALU)
\item Hardware loop, full parallel ALU (not recommended)
\end{itemize}
Notes:\vspace{4pt}
\begin{itemize}
\item 4 (or more?) options above may be deployed on per-op basis
\item SIMD always sends predication bits through to ALU
\item Minimum MVL MUST be sufficient to cover regfile LD/ST
\item Instr. FIFO may repeatedly split off N scalar ops at a time\\
(sketch on next slide)
\end{itemize}
}
% Instr. FIFO may need its own slide. Basically, the vectorised op
% gets pushed into the FIFO, where it is then "processed". Processing
% will remove the first set of ops from its vector numbering (taking
% predication into account) and shoving them **BACK** into the FIFO,
% but MODIFYING the remaining "vectorised" op, subtracting the now
% scalar ops from it.

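\begin{frame}[fragile]
\frametitle{Sketch: Instr. FIFO splitting}

A rough sketch of the FIFO behaviour described in the notes above:
up to N element-ops are split off per cycle (honouring predication),
pushed back into the FIFO as scalar ops, and the remaining
"vectorised" op is shortened accordingly. Names and the value of N
are illustrative assumptions, not a specification.

\begin{semiverbatim}
def process_fifo_head(op):    # op: a vectorised instruction
    issued = 0
    while issued < N and op.element_index < VL:
        i = op.element_index
        if predicate_bit(op, i):    # skip masked-out elements
            push_scalar_op(op, i)   # back into the FIFO as scalar
            issued += 1
        op.element_index += 1
    if op.element_index < VL:
        keep_at_fifo_head(op)       # shortened vectorised op remains
    else:
        retire(op)                  # all elements issued
\end{semiverbatim}
\end{frame}
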
\frame{\frametitle{Predicated 8-parallel ADD: 1-wide ALU}
\begin{center}
\includegraphics[height=2.5in]{padd9_alu1.png}\\
{\bf \red Predicated adds are shuffled down: 6 cycles in total}
\end{center}
}


\frame{\frametitle{Predicated 8-parallel ADD: 4-wide ALU}
\begin{center}
\includegraphics[height=2.5in]{padd9_alu4.png}\\
{\bf \red Predicated adds are shuffled down: 4 in 1st cycle, 2 in 2nd}
\end{center}
}


\frame{\frametitle{Predicated 8-parallel ADD: 3 phase FIFO expansion}
\begin{center}
\includegraphics[height=2.5in]{padd9_fifo.png}\\
{\bf \red First cycle takes first four 1s; second takes the rest}
\end{center}
}

\frame{\frametitle{How are SIMD Instructions Vectorised?}

\begin{itemize}
\item SIMD ALU(s) primarily unchanged\vspace{6pt}
\item Predication is added to each SIMD element\vspace{6pt}
\item Predication bits sent in groups to the ALU (sketch on next slide)\vspace{6pt}
\item End of Vector enables (additional) predication\vspace{10pt}
\end{itemize}
Considerations:\vspace{4pt}
\begin{itemize}
\item Many SIMD ALUs possible (parallel execution)
\item Implementor free to choose (API remains the same)
\item Unused ALU units wasted, but s/w DRASTICALLY simpler
\item Very long SIMD ALUs could waste significant die area
\end{itemize}
}
% With multiple SIMD ALUs at for example 32-bit wide they can be used
% to either issue 64-bit or 128-bit or 256-bit wide SIMD operations
% or they can be used to cover several operations on totally different
% vectors / registers.

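\begin{frame}[fragile]
\frametitle{Sketch: predicate bits grouped per SIMD op}

A rough sketch of slicing the predicate into per-element groups for
a SIMD ALU, as per the previous slide. Names and widths are
illustrative assumptions (a 32-bit SIMD ALU on 8-bit elements gives
4 elements, hence 4 predicate bits, per SIMD op):

\begin{semiverbatim}
elems_per_op = simd_width / elwidth        # e.g. 32 / 8 = 4
for i in range(0, VL, elems_per_op):
    # predicate bits for this group of elements
    pbits = (predicate >> i) & ((1 << elems_per_op) - 1)
    # end of vector: mask off elements beyond VL
    if i + elems_per_op > VL:
        pbits &= (1 << (VL - i)) - 1
    simd_alu_issue(op, first_element=i, predicate=pbits)
\end{semiverbatim}
\end{frame}
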
\frame{\frametitle{Predicated 9-parallel SIMD ADD}
\begin{center}
\includegraphics[height=2.5in]{padd9_simd.png}\\
{\bf \red 4-wide 8-bit SIMD, 4 bits of predicate passed to ALU}
\end{center}
}

\frame{\frametitle{What's the deal / juice / score?}

\begin{itemize}
\item Standard Register File(s) overloaded with CSR "reg is vector"\\
(see pseudocode slides for examples)
\item Element width (and type?) concepts remain same as RVV\\
(CSRs give new size (and meaning?) to elements in registers)
\item CSRs are key-value tables (overlaps allowed)\vspace{10pt}
\end{itemize}
Key differences from RVV:\vspace{10pt}
\begin{itemize}
\item Predication in INT regs as a BIT field (max VL=XLEN)
\item Minimum VL must be Num Regs - 1 (all regs single LD/ST)
\item SV may condense sparse Vecs: RVV lets ALU do predication
\item Choice to Zero or skip non-predicated elements
\end{itemize}
}

\begin{frame}[fragile]
\frametitle{ADD pseudocode (or trap, or actual hardware loop)}

\begin{semiverbatim}
function op\_add(rd, rs1, rs2, predr) # add not VADD!
  int i, id=0, irs1=0, irs2=0;
  for (i = 0; i < VL; i++)
    if (ireg[predr] & 1<<i) # predication uses intregs
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    if (reg\_is\_vectorised[rd])  \{ id += 1; \}
    if (reg\_is\_vectorised[rs1]) \{ irs1 += 1; \}
    if (reg\_is\_vectorised[rs2]) \{ irs2 += 1; \}
\end{semiverbatim}

\begin{itemize}
\item Above is oversimplified: Reg. indirection left out (for clarity).
\item SIMD slightly more complex (case above is elwidth = default)
\item Scalar-scalar and scalar-vector and vector-vector now all in one
\item OoO may choose to push ADDs into instr. queue (v. busy!)
\end{itemize}
\end{frame}

% yes it really *is* ADD not VADD. that's the entire point of
% this proposal, that *standard* operations are overloaded to
% become vectorised-on-demand

\begin{frame}[fragile]
\frametitle{Predication-Branch (or trap, or actual hardware loop)}

\begin{semiverbatim}
s1 = reg\_is\_vectorised(src1);
s2 = reg\_is\_vectorised(src2);
if (!s2 && !s1) goto branch;
for (int i = 0; i < VL; ++i)
   if (cmp(s1 ? reg[src1+i] : reg[src1],
           s2 ? reg[src2+i] : reg[src2]))
      ireg[rs3] |= 1<<i;
\end{semiverbatim}

\begin{itemize}
\item SIMD slightly more complex (case above is elwidth = default)
\item If s1 and s2 both scalars, Standard branch occurs
\item Predication stored in integer regfile as a bitfield
\item Scalar-vector and vector-vector supported
\end{itemize}
\end{frame}

\begin{frame}[fragile]
\frametitle{VLD/VLD.S/VLD.X (or trap, or actual hardware loop)}

\begin{semiverbatim}
if (unit-strided) stride = elsize;
else stride = areg[as2]; // constant-strided
for (int i = 0; i < VL; ++i)
  if (preg\_enabled[rd] && ([!]preg[rd] & 1<<i))
    for (int j = 0; j < seglen+1; j++)
      if (reg\_is\_vectorised[rs2]) offs = vreg[rs2+i]
      else offs = i*(seglen+1)*stride;
      vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
\end{semiverbatim}

\begin{itemize}
\item Again: elwidth != default slightly more complex
\item rs2 vectorised taken to implicitly indicate VLD.X
\end{itemize}
\end{frame}


\frame{\frametitle{Why are overlaps allowed in Regfiles?}

\begin{itemize}
\item Same register(s) can have multiple "interpretations"
\item Set "real" register (scalar) without needing to set/unset CSRs.
\item xBitManip plus SIMD plus xBitManip = Hi/Lo bitops
\item (32-bit GREV plus 4x8-bit SIMD plus 32-bit GREV:\\
GREV @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8; sketch on next slide)
\item RGB 565 (video): BEXTW plus 4x8-bit SIMD plus BDEPW\\
(BEXT/BDEP @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
\item Same register(s) can be offset (no need for VSLIDE)\vspace{6pt}
\end{itemize}
Note:
\begin{itemize}
\item xBitManip reduces O($N^{6}$) SIMD down to O($N^{3}$)
\item Hi-Performance: Macro-op fusion (more pipeline stages?)
\end{itemize}
}

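\begin{frame}[fragile]
\frametitle{Sketch: overlapping "interpretations" via CSR entries}

A rough sketch of the GREV + 8-bit SIMD example from the previous
slide, written as two register CSR entries that map onto the same
underlying registers. The notation follows the SV DAXPY slide later
on; the key/value register choices are illustrative assumptions:

\begin{semiverbatim}
# same underlying registers (s2 onwards), two "interpretations"
CSRvect1 = \{type: I, key: t2, val: s2, elwidth: 32\} # GREV view
CSRvect2 = \{type: I, key: t3, val: s2, elwidth: 8\}  # SIMD view

# GREV    on t2 @ VL=N   : 32-bit elements starting at s2
# SIMD op on t3 @ VL=Nx4 :  8-bit elements, same registers
\end{semiverbatim}

\begin{itemize}
\item No data is moved: the CSR table is just a remap
\end{itemize}
\end{frame}
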

\frame{\frametitle{To Zero or not to place zeros in non-predicated elements?}

\begin{itemize}
\item Zeroing is an implementation optimisation favouring OoO
\item Simple implementations may skip non-predicated operations
\item With zeroing, simple implementations explicitly have to destroy data
\item Complex implementations may use reg-renames to save power\\
Zeroing on predication chains makes optimisation harder
\item Compromise: REQUIRE both (specified in predication CSRs).\\
(sketch on next slide)
\end{itemize}
Considerations:
\begin{itemize}
\item Complex not really impacted, simple impacted a LOT\\
with Zeroing... however it's useful (memzero)
\item Non-zero'd overlapping "Vectors" may issue overlapping ops\\
(2nd op's predicated elements slot in 1st's non-predicated ops)
\item Please don't use Vectors for "security" (use Sec-Ext)
\end{itemize}
}
% with overlapping "vectors" - bearing in mind that "vectors" are
% just a remap onto the standard register file, if the top bits of
% predication are zero, and there happens to be a second vector
% that uses some of the same register file that happens to be
% predicated out, the second vector op may be issued *at the same time*
% if there are available parallel ALUs to do so.

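\begin{frame}[fragile]
\frametitle{Sketch: zero vs. skip in the element loop}

A rough sketch of where the zero/skip choice sits in the hardware
loop, using the ADD pseudocode from earlier. The zeroing flag is
taken from the predication CSR entry (next slide); this is
illustrative pseudocode only:

\begin{semiverbatim}
for (i = 0; i < VL; i++)
    if (predval & 1<<i)
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    else if (zeroing)       # from the predication CSR entry
       ireg[rd+id] <= 0;    # explicitly destroy (zero) the element
    # else: element is skipped entirely (no write occurs)
    ... id/irs1/irs2 increments as in the ADD pseudocode ...
\end{semiverbatim}
\end{frame}
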

\frame{\frametitle{Predication key-value CSR store}

\begin{itemize}
\item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
\item register to be predicated if referred to (5 bits, key)\vspace{6pt}
\item register to store actual predication in (5 bits, value)\vspace{6pt}
\item predication is inverted Y/N (1 bit)\vspace{6pt}
\item non-predicated elements are to be zero'd Y/N (1 bit)\vspace{6pt}
\end{itemize}
Notes:\vspace{10pt}
\begin{itemize}
\item Table should be expanded out for high-speed implementations
\item Multiple "keys" (and values) theoretically permitted
\item RVV rules about deleting higher-indexed CSRs followed
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{Predication key-value CSR table decoding pseudocode}

\begin{semiverbatim}
struct pred fp\_pred[32];
struct pred int\_pred[32];

for (i = 0; i < 16; i++) // 16 CSRs?
   tb = int\_pred if CSRpred[i].type == 0 else fp\_pred
   idx = CSRpred[i].regidx
   tb[idx].zero = CSRpred[i].zero
   tb[idx].inv = CSRpred[i].inv
   tb[idx].predidx = CSRpred[i].predidx
   tb[idx].enabled = true
\end{semiverbatim}

\begin{itemize}
\item All 64 (int and FP) Entries zero'd before setting
\item Might be a bit complex to set up (TBD)
\end{itemize}

\end{frame}


\begin{frame}[fragile]
\frametitle{Get Predication value pseudocode}

\begin{semiverbatim}
def get\_pred\_val(bool is\_fp\_op, int reg):
   tb = fp\_pred if is\_fp\_op else int\_pred
   if (!tb[reg].enabled):
      return ~0x0                  // all ops enabled
   predidx = tb[reg].predidx       // redirection occurs HERE
   predicate = intreg[predidx]     // actual predicate HERE
   if (tb[reg].inv):
      predicate = ~predicate
   return predicate
\end{semiverbatim}

\begin{itemize}
\item References different (internal) mapping table for INT or FP
\item Actual predicate bitmask ALWAYS from the INT regfile
\end{itemize}

\end{frame}


\frame{\frametitle{Register key-value CSR store}

\begin{itemize}
\item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
\item treated as vector if referred to in op (5 bits, key)\vspace{6pt}
\item starting register to actually be used (5 bits, value)\vspace{6pt}
\item element bitwidth: default/8/16/32/64/rsvd (3 bits)\vspace{6pt}
\item element type: still under consideration\vspace{6pt}
\end{itemize}
Notes:\vspace{10pt}
\begin{itemize}
\item Same notes apply (previous slide) as for predication CSR table
\item Level of indirection has implications for pipeline latency
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{Register key-value CSR table decoding pseudocode}

\begin{semiverbatim}
struct vectorised fp\_vec[32];
struct vectorised int\_vec[32];

for (i = 0; i < 16; i++) // 16 CSRs?
   tb = int\_vec if CSRvectortb[i].type == 0 else fp\_vec
   idx = CSRvectortb[i].regidx
   tb[idx].elwidth = CSRvectortb[i].elwidth
   tb[idx].regidx = CSRvectortb[i].regidx
   tb[idx].isvector = true
\end{semiverbatim}

\begin{itemize}
\item All 64 (int and FP) Entries zero'd before setting
\item Might be a bit complex to set up (TBD)
\end{itemize}

\end{frame}


\begin{frame}[fragile]
\frametitle{ADD pseudocode with redirection, this time}

\begin{semiverbatim}
function op\_add(rd, rs1, rs2) # add not VADD!
  int i, id=0, irs1=0, irs2=0;
  rd  = int\_vec[rd ].isvector ? int\_vec[rd ].regidx : rd;
  rs1 = int\_vec[rs1].isvector ? int\_vec[rs1].regidx : rs1;
  rs2 = int\_vec[rs2].isvector ? int\_vec[rs2].regidx : rs2;
  predval = get\_pred\_val(FALSE, rd);
  for (i = 0; i < VL; i++)
    if (predval \& 1<<i) # predication uses intregs
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    if (int\_vec[rd ].isvector)  \{ id += 1; \}
    if (int\_vec[rs1].isvector)  \{ irs1 += 1; \}
    if (int\_vec[rs2].isvector)  \{ irs2 += 1; \}
\end{semiverbatim}

\begin{itemize}
\item SIMD (elwidth != default) not covered above
\end{itemize}
\end{frame}


\frame{\frametitle{C.MV extremely flexible!}

\begin{itemize}
\item scalar-to-vector (w/ no pred): VSPLAT
\item scalar-to-vector (w/ dest-pred): Sparse VSPLAT
\item scalar-to-vector (w/ 1-bit dest-pred): VINSERT
\item vector-to-scalar (w/ [1-bit?] src-pred): VEXTRACT
\item vector-to-vector (w/ no pred): Vector Copy
\item vector-to-vector (w/ src pred): Vector Gather
\item vector-to-vector (w/ dest pred): Vector Scatter
\item vector-to-vector (w/ src \& dest pred): Vector Gather/Scatter
\end{itemize}
\vspace{4pt}
Notes:
\begin{itemize}
\item Surprisingly powerful!
\item Same arrangement for FCVT, FMV, FSGNJ etc.
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{MV pseudocode with predication}

\begin{semiverbatim}
function op\_mv(rd, rs) # MV not VMV!
  rd = int\_vec[rd].isvector ? int\_vec[rd].regidx : rd;
  rs = int\_vec[rs].isvector ? int\_vec[rs].regidx : rs;
  ps = get\_pred\_val(FALSE, rs); # predication on src
  pd = get\_pred\_val(FALSE, rd); # ... AND on dest
  for (int i = 0, j = 0; i < VL && j < VL;)
    if (int\_vec[rs].isvec) while (!(ps \& 1<<i)) i++;
    if (int\_vec[rd].isvec) while (!(pd \& 1<<j)) j++;
    ireg[rd+j] <= ireg[rs+i];
    if (int\_vec[rs].isvec) i++;
    if (int\_vec[rd].isvec) j++;
\end{semiverbatim}

\begin{itemize}
\item elwidth != default not covered above (might be a bit hairy)
\item Ending early with 1-bit predication not included (VINSERT)
\item A worked VSPLAT example follows on the next slide
\end{itemize}
\end{frame}

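\begin{frame}[fragile]
\frametitle{Worked example: VSPLAT via predicated MV}

A rough worked example of the "scalar-to-vector (w/ no pred) = VSPLAT"
case from the C.MV slide, using the MV pseudocode above. The CSR
notation and register choices are illustrative assumptions:

\begin{semiverbatim}
# mark a3 as the start of a vector; t1 stays scalar
CSRvect1 = \{type: I, key: a3, val: a3, elwidth: dflt\}

setvl t0, a0, 4   # vl = t0 = min(4, a0); assume a0 >= 4
mv    a3, t1      # rd=a3 vectorised, rs=t1 scalar, so:
                  #   a3 <= t1; a4 <= t1; a5 <= t1; a6 <= t1
\end{semiverbatim}

\begin{itemize}
\item With a dest-predicate set, the same MV becomes Sparse VSPLAT\\
(only predicated destination elements are written)
\end{itemize}
\end{frame}
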

\begin{frame}[fragile]
\frametitle{VSELECT: stays or goes? Stays if MV.X exists...}

\begin{semiverbatim}
def op_mv_x(rd, rs): # (hypothetical) RV MV.X
   rs = regfile[rs]          # level of indirection (MV.X)
   regfile[rd] = regfile[rs] # straight regcopy
\end{semiverbatim}

Vectorised version aka "VSELECT":

\begin{semiverbatim}
def op_mv_x(rd, rs): # SV version of MV.X
   for i in range(VL):
      rs1 = regfile[rs+i]           # indirection
      regfile[rd+i] = regfile[rs1]  # straight regcopy
\end{semiverbatim}

\begin{itemize}
\item However MV.X does not exist in RV, so neither can VSELECT
\item \red SV is not about adding new functionality, only parallelism
\end{itemize}


\end{frame}


\frame{\frametitle{Opcodes, compared to RVV}

\begin{itemize}
\item All integer and FP opcodes removed (no CLIP, FNE)
\item VMPOP, VFIRST etc. all removed (use xBitManip)
\item VSLIDE removed (use regfile overlaps)
\item C.MV covers VEXTRACT VINSERT and VSPLAT (and more)
\item Vector (or scalar-vector) copy: use C.MV (MV is a pseudo-op)
\item VMERGE: twin predicated C.MVs (one inverted; macro-op'd;\\
sketch on next slide)
\item VSETVL, VGETVL stay (the only ops that do!)
\end{itemize}
Issues:
\begin{itemize}
\item VSELECT stays? no MV.X, so no (add with custom ext?)
\item VSNE exists, but no FNE (use predication inversion?)
\item VCLIP is not in RV* (add with custom ext?)
\end{itemize}
}

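\begin{frame}[fragile]
\frametitle{Sketch: VMERGE as twin predicated C.MVs}

A rough sketch of the "VMERGE = twin predicated C.MVs" bullet from
the previous slide: the same predicate register is used twice, once
inverted. The predication-CSR notation and register choices are
illustrative assumptions (a3, a7, a11 assumed already marked as
vectors in the register CSR table):

\begin{semiverbatim}
# intent: a3[i] = t2-bit[i] ? a7[i] : a11[i]

CSRpred1 = \{reg: a3, predreg: t2, inv: N, zero: N\}
mv a3, a7    # writes a7 elements where predicate bit = 1

CSRpred1 = \{reg: a3, predreg: t2, inv: Y, zero: N\}
mv a3, a11   # writes a11 elements where predicate bit = 0
\end{semiverbatim}

\begin{itemize}
\item The two MVs are a natural candidate for macro-op fusion
\end{itemize}
\end{frame}
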
\begin{frame}[fragile]
\frametitle{Example C code: DAXPY}

\begin{semiverbatim}
void daxpy(size_t n, double a,
           const double x[], double y[])
\{
   for (size_t i = 0; i < n; i++) \{
      y[i] = a*x[i] + y[i];
   \}
\}
\end{semiverbatim}

\begin{itemize}
\item See "SIMD Considered Harmful" for SIMD/RVV analysis\\
https://sigarch.org/simd-instructions-considered-harmful/
\end{itemize}


\end{frame}


\begin{frame}[fragile]
\frametitle{RVV DAXPY assembly (RV32V)}

\begin{semiverbatim}
# a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a
    li t0, 2<<25
    vsetdcfg t0            # enable 2 64b Fl.Pt. registers
loop:
    setvl  t0, a0          # vl = t0 = min(mvl, n)
    vld    v0, a1          # load vector x
    slli   t1, t0, 3       # t1 = vl * 8 (in bytes)
    vld    v1, a2          # load vector y
    add    a1, a1, t1      # increment pointer to x by vl*8
    vfmadd v1, v0, fa0, v1 # v1 += v0 * fa0 (y = a * x + y)
    sub    a0, a0, t0      # n -= vl (t0)
    vst    v1, a2          # store Y
    add    a2, a2, t1      # increment pointer to y by vl*8
    bnez   a0, loop        # repeat if n != 0
\end{semiverbatim}
\end{frame}


\begin{frame}[fragile]
\frametitle{SV DAXPY assembly (RV64D)}

\begin{semiverbatim}
# a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a
    CSRvect1 = \{type: F, key: a3, val: a3, elwidth: dflt\}
    CSRvect2 = \{type: F, key: a7, val: a7, elwidth: dflt\}
loop:
    setvl t0, a0, 4        # vl = t0 = min(4, n)
    ld    a3, a1           # load 4 registers a3-6 from x
    slli  t1, t0, 3        # t1 = vl * 8 (in bytes)
    ld    a7, a2           # load 4 registers a7-10 from y
    add   a1, a1, t1       # increment pointer to x by vl*8
    fmadd a7, a3, fa0, a7  # a7-10 += a3-6 * fa0 (y = a*x + y)
    sub   a0, a0, t0       # n -= vl (t0)
    st    a7, a2           # store 4 registers a7-10 to y
    add   a2, a2, t1       # increment pointer to y by vl*8
    bnez  a0, loop         # repeat if n != 0
\end{semiverbatim}
\end{frame}


\frame{\frametitle{Under consideration}

\begin{itemize}
\item Is C.FNE actually needed? Should it be added if it is?
\item Element type implies polymorphism. Should it be in SV?
\item Should use of registers be allowed to "wrap" (x30 x31 x1 x2)?
\item Is detection of all-scalar ops ok (without slowing pipeline)?
\item Can VSELECT be removed? (it's really complex)
\item Can CLIP be done as a CSR (mode, like elwidth)?
\item SIMD saturation (etc.) also set as a mode?
\item Include src1/src2 predication on Comparison Ops?\\
(same arrangement as C.MV, with same flexibility/power)
\item For 8/16-bit ops, is it worthwhile adding a "start offset"?\\
(a bit like misaligned addressing... for registers)\\
or just use predication to skip start?
\end{itemize}
}


\frame{\frametitle{What's the downside(s) of SV?}
\begin{itemize}
\item EVERY register operation is inherently parallelised\\
(scalar ops are just vectors of length 1)\vspace{4pt}
\item Tightly coupled with the core (instruction issue)\\
could be disabled through MISA switch\vspace{4pt}
\item An extra pipeline phase is pretty much essential\\
for fast low-latency implementations\vspace{4pt}
\item With zeroing off, skipping non-predicated elements is hard:\\
it is however an optimisation (and could be skipped).\vspace{4pt}
\item Setting up the Register/Predication tables (interpreting the\\
CSR key-value stores) might be a bit complex to optimise
(any change to a CSR key-value entry needs to redo the table)
\end{itemize}
}


\frame{\frametitle{Is this OK (low latency)? Detect scalar-ops (only)}
\begin{center}
\includegraphics[height=2.5in]{scalardetect.png}\\
{\bf \red Detect when all registers are scalar for a given op}
\end{center}
}

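\begin{frame}[fragile]
\frametitle{Sketch: all-scalar detection at issue}

A rough sketch of the scalar-detect check from the previous slide:
if none of an op's registers are marked as vectors in the register
CSR table, the op can bypass the vectorisation hardware loop
entirely. Illustrative pseudocode only (function names assumed):

\begin{semiverbatim}
def all_scalar(rd, rs1, rs2):
   return not (int_vec[rd ].isvector or
               int_vec[rs1].isvector or
               int_vec[rs2].isvector)

if all_scalar(rd, rs1, rs2):
   issue_standard_scalar_op()   # no hardware loop needed
else:
   start_vector_hardware_loop() # per-element issue as before
\end{semiverbatim}
\end{frame}
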

\frame{\frametitle{Summary}

\begin{itemize}
\item Actually about parallelism, not Vectors (or SIMD) per se\\
and NOT about adding new ALU/logic/functionality.
\item Only needs 2 actual instructions (plus the CSRs).\\
RVV - and "standard" SIMD - require ISA duplication
\item Designed for flexibility (graded levels of complexity)
\item Huge range of implementor freedom
\item Fits RISC-V ethos: achieve more with less
\item Reduces SIMD ISA proliferation by 3-4 orders of magnitude\\
(without the SIMD downsides, and without sacrificing speed)
\item Covers 98\% of RVV, allows RVV to fit "on top"
\item Byproduct of SV is a reduction in code size, power usage
etc. (increased efficiency, just like Compressed)
\end{itemize}
}


\frame{
\begin{center}
{\Huge The end\vspace{20pt}\\
Thank you\vspace{20pt}\\
Questions?\vspace{20pt}
}
\end{center}

\begin{itemize}
\item Discussion: ISA-DEV mailing list
\item http://libre-riscv.org/simple\_v\_extension/
\end{itemize}
}


\end{document}