\documentclass[slidestop]{beamer}
\usepackage{beamerthemesplit}
\usepackage{graphics}
\usepackage{pstricks}

\title{Simple-V RISC-V Extension for Vectorisation and SIMD}
\author{Luke Kenneth Casson Leighton}


\begin{document}

\frame{
\begin{center}
\huge{Simple-V RISC-V Extension for Vectors and SIMD}\\
\vspace{32pt}
\Large{Flexible Vectorisation}\\
\Large{(aka not so Simple-V?)}\\
\Large{(aka How to Parallelise the RISC-V ISA)}\\
\vspace{24pt}
\Large{[proposed for] Chennai 9th RISC-V Workshop}\\
\vspace{16pt}
\large{\today}
\end{center}
}


\frame{\frametitle{Credits and Acknowledgements}

\begin{itemize}
\item The Designers of RISC-V\vspace{15pt}
\item The RVV Working Group and contributors\vspace{15pt}
\item Allen Baum, Jacob Bachmeyer, Xan Phung, Chuanhua Chang,\\
Guy Lemurieux, Jonathan Neuschafer, Roger Brussee,
and others\vspace{15pt}
\item ISA-Dev Group Members\vspace{10pt}
\end{itemize}
}


\frame{\frametitle{Quick refresher on SIMD}

\begin{itemize}
\item SIMD very easy to implement (and very seductive)\vspace{8pt}
\item Parallelism is in the ALU\vspace{8pt}
\item Zero-to-negligible impact on the rest of the core\vspace{8pt}
\end{itemize}
Where SIMD Goes Wrong:\vspace{10pt}
\begin{itemize}
\item See "SIMD instructions considered harmful"\\
https://sigarch.org/simd-instructions-considered-harmful
\item Setup and corner-cases alone are extremely complex.\\
Hardware is easy, but software is hell.
\item O($N^{6}$) ISA opcode proliferation!\\
opcode, elwidth, veclen, src1-src2-dest hi/lo
\end{itemize}
}

\frame{\frametitle{Quick refresher on RVV}

\begin{itemize}
\item Extremely powerful (extensible to 256 registers)\vspace{10pt}
\item Supports polymorphism, several datatypes (inc. FP16)\vspace{10pt}
\item Requires a separate Register File (32 w/ext to 256)\vspace{10pt}
\item Implemented as a separate pipeline (no impact on scalar)\vspace{10pt}
\end{itemize}
However...\vspace{10pt}
\begin{itemize}
\item 98 percent opcode duplication with rest of RV (CLIP)
\item Extending RVV requires customisation not just of h/w:\\
gcc, binutils also need customisation (and maintenance)
\end{itemize}
}


\frame{\frametitle{The Simon Sinek lowdown (Why, How, What)}

\begin{itemize}
\item Why?
Implementors need flexibility in vectorisation to optimise for
area or performance depending on the scope:
embedded DSP, Mobile GPUs, Server CPUs and more.\\
Compilers also need flexibility in vectorisation to optimise for cost
of pipeline setup, amount of state to context switch
and software portability.
\item How?
By marking INT/FP regs as "Vectorised" and
adding a level of indirection,
SV expresses how existing instructions should act
on [contiguous] blocks of registers, in parallel, WITHOUT
needing any new arithmetic opcodes.
\item What?
Simple-V is an "API" that implicitly extends
existing (scalar) instructions with explicit parallelisation\\
i.e. SV is actually about parallelism NOT vectors per se.\\
It has a lot in common with VLIW (without the actual VLIW).
\end{itemize}
}


\frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}

\begin{itemize}
\item memcpy becomes much smaller (higher bang-per-buck):\\
see the sketch on the next slide
\item context-switch (LOAD/STORE multiple): 1-2 instructions
\item Compressed instrs further reduce I-cache usage (etc.)
\item Greatly-reduced I-cache load (and fewer reads)
\item Amazingly, SIMD becomes (more) tolerable (no corner-cases)
\item Modularity/Abstraction in both the h/w and the toolchain.
\item "Reach" of registers accessible by Compressed is enhanced
\item Future: double the standard INT/FP register file sizes.
\end{itemize}
Note:
\begin{itemize}
\item It's not just about Vectors: it's about instruction effectiveness
\item Anything an implementor is not interested in HW-optimising,\\
let it fall through to exceptions (implement as a trap).
\end{itemize}
}


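\begin{frame}[fragile]
\frametitle{Sketch: small memcpy with SV (illustrative only)}

A minimal sketch of the "memcpy becomes much smaller" claim, in the same
style as the SV DAXPY example later in these slides. Register and CSR
field choices here are hypothetical, not a fixed ABI:

\begin{semiverbatim}
# a0 is n (doublewords), a1 is ptr to src, a2 is ptr to dest
    CSRvect1 = \{type: I, key: a3, val: a3, elwidth: dflt\}
loop:
    setvl t0, a0, 4    # vl = t0 = min(4, n)
    ld    a3, a1       # load 4 registers a3-6 from src
    slli  t1, t0, 3    # t1 = vl * 8 (in bytes)
    st    a3, a2       # store 4 registers a3-6 to dest
    add   a1, a1, t1   # increment src pointer by vl*8
    add   a2, a2, t1   # increment dest pointer by vl*8
    sub   a0, a0, t0   # n -= vl (t0)
    bnez  a0, loop     # repeat if n != 0
\end{semiverbatim}
\end{frame}

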
\frame{\frametitle{How does Simple-V relate to RVV? What's different?}

\begin{itemize}
\item RVV very heavy-duty (excellent for supercomputing)\vspace{8pt}
\item Simple-V abstracts parallelism (based on best of RVV)\vspace{8pt}
\item Graded levels: hardware, hybrid or traps (fit impl. need)\vspace{8pt}
\item Even Compressed instructions become vectorised (RVV can't)\vspace{8pt}
\item No polymorphism in SV (too complex)\vspace{8pt}
\end{itemize}
What Simple-V is not:\vspace{4pt}
\begin{itemize}
\item A full supercomputer-level Vector Proposal
\item A replacement for RVV (SV is designed to be over-ridden\\
by - or augmented to become - RVV)
\end{itemize}
}


\frame{\frametitle{How is Parallelism abstracted in Simple-V?}

\begin{itemize}
\item Register "typing" turns any op into an implicit Vector op:\\
registers are reinterpreted through a level of indirection
\item Primarily at the Instruction issue phase (except SIMD)\\
Note: it's ok to pass predication through to ALU (like SIMD)
\item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
\end{itemize}
Note: EVERYTHING is parallelised:
\begin{itemize}
\item All LOAD/STORE (inc. Compressed, Int/FP versions)
\item All ALU ops (Int, FP, SIMD, DSP, everything)
\item All branches become predication targets (C.FNE added?)
\item C.MV of particular interest (s/v, v/v, v/s)
\item FCVT, FMV, FSGNJ etc. very similar to C.MV
\end{itemize}
}


\frame{\frametitle{Implementation Options}

\begin{itemize}
\item Absolute minimum: Exceptions: if CSRs indicate "V", trap.\\
(Requires as absolute minimum that CSRs be in H/W)
\item Hardware loop, single-instruction issue\\
(Do / Don't send through predication to ALU)
\item Hardware loop, parallel (multi-instruction) issue\\
(Do / Don't send through predication to ALU)
\item Hardware loop, full parallel ALU (not recommended)
\end{itemize}
Notes:\vspace{4pt}
\begin{itemize}
\item 4 (or more?) options above may be deployed on per-op basis
\item SIMD always sends predication bits through to ALU
\item Minimum MVL MUST be sufficient to cover regfile LD/ST
\item Instr. FIFO may repeatedly split off N scalar ops at a time\\
(see the sketch on the next slide)
\end{itemize}
}
% Instr. FIFO may need its own slide. Basically, the vectorised op
% gets pushed into the FIFO, where it is then "processed". Processing
% will remove the first set of ops from its vector numbering (taking
% predication into account) and shove them **BACK** into the FIFO,
% but MODIFYING the remaining "vectorised" op, subtracting the now
% scalar ops from it.

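
\begin{frame}[fragile]
\frametitle{Sketch: Instr. FIFO splitting off scalar ops}

A rough sketch (all names hypothetical) of the FIFO behaviour described
in the notes above: a vectorised op at the head of the FIFO has its first
N (predicated-in) elements split off as scalar ops, and the remaining
vectorised op is shrunk and re-queued:

\begin{semiverbatim}
def process_fifo_head(op):
    if not op.is_vectorised:
        issue(op)                 # plain scalar op
        return
    elems = first_predicated_elements(op, N)  # skip pred'd-out
    for i in elems:
        fifo.push_front(scalarise(op, i))     # N scalar ops back in
    op.remove_elements(elems)     # shrink the vectorised op
    if op.remaining_VL > 0:
        fifo.push_back(op)        # re-queue what is left
\end{semiverbatim}
\end{frame}

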
\frame{\frametitle{Predicated 8-parallel ADD: 1-wide ALU}
\begin{center}
\includegraphics[height=2.5in]{padd9_alu1.png}\\
{\bf \red Predicated adds are shuffled down: 6 cycles in total}
\end{center}
}


\frame{\frametitle{Predicated 8-parallel ADD: 4-wide ALU}
\begin{center}
\includegraphics[height=2.5in]{padd9_alu4.png}\\
{\bf \red Predicated adds are shuffled down: 4 in 1st cycle, 2 in 2nd}
\end{center}
}


\frame{\frametitle{Predicated 8-parallel ADD: 3 phase FIFO expansion}
\begin{center}
\includegraphics[height=2.5in]{padd9_fifo.png}\\
{\bf \red First cycle takes first four 1s; second takes the rest}
\end{center}
}


\frame{\frametitle{How are SIMD Instructions Vectorised?}

\begin{itemize}
\item SIMD ALU(s) primarily unchanged
\item Predication is added down each SIMD element (if requested,
otherwise the entire block will be predicated)
\item Predication bits sent in groups to the ALU (if requested,
otherwise just one bit for the entire packed block)
\item End of Vector enables (additional) predication:
completely nullifies end-case code (but only in group
predication mode)\\
(pseudocode sketch follows the SIMD ADD diagram)
\end{itemize}
Considerations:\vspace{4pt}
\begin{itemize}
\item Many SIMD ALUs possible (parallel execution)
\item Implementor free to choose (API remains the same)
\item Unused ALU units wasted, but s/w DRASTICALLY simpler
\item Very long SIMD ALUs could waste significant die area
\end{itemize}
}
% With multiple SIMD ALUs at for example 32-bit wide they can be used
% to either issue 64-bit or 128-bit or 256-bit wide SIMD operations
% or they can be used to cover several operations on totally different
% vectors / registers.

\frame{\frametitle{Predicated 9-parallel SIMD ADD}
\begin{center}
\includegraphics[height=2.5in]{padd9_simd.png}\\
{\bf \red 4-wide 8-bit SIMD, 4 bits of predicate passed to ALU}
\end{center}
}


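\begin{frame}[fragile]
\frametitle{Sketch: predication bits sent to a packed SIMD ALU}

An illustrative sketch (not normative, names hypothetical) of the two
predication modes from the "How are SIMD Instructions Vectorised?" slide,
for a 4-wide 8-bit SIMD block:

\begin{semiverbatim}
def simd_block_add(blk, pred, group_mode):
    for lane in range(4):            # 4 x 8-bit lanes per block
        elem = blk*4 + lane
        if group_mode:
            en = (pred >> elem) & 1  # one predicate bit per element
        else:
            en = (pred >> blk) & 1   # one bit for the whole block
        if en and elem < VL:         # end-of-vector masks the tail
            dest8[elem] = src1_8[elem] + src2_8[elem]
\end{semiverbatim}
\end{frame}

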
\frame{\frametitle{What's the deal / juice / score?}

\begin{itemize}
\item Standard Register File(s) overloaded with CSR "reg is vector"\\
(see pseudocode slides for examples)
\item "2nd FP\&INT register bank" possibility (reserved for future)
\item Element width concept remains the same as in RVV\\
(CSRs give new size to elements in registers)
\item CSRs are key-value tables (overlaps allowed: v. important)
\end{itemize}
Key differences from RVV:
\begin{itemize}
\item Predication in INT regs as a BIT field (max VL=XLEN)
\item Minimum VL must be Num Regs - 1 (all regs single LD/ST)
\item SV may condense sparse Vecs: RVV lets ALU do predication
\item Choice to Zero or skip non-predicated elements
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{ADD pseudocode (or trap, or actual hardware loop)}

\begin{semiverbatim}
function op\_add(rd, rs1, rs2, predr) # add not VADD!
  int i, id=0, irs1=0, irs2=0;
  for (i = 0; i < VL; i++)
    if (ireg[predr] & 1<<i) # predication uses intregs
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    if (reg\_is\_vectorised[rd])  \{ id += 1; \}
    if (reg\_is\_vectorised[rs1]) \{ irs1 += 1; \}
    if (reg\_is\_vectorised[rs2]) \{ irs2 += 1; \}
\end{semiverbatim}

\begin{itemize}
\item Above is oversimplified: Reg. indirection left out (for clarity).
\item SIMD slightly more complex (case above is elwidth = default)
\item Scalar-scalar and scalar-vector and vector-vector now all in one
\item OoO may choose to push ADDs into instr. queue (v. busy!)
\end{itemize}
\end{frame}

% yes it really *is* ADD not VADD. that's the entire point of
% this proposal, that *standard* operations are overloaded to
% become vectorised-on-demand


\begin{frame}[fragile]
\frametitle{Predication-Branch (or trap, or actual hardware loop)}

\begin{semiverbatim}
s1 = reg\_is\_vectorised(src1);
s2 = reg\_is\_vectorised(src2);
if (!s2 && !s1) goto branch;
for (int i = 0; i < VL; ++i)
   if (cmp(s1 ? reg[src1+i] : reg[src1],
           s2 ? reg[src2+i] : reg[src2]))
      ireg[rs3] |= 1<<i;
\end{semiverbatim}

\begin{itemize}
\item SIMD slightly more complex (case above is elwidth = default)
\item If s1 and s2 are both scalar, a standard branch occurs
\item Predication stored in integer regfile as a bitfield
\item Scalar-vector and vector-vector supported
\item Overload Branch immediate to be predication target rs3
\end{itemize}
\end{frame}

\begin{frame}[fragile]
\frametitle{VLD/VLD.S/VLD.X (or trap, or actual hardware loop)}

\begin{semiverbatim}
if (unit-strided) stride = elsize;
else stride = areg[as2]; // constant-strided
for (int i = 0; i < VL; ++i)
  if (preg\_enabled[rd] && ([!]preg[rd] & 1<<i))
    for (int j = 0; j < seglen+1; j++)
      if (reg\_is\_vectorised[rs2]) offs = vreg[rs2+i]
      else offs = i*(seglen+1)*stride;
      vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
\end{semiverbatim}

\begin{itemize}
\item Again: elwidth != default slightly more complex
\item rs2 vectorised taken to implicitly indicate VLD.X
\end{itemize}
\end{frame}


\frame{\frametitle{Predication key-value CSR store}

\begin{itemize}
\item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
\item register to be predicated if referred to (5 bits, key)\vspace{6pt}
\item register to store actual predication in (5 bits, value)\vspace{6pt}
\item predication is inverted Y/N (1 bit)\vspace{6pt}
\item non-predicated elements are to be zero'd Y/N (1 bit)\vspace{6pt}
\end{itemize}
Notes:\vspace{10pt}
\begin{itemize}
\item Table should be expanded out for high-speed implementations
\item Multiple "keys" (and values) theoretically permitted
\item RVV rules about deleting higher-indexed CSRs followed
\item A possible per-register struct layout is sketched on the next slide
\end{itemize}
}


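\begin{frame}[fragile]
\frametitle{Sketch: a possible "struct pred" layout}

One possible (non-normative) layout for the per-register predication
table entries used by the decoding pseudocode on the next slide; field
widths follow the key-value CSR fields listed above:

\begin{semiverbatim}
struct pred \{
    bool enabled; // set once a CSR key refers to this register
    bool zero;    // zero non-predicated elements Y/N    (1 bit)
    bool inv;     // predication is inverted Y/N         (1 bit)
    int  predidx; // int register holding the predicate  (5 bits)
\};
\end{semiverbatim}
\end{frame}

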
\begin{frame}[fragile]
\frametitle{Predication key-value CSR table decoding pseudocode}

\begin{semiverbatim}
struct pred fp\_pred[32];
struct pred int\_pred[32];

for (i = 0; i < 16; i++) // 16 CSRs?
   tb = int\_pred if CSRpred[i].type == 0 else fp\_pred
   idx = CSRpred[i].regidx
   tb[idx].zero = CSRpred[i].zero
   tb[idx].inv = CSRpred[i].inv
   tb[idx].predidx = CSRpred[i].predidx
   tb[idx].enabled = true
\end{semiverbatim}

\begin{itemize}
\item All 64 (int and FP) Entries zero'd before setting
\item Might be a bit complex to set up (TBD)
\end{itemize}

\end{frame}


\begin{frame}[fragile]
\frametitle{Get Predication value pseudocode}

\begin{semiverbatim}
def get\_pred\_val(bool is\_fp\_op, int reg):
   tb = fp\_pred if is\_fp\_op else int\_pred
   if (!tb[reg].enabled):
      return ~0x0               // all ops enabled
   predidx = tb[reg].predidx    // redirection occurs HERE
   predicate = intreg[predidx]  // actual predicate HERE
   if (tb[reg].inv):
      predicate = ~predicate    // invert ALL bits
   return predicate
\end{semiverbatim}

\begin{itemize}
\item References different (internal) mapping table for INT or FP
\item Actual predicate bitmask ALWAYS from the INT regfile
\end{itemize}

\end{frame}


\frame{\frametitle{To Zero or not to place zeros in non-predicated elements?}

\begin{itemize}
\item Zeroing is an implementation optimisation favouring OoO
\item Simple implementations may skip non-predicated operations
\item Zeroing forces simple implementations to explicitly destroy data
\item Complex implementations may use reg-renames to save power\\
Zeroing on predication chains makes optimisation harder
\item Compromise: REQUIRE both (specified in predication CSRs);\\
see the loop sketch on the next slide
\end{itemize}
Considerations:
\begin{itemize}
\item Complex not really impacted, simple impacted a LOT\\
with Zeroing... however it's useful (memzero)
\item Non-zero'd overlapping "Vectors" may issue overlapping ops\\
(2nd op's predicated elements slot in 1st's non-predicated ops)
\item Please don't use Vectors for "security" (use Sec-Ext)
\end{itemize}
}
% with overlapping "vectors" - bearing in mind that "vectors" are
% just a remap onto the standard register file, if the top bits of
% predication are zero, and there happens to be a second vector
% that uses some of the same register file that happens to be
% predicated out, the second vector op may be issued *at the same time*
% if there are available parallel ALUs to do so.

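
\begin{frame}[fragile]
\frametitle{Sketch: zeroing vs skipping inside the element loop}

An illustrative fragment (not normative) showing how the zeroing bit
from the predication CSR would change the inner loop of any vectorised
op:

\begin{semiverbatim}
for (i = 0; i < VL; i++)
    if (predicate & 1<<i)
        ireg[rd+i] <= result(i); // predicated-in: normal result
    else if (zeroing)
        ireg[rd+i] <= 0;         // predicated-out: explicitly zero'd
    // else: element skipped entirely (register left untouched)
\end{semiverbatim}
\end{frame}

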
\frame{\frametitle{Register key-value CSR store}

\begin{itemize}
\item key is int regfile number or FP regfile number (1 bit)
\item treated as vector if referred to in op (5 bits, key)
\item starting register to actually be used (5 bits, value)
\item element bitwidth: default, dflt/2, 8, 16 (2 bits)
\item is vector: Y/N (1 bit)
\item is packed SIMD: Y/N (1 bit)
\item register bank: 0/reserved for future ext. (1 bit)
\end{itemize}
Notes:
\begin{itemize}
\item References different (internal) mapping table for INT or FP
\item Level of indirection has implications for pipeline latency
\item (future) bank bit, no need to extend opcodes: set bank=1,
just use normal 5-bit regs, indirection takes care of the rest.
\item A possible per-register struct layout is sketched on the next slide
\end{itemize}
}


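\begin{frame}[fragile]
\frametitle{Sketch: a possible "struct vectorised" layout}

One possible (non-normative) layout for the per-register vectorisation
table entries used by the Register key-value CSR table decoding
pseudocode later in these slides; field widths follow the CSR fields
listed above:

\begin{semiverbatim}
struct vectorised \{
    bool isvector; // treat as vector Y/N                 (1 bit)
    bool packed;   // packed SIMD Y/N                     (1 bit)
    bool bank;     // 0 = normal, 1 = reserved for future (1 bit)
    int  elwidth;  // default, dflt/2, 8, 16              (2 bits)
    int  regidx;   // starting register actually used     (5 bits)
\};
\end{semiverbatim}
\end{frame}

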
\frame{\frametitle{Register element width and packed SIMD}

Packed SIMD = N:
\begin{itemize}
\item default: RV32/64/128 opcodes define elwidth = 32/64/128
\item default/2: RV32/64/128 opcodes, elwidth = 16/32/64 with
top half of register ignored (src), zero'd/s-ext (dest)
\item 8 or 16: elwidth = 8 (or 16), similar to default/2
\end{itemize}
Packed SIMD = Y (default is moot, packing is 1:1):
\begin{itemize}
\item default/2: 2 elements per register @ opcode-defined bitwidth
\item 8 or 16: standard 8 (or 16) packed SIMD
\end{itemize}
Notes:
\begin{itemize}
\item Different src/dest widths (and packs) PERMITTED
\item RV* already allows (and defines) how RV32 ops work in RV64,\\
so just logically follow that lead/example.
\item A decoding sketch follows on the next slide
\end{itemize}
}


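\begin{frame}[fragile]
\frametitle{Sketch: decoding elwidth and packed SIMD}

A rough sketch (names hypothetical) of how the 2-bit elwidth field and
the packed-SIMD bit from the register CSR could be decoded, following
the rules on the previous slide:

\begin{semiverbatim}
def decode_elwidth(opwidth, elwidth, packed):
    # opwidth: 32/64/128 as defined by the RV32/64/128 opcode
    if   elwidth == DEFAULT: width = opwidth
    elif elwidth == DFLT2:   width = opwidth / 2
    else:                    width = elwidth    # 8 or 16
    if packed:
        nelems = opwidth / width  # e.g. 4 x 8-bit in a 32-bit reg
    else:
        nelems = 1  # top half ignored (src) / zero'd or s-ext (dest)
    return width, nelems
\end{semiverbatim}
\end{frame}

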
\begin{frame}[fragile]
\frametitle{Register key-value CSR table decoding pseudocode}

\begin{semiverbatim}
struct vectorised fp\_vec[32], int\_vec[32]; // 64 in future

for (i = 0; i < 16; i++) // 16 CSRs?
   tb = int\_vec if CSRvectortb[i].type == 0 else fp\_vec
   idx = CSRvectortb[i].regidx
   tb[idx].elwidth  = CSRvectortb[i].elwidth
   tb[idx].regidx   = CSRvectortb[i].regidx   // indirection
   tb[idx].isvector = CSRvectortb[i].isvector
   tb[idx].packed   = CSRvectortb[i].packed   // SIMD or not
   tb[idx].bank     = CSRvectortb[i].bank     // 0 (1=rsvd)
\end{semiverbatim}

\begin{itemize}
\item All 32 int (and 32 FP) entries zero'd before setup
\item Might be a bit complex to set up (TBD)
\end{itemize}

\end{frame}


\begin{frame}[fragile]
\frametitle{ADD pseudocode with redirection, this time}

\begin{semiverbatim}
function op\_add(rd, rs1, rs2) # add not VADD!
  int i, id=0, irs1=0, irs2=0;
  rd  = int\_vec[rd ].isvector ? int\_vec[rd ].regidx : rd;
  rs1 = int\_vec[rs1].isvector ? int\_vec[rs1].regidx : rs1;
  rs2 = int\_vec[rs2].isvector ? int\_vec[rs2].regidx : rs2;
  predval = get\_pred\_val(FALSE, rd);
  for (i = 0; i < VL; i++)
    if (predval \& 1<<i) # predication uses intregs
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    if (int\_vec[rd ].isvector)  \{ id += 1; \}
    if (int\_vec[rs1].isvector)  \{ irs1 += 1; \}
    if (int\_vec[rs2].isvector)  \{ irs2 += 1; \}
\end{semiverbatim}

\begin{itemize}
\item SIMD (elwidth != default) not covered above
\end{itemize}
\end{frame}


\frame{\frametitle{Why are overlaps allowed in Regfiles?}

\begin{itemize}
\item Same register(s) can have multiple "interpretations"\\
(see the sketch on the next slide)
\item Set "real" register (scalar) without needing to set/unset CSRs.
\item xBitManip plus SIMD plus xBitManip = Hi/Lo bitops
\item (32-bit GREV plus 4x8-bit SIMD plus 32-bit GREV:\\
GREV @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
\item RGB 565 (video): BEXTW plus 4x8-bit SIMD plus BDEPW\\
(BEXT/BDEP @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
\item Same register(s) can be offset (no need for VSLIDE)\vspace{6pt}
\end{itemize}
Note:
\begin{itemize}
\item xBitManip reduces O($N^{6}$) SIMD down to O($N^{3}$)
\item Hi-Performance: Macro-op fusion (more pipeline stages?)
\end{itemize}
}


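\begin{frame}[fragile]
\frametitle{Sketch: two "interpretations" of the same registers}

An illustrative (hypothetical) pair of register CSR entries showing the
overlap idea from the previous slide: two different keys redirect to the
same underlying registers, at different element widths. Register choices
and the "type: I" notation are assumptions, not a fixed encoding:

\begin{semiverbatim}
CSRvect1 = \{type: I, key: x8,  val: x8, elwidth: dflt\} # 32-bit view
CSRvect2 = \{type: I, key: x16, val: x8, elwidth: 8\}    # 8-bit view

# ops naming x8  operate on x8.. at VL=N,   width=32 (e.g. GREV)
# ops naming x16 operate on x8.. at VL=Nx4, width=8  (e.g. SIMD add)
\end{semiverbatim}
\end{frame}

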
\frame{\frametitle{C.MV extremely flexible!}

\begin{itemize}
\item scalar-to-vector (w/ no pred): VSPLAT
\item scalar-to-vector (w/ dest-pred): Sparse VSPLAT
\item scalar-to-vector (w/ 1-bit dest-pred): VINSERT
\item vector-to-scalar (w/ [1-bit?] src-pred): VEXTRACT
\item vector-to-vector (w/ no pred): Vector Copy
\item vector-to-vector (w/ src pred): Vector Gather
\item vector-to-vector (w/ dest pred): Vector Scatter
\item vector-to-vector (w/ src \& dest pred): Vector Gather/Scatter
\end{itemize}
\vspace{4pt}
Notes:
\begin{itemize}
\item Surprisingly powerful! Zero-predication even more so
\item Same arrangement for FCVT, FMV, FSGNJ etc.
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{MV pseudocode with predication}

\begin{semiverbatim}
function op\_mv(rd, rs) # MV not VMV!
  rd = int\_vec[rd].isvector ? int\_vec[rd].regidx : rd;
  rs = int\_vec[rs].isvector ? int\_vec[rs].regidx : rs;
  ps = get\_pred\_val(FALSE, rs); # predication on src
  pd = get\_pred\_val(FALSE, rd); # ... AND on dest
  for (int i = 0, j = 0; i < VL && j < VL; )
    if (int\_vec[rs].isvector) while (!(ps \& 1<<i)) i++;
    if (int\_vec[rd].isvector) while (!(pd \& 1<<j)) j++;
    ireg[rd+j] <= ireg[rs+i];
    if (int\_vec[rs].isvector) i++;
    if (int\_vec[rd].isvector) j++;
\end{semiverbatim}

\begin{itemize}
\item elwidth != default not covered above (might be a bit hairy)
\item Ending early with 1-bit predication not included (VINSERT)
\end{itemize}
\end{frame}


\begin{frame}[fragile]
\frametitle{VSELECT: stays or goes? Stays if MV.X exists...}

\begin{semiverbatim}
def op_mv_x(rd, rs):             # (hypothetical) RV MV.X
    rs = regfile[rs]             # level of indirection (MV.X)
    regfile[rd] = regfile[rs]    # straight regcopy
\end{semiverbatim}

Vectorised version aka "VSELECT":

\begin{semiverbatim}
def op_mv_x(rd, rs):             # SV version of MV.X
    for i in range(VL):
        rs1 = regfile[rs+i]          # indirection
        regfile[rd+i] = regfile[rs1] # straight regcopy
\end{semiverbatim}

\begin{itemize}
\item However MV.X does not exist in RV, so neither can VSELECT
\item \red SV is not about adding new functionality, only parallelism
\end{itemize}


\end{frame}


\frame{\frametitle{Opcodes, compared to RVV}

\begin{itemize}
\item All integer and FP opcodes removed (no CLIP, FNE)
\item VMPOP, VFIRST etc. all removed (use xBitManip)
\item VSLIDE removed (use regfile overlaps)
\item C.MV covers VEXTRACT VINSERT and VSPLAT (and more)
\item Vector (or scalar-vector) copy: use C.MV (MV is a pseudo-op)
\item VMERGE: twin predicated C.MVs (one inverted. macro-op'd)\\
(see the sketch on the next slide)
\item VSETVL, VGETVL stay (the only ops that do!)
\end{itemize}
Issues:
\begin{itemize}
\item VSELECT stays? no MV.X, so no (add with custom ext?)
\item VSNE exists, but no FNE (use predication inversion?)
\item VCLIP is not in RV* (add with custom ext?)
\end{itemize}
}


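\begin{frame}[fragile]
\frametitle{Sketch: VMERGE as twin predicated C.MVs}

An illustrative (non-normative) expansion of the "VMERGE = twin
predicated C.MVs" item. Register names vd, vs1, vs2, predicate p and the
CSR-entry notation are placeholders; the pair is a macro-op fusion
candidate:

\begin{semiverbatim}
# vd = p ? vs1 : vs2  (all vectorised, predicate bits in int reg p)
# CSR pred entry for vd: \{predidx: p, inv: N, zero: N\}
C.MV vd, vs1   # copies only the elements where the p bit is 1
# CSR pred entry for vd: \{predidx: p, inv: Y, zero: N\}
C.MV vd, vs2   # copies only the elements where the p bit is 0
# zeroing must be off so the first copy's results survive
\end{semiverbatim}
\end{frame}

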
\begin{frame}[fragile]
\frametitle{Example c code: DAXPY}

\begin{semiverbatim}
void daxpy(size_t n, double a,
           const double x[], double y[])
\{
    for (size_t i = 0; i < n; i++) \{
        y[i] = a*x[i] + y[i];
    \}
\}
\end{semiverbatim}

\begin{itemize}
\item See "SIMD Considered Harmful" for SIMD/RVV analysis\\
https://sigarch.org/simd-instructions-considered-harmful/
\end{itemize}


\end{frame}


\begin{frame}[fragile]
\frametitle{RVV DAXPY assembly (RV32V)}

\begin{semiverbatim}
# a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a
  li t0, 2<<25
  vsetdcfg t0            # enable 2 64b Fl.Pt. registers
loop:
  setvl  t0, a0          # vl = t0 = min(mvl, n)
  vld    v0, a1          # load vector x
  slli   t1, t0, 3       # t1 = vl * 8 (in bytes)
  vld    v1, a2          # load vector y
  add    a1, a1, t1      # increment pointer to x by vl*8
  vfmadd v1, v0, fa0, v1 # v1 += v0 * fa0 (y = a * x + y)
  sub    a0, a0, t0      # n -= vl (t0)
  vst    v1, a2          # store Y
  add    a2, a2, t1      # increment pointer to y by vl*8
  bnez   a0, loop        # repeat if n != 0
\end{semiverbatim}
\end{frame}


\begin{frame}[fragile]
\frametitle{SV DAXPY assembly (RV64D)}

\begin{semiverbatim}
# a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a
  CSRvect1 = \{type: F, key: a3, val: a3, elwidth: dflt\}
  CSRvect2 = \{type: F, key: a7, val: a7, elwidth: dflt\}
loop:
  setvl t0, a0, 4        # vl = t0 = min(4, n)
  ld    a3, a1           # load 4 registers a3-6 from x
  slli  t1, t0, 3        # t1 = vl * 8 (in bytes)
  ld    a7, a2           # load 4 registers a7-10 from y
  add   a1, a1, t1       # increment pointer to x by vl*8
  fmadd a7, a3, fa0, a7  # v1 += v0 * fa0 (y = a * x + y)
  sub   a0, a0, t0       # n -= vl (t0)
  st    a7, a2           # store 4 registers a7-10 to y
  add   a2, a2, t1       # increment pointer to y by vl*8
  bnez  a0, loop         # repeat if n != 0
\end{semiverbatim}
\end{frame}


\frame{\frametitle{Under consideration}

\begin{itemize}
\item Is C.FNE actually needed? Should it be added if it is?
\item Element type implies polymorphism. Should it be in SV?
\item Should use of registers be allowed to "wrap" (x30 x31 x1 x2)?
\item Is detection of all-scalar ops ok (without slowing pipeline)?
\item Can VSELECT be removed? (it's really complex)
\item Can CLIP be done as a CSR (mode, like elwidth)?
\item SIMD saturation (etc.) also set as a mode?
\item Include src1/src2 predication on Comparison Ops?\\
(same arrangement as C.MV, with same flexibility/power)
\item 8/16-bit ops: is it worthwhile adding a "start offset"?\\
(a bit like misaligned addressing... for registers)\\
or just use predication to skip the start?
\end{itemize}
}


\frame{\frametitle{What's the downside(s) of SV?}
\begin{itemize}
\item EVERY register operation is inherently parallelised\\
(scalar ops are just vectors of length 1)\vspace{4pt}
\item Tightly coupled with the core (instruction issue)\\
could be disabled through MISA switch\vspace{4pt}
\item An extra pipeline phase is almost certainly essential\\
for fast low-latency implementations\vspace{4pt}
\item With zeroing off, skipping non-predicated elements is hard:\\
it is however only an optimisation (and may be omitted).\vspace{4pt}
\item Setting up the Register/Predication tables (interpreting the\\
CSR key-value stores) might be a bit complex to optimise
(any change to a CSR key-value entry needs to redo the table)
\end{itemize}
}


\frame{\frametitle{Is this OK (low latency)? Detect scalar-ops (only)}
\begin{center}
\includegraphics[height=2.5in]{scalardetect.png}\\
{\bf \red Detect when all registers are scalar for a given op}
\end{center}
}


\frame{\frametitle{Summary}

\begin{itemize}
\item Actually about parallelism, not Vectors (or SIMD) per se,\\
and NOT about adding new ALU/logic/functionality.
\item Only needs 2 actual instructions (plus the CSRs).\\
RVV - and "standard" SIMD - require ISA duplication
\item Designed for flexibility (graded levels of complexity)
\item Huge range of implementor freedom
\item Fits RISC-V ethos: achieve more with less
\item Reduces SIMD ISA proliferation by 3-4 orders of magnitude\\
(without the SIMD downsides and without sacrificing speed)
\item Covers 98\% of RVV, allows RVV to fit "on top"
\item Byproduct of SV is a reduction in code size, power usage
etc. (increased efficiency, just like Compressed)
\end{itemize}
}


\frame{
\begin{center}
{\Huge The end\vspace{20pt}\\
Thank you\vspace{20pt}\\
Questions?\vspace{20pt}
}
\end{center}

\begin{itemize}
\item Discussion: ISA-DEV mailing list
\item http://libre-riscv.org/simple\_v\_extension/
\end{itemize}
}


\end{document}