\documentclass[slidestop]{beamer}
\usepackage{beamerthemesplit}
\usepackage{graphics}
\usepackage{pstricks}

\title{Simple-V RISC-V Extension for Vectorisation and SIMD}
\author{Luke Kenneth Casson Leighton}


\begin{document}

\frame{
\begin{center}
\huge{Simple-V RISC-V Extension for Vectors and SIMD}\\
\vspace{32pt}
\Large{Flexible Vectorisation}\\
\Large{(aka not so Simple-V?)}\\
\Large{(aka How to Parallelise the RISC-V ISA)}\\
\vspace{24pt}
\Large{[proposed for] Chennai 9th RISC-V Workshop}\\
\vspace{16pt}
\large{\today}
\end{center}
}


\frame{\frametitle{Credits and Acknowledgements}

\begin{itemize}
\item The Designers of RISC-V\vspace{15pt}
\item The RVV Working Group and contributors\vspace{15pt}
\item Allen Baum, Jacob Bachmeyer, Xan Phung, Chuanhua Chang,\\
Guy Lemieux, Jonathan Neuschafer, Roger Brussee,
and others\vspace{15pt}
\item ISA-Dev Group Members\vspace{10pt}
\end{itemize}
}


\frame{\frametitle{Quick refresher on SIMD}

\begin{itemize}
\item SIMD very easy to implement (and very seductive)\vspace{8pt}
\item Parallelism is in the ALU\vspace{8pt}
\item Zero-to-negligible impact on the rest of the core\vspace{8pt}
\end{itemize}
Where SIMD Goes Wrong:\vspace{10pt}
\begin{itemize}
\item See "SIMD instructions considered harmful"
https://sigarch.org/simd-instructions-considered-harmful
\item Setup and corner-cases alone are extremely complex.\\
Hardware is easy, but software is hell.
\item O($N^{6}$) ISA opcode proliferation!\\
opcode, elwidth, veclen, src1-src2-dest hi/lo
\end{itemize}
}

\frame{\frametitle{Quick refresher on RVV}

\begin{itemize}
\item Extremely powerful (extensible to 256 registers)\vspace{10pt}
\item Supports polymorphism, several datatypes (inc. FP16)\vspace{10pt}
\item Requires a separate Register File (32 w/ext to 256)\vspace{10pt}
\item Implemented as a separate pipeline (no impact on scalar)\vspace{10pt}
\end{itemize}
However...\vspace{10pt}
\begin{itemize}
\item 98 percent opcode duplication with the rest of RV (e.g. CLIP)
\item Extending RVV requires customisation not just of h/w:\\
gcc and binutils also need customisation (and maintenance)
\end{itemize}
}


\frame{\frametitle{The Simon Sinek lowdown (Why, How, What)}

\begin{itemize}
\item Why?
Implementors need flexibility in vectorisation to optimise for
area or performance depending on the scope:
embedded DSP, Mobile GPUs, Server CPUs and more.\\
Compilers also need flexibility in vectorisation to optimise for cost
of pipeline setup, amount of state to context switch
and software portability.
\item How?
By marking INT/FP regs as "Vectorised" and
adding a level of indirection,
SV expresses how existing instructions should act
on [contiguous] blocks of registers, in parallel, WITHOUT
needing any actual extra arithmetic opcodes.
\item What?
Simple-V is an "API" that implicitly extends
existing (scalar) instructions with explicit parallelisation\\
i.e. SV is actually about parallelism NOT vectors per se.\\
Has a lot in common with VLIW (without the actual VLIW).
\end{itemize}
}


\frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}

\begin{itemize}
\item memcpy becomes much smaller (higher bang-per-buck; sketch follows)
\item context-switch (LOAD/STORE multiple): 1-2 instructions
\item Compressed instrs further reduce I-cache usage (etc.)
\item Greatly-reduced I-cache load (and fewer reads)
\item Amazingly, SIMD becomes (more) tolerable (no corner-cases)
\item Modularity/Abstraction in both the h/w and the toolchain.
\item "Reach" of registers accessible by Compressed is enhanced
\item Future: double the standard INT/FP register file sizes.
\end{itemize}
Note:
\begin{itemize}
\item It's not just about Vectors: it's about instruction effectiveness
\item Anything an implementor is not interested in HW-optimising,\\
let it fall through to exceptions (implement as a trap).
\end{itemize}
}


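\begin{frame}[fragile]
\frametitle{Illustration: SV memcpy (sketch only)}

A minimal sketch of the "memcpy becomes much smaller" claim, written
in the same style as the SV DAXPY example later in these slides.
Register choices, the CSR-setup shorthand and the 3-operand setvl
form are illustrative assumptions, not a fixed ABI.

\begin{semiverbatim}
# a0 is n (in doublewords), a1 is ptr to src[0], a2 is ptr to dest[0]
    CSRvect1 = \{type: I, key: a3, val: a3, elwidth: dflt\}
loop:
    setvl t0, a0, 4    # vl = t0 = min(4, n)
    ld    a3, a1       # load 4 registers a3-6 from src
    slli  t1, t0, 3    # t1 = vl * 8 (in bytes)
    st    a3, a2       # store 4 registers a3-6 to dest
    add   a1, a1, t1   # increment src pointer by vl*8
    add   a2, a2, t1   # increment dest pointer by vl*8
    sub   a0, a0, t0   # n -= vl (t0)
    bnez  a0, loop     # repeat if n != 0
\end{semiverbatim}
\end{frame}
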
\frame{\frametitle{How does Simple-V relate to RVV? What's different?}

\begin{itemize}
\item RVV very heavy-duty (excellent for supercomputing)\vspace{8pt}
\item Simple-V abstracts parallelism (based on best of RVV)\vspace{8pt}
\item Graded levels: hardware, hybrid or traps (fit impl. need)\vspace{8pt}
\item Even Compressed instructions become vectorised (RVV can't)\vspace{8pt}
\item No polymorphism in SV (too complex)\vspace{8pt}
\end{itemize}
What Simple-V is not:\vspace{4pt}
\begin{itemize}
\item A full supercomputer-level Vector Proposal
\item A replacement for RVV (SV is designed to be over-ridden\\
by - or augmented to become - RVV)
\end{itemize}
}


\frame{\frametitle{How is Parallelism abstracted in Simple-V?}

\begin{itemize}
\item Register "typing" turns any op into an implicit Vector op:\\
registers are reinterpreted through a level of indirection
\item Primarily at the Instruction issue phase (except SIMD)\\
Note: it's ok to pass predication through to ALU (like SIMD)
\item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
\end{itemize}
Note: EVERYTHING is parallelised:
\begin{itemize}
\item All LOAD/STORE (inc. Compressed, Int/FP versions)
\item All ALU ops (Int, FP, SIMD, DSP, everything)
\item All branches become predication targets (C.FNE added?)
\item C.MV of particular interest (s/v, v/v, v/s)
\item FCVT, FMV, FSGNJ etc. very similar to C.MV
\end{itemize}
}


\frame{\frametitle{What's the deal / juice / score?}

\begin{itemize}
\item Standard Register File(s) overloaded with CSR "reg is vector"\\
(see pseudocode slides for examples)
\item "2nd FP\&INT register bank" possibility, reserved for future\\
(would allow standard regfiles to remain unmodified)
\item Element width concept remains the same as RVV\\
(CSRs give new size to elements in registers)
\item CSRs are key-value tables (overlaps allowed: v. important)
\end{itemize}
Key differences from RVV:
\begin{itemize}
\item Predication in INT reg as a BIT field (max VL=XLEN)
\item Minimum VL must be Num Regs - 1 (all regs single LD/ST)
\item SV may condense sparse Vecs: RVV lets ALU do predication
\item Choice to Zero or skip non-predicated elements
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{ADD pseudocode (or trap, or actual hardware loop)}

\begin{semiverbatim}
function op\_add(rd, rs1, rs2, predr) # add not VADD!
  int i, id=0, irs1=0, irs2=0;
  for (i = 0; i < VL; i++)
    if (ireg[predr] & 1<<i) # predication uses intregs
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    if (reg\_is\_vectorised[rd])  \{ id += 1; \}
    if (reg\_is\_vectorised[rs1]) \{ irs1 += 1; \}
    if (reg\_is\_vectorised[rs2]) \{ irs2 += 1; \}
\end{semiverbatim}

\begin{itemize}
\item Above is oversimplified: Reg. indirection left out (for clarity).
\item SIMD slightly more complex (case above is elwidth = default)
\item Scalar-scalar and scalar-vector and vector-vector now all in one
\item OoO may choose to push ADDs into instr. queue (v. busy!)
\end{itemize}
\end{frame}

% yes it really *is* ADD not VADD. that's the entire point of
% this proposal, that *standard* operations are overloaded to
% become vectorised-on-demand


\begin{frame}[fragile]
\frametitle{Predication-Branch (or trap, or actual hardware loop)}

\begin{semiverbatim}
s1 = reg\_is\_vectorised(src1);
s2 = reg\_is\_vectorised(src2);
if (!s2 && !s1) goto branch;
for (int i = 0; i < VL; ++i)
   if (cmp(s1 ? reg[src1+i]:reg[src1],
           s2 ? reg[src2+i]:reg[src2]))
      ireg[rs3] |= 1<<i;
\end{semiverbatim}

\begin{itemize}
\item SIMD slightly more complex (case above is elwidth = default)
\item If s1 and s2 are both scalars, a standard branch occurs
\item Predication stored in integer regfile as a bitfield
\item Scalar-vector and vector-vector supported
\item Overload Branch immediate to be predication target rs3
\end{itemize}
\end{frame}

\begin{frame}[fragile]
\frametitle{VLD/VLD.S/VLD.X (or trap, or actual hardware loop)}

\begin{semiverbatim}
if (unit-strided) stride = elsize;
else stride = areg[as2]; // constant-strided
for (int i = 0; i < VL; ++i)
  if (preg\_enabled[rd] && ([!]preg[rd] & 1<<i))
    for (int j = 0; j < seglen+1; j++)
      if (reg\_is\_vectorised[rs2]) offs = vreg[rs2+i]
      else offs = i*(seglen+1)*stride;
      vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
\end{semiverbatim}

\begin{itemize}
\item Again: elwidth != default slightly more complex
\item rs2 vectorised taken to implicitly indicate VLD.X
\end{itemize}
\end{frame}


\frame{\frametitle{Register key-value CSR store}

\begin{itemize}
\item key is int regfile number or FP regfile number (1 bit)
\item treated as vector if referred to in op (5 bits, key)
\item starting register to actually be used (5 bits, value)
\item element bitwidth: default, dflt/2, 8, 16 (2 bits)
\item is vector: Y/N (1 bit)
\item is packed SIMD: Y/N (1 bit)
\item register bank: 0/reserved for future ext. (1 bit)
\end{itemize}
Notes:
\begin{itemize}
\item References different (internal) mapping table for INT or FP
\item Level of indirection has implications for pipeline latency
\item (future) bank bit, no need to extend opcodes: set bank=1,
just use normal 5-bit regs, indirection takes care of the rest.
\item One-entry bit layout sketched on the next slide
\end{itemize}
}


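\begin{frame}[fragile]
\frametitle{Register key-value CSR entry: one possible packing (sketch)}

A sketch of how one entry from the previous slide could be packed.
Only the field widths come from that slide; the field order, the
struct/field names (chosen to match the decoding pseudocode that
follows) and the exact packing are illustrative assumptions.

\begin{semiverbatim}
struct CSRvec\_entry \{      // 16 bits total (sketch)
  unsigned type     : 1;   // 0 = INT regfile, 1 = FP regfile
  unsigned regkey   : 5;   // register referred to in the opcode (key)
  unsigned regidx   : 5;   // starting register actually used (value)
  unsigned elwidth  : 2;   // default, dflt/2, 8, 16
  unsigned isvector : 1;   // treat as vector: Y/N
  unsigned packed   : 1;   // packed SIMD: Y/N
  unsigned bank     : 1;   // 0 (1 reserved for future extension)
\};
\end{semiverbatim}
\end{frame}
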
\frame{\frametitle{Register element width and packed SIMD}

Packed SIMD = N:
\begin{itemize}
\item default: RV32/64/128 opcodes define elwidth = 32/64/128
\item default/2: RV32/64/128 opcodes, elwidth = 16/32/64 with
top half of register ignored (src), zero'd/s-ext (dest)
\item 8 or 16: elwidth = 8 (or 16), similar to default/2
\end{itemize}
Packed SIMD = Y (default is moot, packing is 1:1):
\begin{itemize}
\item default/2: 2 elements per register @ opcode-defined bitwidth
\item 8 or 16: standard 8 (or 16) packed SIMD
\end{itemize}
Notes:
\begin{itemize}
\item Different src/dest widths (and packs) PERMITTED
\item RV* already allows (and defines) how RV32 ops work in RV64\\
so just logically follow that lead/example.
\item Decode sketch on the next slide
\end{itemize}
}


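\begin{frame}[fragile]
\frametitle{Element width decode (sketch only)}

A sketch of how the 2-bit elwidth field might translate into an
actual element width, given the width defined by the opcode
(opwidth = 32, 64 or 128). The 0-3 encoding and the function name
are illustrative assumptions; only the four meanings come from the
previous slide.

\begin{semiverbatim}
def decode_elwidth(elwidth, opwidth):
   if elwidth == 0: return opwidth    # default: as opcode defines
   if elwidth == 1: return opwidth/2  # default/2 (e.g. 32 on RV64)
   if elwidth == 2: return 8
   if elwidth == 3: return 16
# packed=N: top half of reg ignored (src), zero'd/s-ext (dest)
# packed=Y: opwidth/elwidth elements packed into each register
\end{semiverbatim}
\end{frame}
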
\begin{frame}[fragile]
\frametitle{Register key-value CSR table decoding pseudocode}

\begin{semiverbatim}
struct vectorised fp\_vec[32], int\_vec[32]; // 64 in future

for (i = 0; i < 16; i++) // 16 CSRs?
   tb = int\_vec if CSRvec[i].type == 0 else fp\_vec
   idx = CSRvec[i].regkey // INT/FP src/dst reg in opcode
   tb[idx].elwidth  = CSRvec[i].elwidth
   tb[idx].regidx   = CSRvec[i].regidx   // indirection
   tb[idx].isvector = CSRvec[i].isvector
   tb[idx].packed   = CSRvec[i].packed   // SIMD or not
   tb[idx].bank     = CSRvec[i].bank     // 0 (1=rsvd)
\end{semiverbatim}

\begin{itemize}
\item All 32 int (and 32 FP) entries zero'd before setup
\item Might be a bit complex to set up in hardware (TBD)
\end{itemize}

\end{frame}


\frame{\frametitle{Predication key-value CSR store}

\begin{itemize}
\item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
\item register to be predicated if referred to (5 bits, key)\vspace{6pt}
\item INT reg with actual predication mask (5 bits, value)\vspace{6pt}
\item predication is inverted Y/N (1 bit)\vspace{6pt}
\item non-predicated elements are to be zero'd Y/N (1 bit)\vspace{6pt}
\end{itemize}
Notes:\vspace{10pt}
\begin{itemize}
\item Table should be expanded out for high-speed implementations
\item Multiple "keys" (and values) theoretically permitted
\item RVV rules about deleting higher-indexed CSRs followed
\item One-entry bit layout sketched on the next slide
\end{itemize}
}


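\begin{frame}[fragile]
\frametitle{Predication key-value CSR entry: one possible packing (sketch)}

As with the register CSR entry, a sketch of how one predication
entry could be packed. Field widths come from the previous slide;
the ordering, names (matching the decoding pseudocode that follows)
and packing are illustrative assumptions.

\begin{semiverbatim}
struct CSRpred\_entry \{    // 13 bits total (sketch)
  unsigned type    : 1;   // 0 = INT regfile, 1 = FP regfile
  unsigned regkey  : 5;   // register to be predicated (key)
  unsigned predidx : 5;   // INT reg holding the mask (value)
  unsigned inv     : 1;   // predication inverted: Y/N
  unsigned zero    : 1;   // zero non-predicated elements: Y/N
\};
\end{semiverbatim}
\end{frame}
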
\begin{frame}[fragile]
\frametitle{Predication key-value CSR table decoding pseudocode}

\begin{semiverbatim}
struct pred fp\_pred[32], int\_pred[32];

for (i = 0; i < 16; i++) // 16 CSRs?
   tb = int\_pred if CSRpred[i].type == 0 else fp\_pred
   idx = CSRpred[i].regkey
   tb[idx].zero    = CSRpred[i].zero
   tb[idx].inv     = CSRpred[i].inv
   tb[idx].predidx = CSRpred[i].predidx
   tb[idx].enabled = true
\end{semiverbatim}

\begin{itemize}
\item All 32 int and 32 FP entries zero'd before setting
\item Might be a bit complex to set up in hardware (TBD)
\end{itemize}

\end{frame}


\begin{frame}[fragile]
\frametitle{Get Predication value pseudocode}

\begin{semiverbatim}
def get\_pred\_val(bool is\_fp\_op, int reg):
   tb = fp\_pred if is\_fp\_op else int\_pred
   if (!tb[reg].enabled):
      return ~0x0               // all ops enabled
   predidx = tb[reg].predidx    // redirection occurs HERE
   predicate = intreg[predidx]  // actual predicate HERE
   if (tb[reg].inv):
      predicate = ~predicate    // invert ALL bits
   return predicate
\end{semiverbatim}

\begin{itemize}
\item References different (internal) mapping table for INT or FP
\item Actual predicate bitmask ALWAYS from the INT regfile
\item Hard-limit on MVL of XLEN (predication only 1 intreg)
\end{itemize}

\end{frame}


\frame{\frametitle{To Zero or not to place zeros in non-predicated elements?}

\begin{itemize}
\item Zeroing is an implementation optimisation favouring OoO
\item Simple implementations may skip non-predicated operations
\item Simple implementations explicitly have to destroy data
\item Complex implementations may use reg-renames to save power\\
Zeroing on predication chains makes optimisation harder
\item Compromise: REQUIRE both (specified in predication CSRs).
\end{itemize}
Considerations:
\begin{itemize}
\item Complex not really impacted, simple impacted a LOT\\
with Zeroing... however it's useful (memzero)
\item Non-zero'd overlapping "Vectors" may issue overlapping ops\\
(2nd op's predicated elements slot in 1st's non-predicated ops)
\item Please don't use Vectors for "security" (use Sec-Ext)
\item Loop sketch of the two behaviours on the next slide
\end{itemize}
}
% with overlapping "vectors" - bearing in mind that "vectors" are
% just a remap onto the standard register file, if the top bits of
% predication are zero, and there happens to be a second vector
% that uses some of the same register file that happens to be
% predicated out, the second vector op may be issued *at the same time*
% if there are available parallel ALUs to do so.


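\begin{frame}[fragile]
\frametitle{Zeroing vs skipping: element loop (sketch only)}

A sketch of the element loop with the predication "zero" bit
honoured, re-using names from the earlier ADD pseudocode. The
separate id/irs1/irs2 stepping is left out for brevity, so this is
illustrative only, not the normative loop.

\begin{semiverbatim}
for (i = 0; i < VL; i++)
  if (predval & 1<<i)
     ireg[rd+i] <= ireg[rs1+i] + ireg[rs2+i];
  else if (int\_pred[rd].zero)
     ireg[rd+i] <= 0;  # zeroing: data explicitly destroyed
  # else: element skipped entirely (may be elided / reordered,
  #       which is what makes OoO optimisation easier)
\end{semiverbatim}
\end{frame}
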
\frame{\frametitle{Implementation Options}

\begin{itemize}
\item Absolute minimum: Exceptions: if CSRs indicate "V", trap.\\
(Requires as absolute minimum that CSRs be in H/W)
\item Hardware loop, single-instruction issue\\
(Do / Don't send through predication to ALU)
\item Hardware loop, parallel (multi-instruction) issue\\
(Do / Don't send through predication to ALU)
\item Hardware loop, full parallel ALU (not recommended)
\end{itemize}
Notes:\vspace{4pt}
\begin{itemize}
\item 4 (or more?) options above may be deployed on a per-op basis
\item SIMD always sends predication bits through to ALU
\item Minimum MVL MUST be sufficient to cover regfile LD/ST
\item Instr. FIFO may repeatedly split off N scalar ops at a time\\
(FIFO sketch on the next slide)
\end{itemize}
}
% Instr. FIFO may need its own slide. Basically, the vectorised op
% gets pushed into the FIFO, where it is then "processed". Processing
% will remove the first set of ops from its vector numbering (taking
% predication into account) and shoving them **BACK** into the FIFO,
% but MODIFYING the remaining "vectorised" op, subtracting the now
% scalar ops from it.

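\begin{frame}[fragile]
\frametitle{Instr. FIFO splitting (sketch only)}

A sketch of the FIFO processing described in the previous slide's
notes: split off the first N elements of a vectorised op as scalar
ops, push them back into the FIFO, and re-queue the (modified)
remainder. The names fifo, predbit, scalar\_op and the velstart
field are all illustrative assumptions.

\begin{semiverbatim}
def split\_vector\_op(op, N):      # op at the head of the instr. FIFO
   issued = 0
   i = op.velstart               # next element not yet issued
   while i < VL and issued < N:
      if predbit(op, i):         # predication taken into account
         fifo.push(scalar\_op(op, i)) # element goes back as a scalar op
         issued += 1
      i += 1
   if i < VL:                    # elements remain: MODIFY the
      op.velstart = i            # vectorised op and push it
      fifo.push(op)              # **back** into the FIFO
\end{semiverbatim}
\end{frame}

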
\frame{\frametitle{Predicated 8-parallel ADD: 1-wide ALU}
\begin{center}
\includegraphics[height=2.5in]{padd9_alu1.png}\\
{\bf \red Predicated adds are shuffled down: 6 cycles in total}
\end{center}
}


\frame{\frametitle{Predicated 8-parallel ADD: 4-wide ALU}
\begin{center}
\includegraphics[height=2.5in]{padd9_alu4.png}\\
{\bf \red Predicated adds are shuffled down: 4 in 1st cycle, 2 in 2nd}
\end{center}
}


\frame{\frametitle{Predicated 8-parallel ADD: 3 phase FIFO expansion}
\begin{center}
\includegraphics[height=2.5in]{padd9_fifo.png}\\
{\bf \red First cycle takes first four 1s; second takes the rest}
\end{center}
}


\begin{frame}[fragile]
\frametitle{ADD pseudocode with redirection (and proper predication)}

\begin{semiverbatim}
function op\_add(rd, rs1, rs2) # add not VADD!
  int i, id=0, irs1=0, irs2=0;
  rd  = int\_vec[rd ].isvector ? int\_vec[rd ].regidx : rd;
  rs1 = int\_vec[rs1].isvector ? int\_vec[rs1].regidx : rs1;
  rs2 = int\_vec[rs2].isvector ? int\_vec[rs2].regidx : rs2;
  predval = get\_pred\_val(FALSE, rd);
  for (i = 0; i < VL; i++)
    if (predval \& 1<<i) # predication uses intregs
       ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
    if (int\_vec[rd ].isvector)  \{ id += 1; \}
    if (int\_vec[rs1].isvector)  \{ irs1 += 1; \}
    if (int\_vec[rs2].isvector)  \{ irs2 += 1; \}
\end{semiverbatim}

\begin{itemize}
\item SIMD (elwidth != default) not covered above
\end{itemize}
\end{frame}


\frame{\frametitle{How are SIMD Instructions Vectorised?}

\begin{itemize}
\item SIMD ALU(s) primarily unchanged
\item Predication is added down each SIMD element (if requested,
otherwise entire block will be predicated as a group)
\item Predication bits sent in groups to the ALU (if requested,
otherwise just one bit for the entire packed block)
\item End of Vector enables (additional) predication:
completely nullifies end-case code (ONLY in multi-bit
predication mode)
\end{itemize}
Considerations:\vspace{4pt}
\begin{itemize}
\item Many SIMD ALUs possible (parallel execution)
\item Implementor free to choose (API remains the same)
\item Unused ALU units wasted, but s/w DRASTICALLY simpler
\item Very long SIMD ALUs could waste significant die area
\end{itemize}
}
% With multiple SIMD ALUs at for example 32-bit wide they can be used
% to either issue 64-bit or 128-bit or 256-bit wide SIMD operations
% or they can be used to cover several operations on totally different
% vectors / registers.

\frame{\frametitle{Predicated 9-parallel SIMD ADD}
\begin{center}
\includegraphics[height=2.5in]{padd9_simd.png}\\
{\bf \red 4-wide 8-bit SIMD, 4 bits of predicate passed to ALU}
\end{center}
}


\frame{\frametitle{Why are overlaps allowed in Regfiles?}

\begin{itemize}
\item Same register(s) can have multiple "interpretations"
\item Set "real" register (scalar) without needing to set/unset CSRs.
\item xBitManip plus SIMD plus xBitManip = Hi/Lo bitops
\item (32-bit GREV plus 4x8-bit SIMD plus 32-bit GREV:\\
GREV @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
\item RGB 565 (video): BEXTW plus 4x8-bit SIMD plus BDEPW\\
(BEXT/BDEP @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
\item Same register(s) can be offset (no need for VSLIDE)\vspace{6pt}
\end{itemize}
Note:
\begin{itemize}
\item xBitManip reduces O($N^{6}$) SIMD down to O($N^{3}$)
\item Hi-Performance: Macro-op fusion (more pipeline stages?)
\item CSR sketch of the dual "interpretation" on the next slide
\end{itemize}
}


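\begin{frame}[fragile]
\frametitle{Overlapping "interpretations": CSR setup (sketch only)}

A sketch of the CSR setup behind the GREV-plus-SIMD example on the
previous slide: two key-value entries point at the same underlying
registers with different element widths. The register choices
(t0, t1, s0) and the CSR shorthand are illustrative assumptions.

\begin{semiverbatim}
# same underlying registers, two simultaneous "interpretations"
CSRvect1 = \{type: I, key: t0, val: s0, elwidth: dflt\} # 32-bit view
CSRvect2 = \{type: I, key: t1, val: s0, elwidth: 8\}    # 4x8-bit view

# ops referring to t0 at VL=N   see N  32-bit elements (GREV pass)
# ops referring to t1 at VL=Nx4 see 4N  8-bit elements (SIMD pass)
# ... in the very same registers s0, s1, ... with no copying, no VSLIDE
\end{semiverbatim}
\end{frame}

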
\frame{\frametitle{C.MV extremely flexible!}

\begin{itemize}
\item scalar-to-vector (w/ no pred): VSPLAT
\item scalar-to-vector (w/ dest-pred): Sparse VSPLAT
\item scalar-to-vector (w/ 1-bit dest-pred): VINSERT
\item vector-to-scalar (w/ [1-bit?] src-pred): VEXTRACT
\item vector-to-vector (w/ no pred): Vector Copy
\item vector-to-vector (w/ src pred): Vector Gather
\item vector-to-vector (w/ dest pred): Vector Scatter
\item vector-to-vector (w/ src \& dest pred): Vector Gather/Scatter
\end{itemize}
\vspace{4pt}
Notes:
\begin{itemize}
\item Surprisingly powerful! Zero-predication even more so
\item Same arrangement for FCVT, FMV, FSGNJ etc.
\end{itemize}
}


\begin{frame}[fragile]
\frametitle{MV pseudocode with predication}

\begin{semiverbatim}
function op\_mv(rd, rs) # MV not VMV!
  rd = int\_vec[rd].isvector ? int\_vec[rd].regidx : rd;
  rs = int\_vec[rs].isvector ? int\_vec[rs].regidx : rs;
  ps = get\_pred\_val(FALSE, rs); # predication on src
  pd = get\_pred\_val(FALSE, rd); # ... AND on dest
  for (int i = 0, int j = 0; i < VL && j < VL;):
    if (int\_vec[rs].isvec) while (!(ps \& 1<<i)) i++;
    if (int\_vec[rd].isvec) while (!(pd \& 1<<j)) j++;
    ireg[rd+j] <= ireg[rs+i];
    if (int\_vec[rs].isvec) i++;
    if (int\_vec[rd].isvec) j++;
\end{semiverbatim}

\begin{itemize}
\item elwidth != default not covered above (might be a bit hairy)
\item Ending early with 1-bit predication not included (VINSERT)
\item VSPLAT usage sketched on the next slide
\end{itemize}
\end{frame}


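\begin{frame}[fragile]
\frametitle{Usage example: VSPLAT via C.MV (sketch only)}

A sketch of how VSPLAT falls out of the MV pseudocode: rd is marked
vectorised, rs stays scalar, no predication is set. Register choices
and the CSR-setup / setvl shorthand (borrowed from the SV DAXPY
slide) are illustrative assumptions.

\begin{semiverbatim}
CSRvect1 = \{type: I, key: a3, val: a3, elwidth: dflt\} # a3 "is a vector"
setvl t0, a0, 4     # vl = t0 = min(4, n), assume n >= 4
c.mv  a3, s0        # a3 <= s0; a4 <= s0; a5 <= s0; a6 <= s0

# walking op\_mv: rs (s0) is scalar so i stays 0,
# rd (a3) is a vector so j steps 0..VL-1: a scalar "splat"
\end{semiverbatim}
\end{frame}

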
\begin{frame}[fragile]
\frametitle{VSELECT: stays or goes? Stays if MV.X exists...}

\begin{semiverbatim}
def op_mv_x(rd, rs):           # (hypothetical) RV MV.X
   rs = regfile[rs]            # level of indirection (MV.X)
   regfile[rd] = regfile[rs]   # straight regcopy
\end{semiverbatim}

Vectorised version aka "VSELECT":

\begin{semiverbatim}
def op_mv_x(rd, rs):           # SV version of MV.X
   for i in range(VL):
      rs1 = regfile[rs+i]      # indirection
      regfile[rd+i] = regfile[rs1] # straight regcopy
\end{semiverbatim}

\begin{itemize}
\item However MV.X does not exist in RV, so neither can VSELECT
\item \red SV is not about adding new functionality, only parallelism
\end{itemize}


\end{frame}


\frame{\frametitle{Opcodes, compared to RVV}

\begin{itemize}
\item All integer and FP opcodes removed (no CLIP, FNE)
\item VMPOP, VFIRST etc. all removed (use xBitManip)
\item VSLIDE removed (use regfile overlaps)
\item C.MV covers VEXTRACT VINSERT and VSPLAT (and more)
\item Vector (or scalar-vector) copy: use C.MV (MV is a pseudo-op)
\item VMERGE: twin predicated C.MVs (one inverted; macro-op'd; sketch follows)
\item VSETVL, VGETVL stay (the only ops that do!)
\end{itemize}
Issues:
\begin{itemize}
\item VSELECT stays? no MV.X, so no (add with custom ext?)
\item VSNE exists, but no FNE (use predication inversion?)
\item VCLIP is not in RV* (add with custom ext? or CSR?)
\end{itemize}
}


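\begin{frame}[fragile]
\frametitle{VMERGE as twin predicated C.MVs (sketch only)}

A sketch, at the element level, of the "twin predicated C.MVs, one
inverted" bullet on the previous slide: the same mask is applied
as-is for the first copy and inverted for the second. This shows the
intended semantics only; mask, rs1, rs2 are illustrative names.

\begin{semiverbatim}
# VMERGE rd = mask ? rs1 : rs2, expressed as two predicated copies
for (i = 0; i < VL; i++)         # first C.MV: mask as-is
  if (mask \& 1<<i)  ireg[rd+i] <= ireg[rs1+i];
for (i = 0; i < VL; i++)         # second C.MV: mask inverted
  if (~mask \& 1<<i) ireg[rd+i] <= ireg[rs2+i];
# the back-to-back pair is a macro-op fusion candidate
\end{semiverbatim}
\end{frame}

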
\begin{frame}[fragile]
\frametitle{Example c code: DAXPY}

\begin{semiverbatim}
void daxpy(size_t n, double a,
           const double x[], double y[])
\{
    for (size_t i = 0; i < n; i++) \{
        y[i] = a*x[i] + y[i];
    \}
\}
\end{semiverbatim}

\begin{itemize}
\item See "SIMD Considered Harmful" for SIMD/RVV analysis\\
https://sigarch.org/simd-instructions-considered-harmful/
\end{itemize}


\end{frame}


\begin{frame}[fragile]
\frametitle{RVV DAXPY assembly (RV32V)}

\begin{semiverbatim}
# a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a
    li t0, 2<<25
    vsetdcfg t0            # enable 2 64b Fl.Pt. registers
loop:
    setvl  t0, a0          # vl = t0 = min(mvl, n)
    vld    v0, a1          # load vector x
    slli   t1, t0, 3       # t1 = vl * 8 (in bytes)
    vld    v1, a2          # load vector y
    add    a1, a1, t1      # increment pointer to x by vl*8
    vfmadd v1, v0, fa0, v1 # v1 += v0 * fa0 (y = a * x + y)
    sub    a0, a0, t0      # n -= vl (t0)
    vst    v1, a2          # store Y
    add    a2, a2, t1      # increment pointer to y by vl*8
    bnez   a0, loop        # repeat if n != 0
\end{semiverbatim}
\end{frame}


\begin{frame}[fragile]
\frametitle{SV DAXPY assembly (RV64D)}

\begin{semiverbatim}
# a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a
    CSRvect1 = \{type: F, key: a3, val: a3, elwidth: dflt\}
    CSRvect2 = \{type: F, key: a7, val: a7, elwidth: dflt\}
loop:
    setvl t0, a0, 4        # vl = t0 = min(4, n)
    ld    a3, a1           # load 4 registers a3-6 from x
    slli  t1, t0, 3        # t1 = vl * 8 (in bytes)
    ld    a7, a2           # load 4 registers a7-10 from y
    add   a1, a1, t1       # increment pointer to x by vl*8
    fmadd a7, a3, fa0, a7  # a7 += a3 * fa0 (y = a * x + y)
    sub   a0, a0, t0       # n -= vl (t0)
    st    a7, a2           # store 4 registers a7-10 to y
    add   a2, a2, t1       # increment pointer to y by vl*8
    bnez  a0, loop         # repeat if n != 0
\end{semiverbatim}
\end{frame}


\frame{\frametitle{Under consideration}

\begin{itemize}
\item Should the future extra bank be included now?
\item How many Register and Predication CSRs should there be?\\
(and how many in RV32E)
\item How many in M-Mode (for doing context-switch)?
\item Should use of registers be allowed to "wrap" (x30 x31 x1 x2)?
\item Can CLIP be done as a CSR (mode, like elwidth)?
\item SIMD saturation (etc.) also set as a mode?
\item Include src1/src2 predication on Comparison Ops?\\
(same arrangement as C.MV, with same flexibility/power)
\item For 8/16-bit ops, is it worthwhile adding a "start offset"?\\
(a bit like misaligned addressing... for registers)\\
or just use predication to skip the start?
\end{itemize}
}


\frame{\frametitle{What's the downside(s) of SV?}
\begin{itemize}
\item EVERY register operation is inherently parallelised\\
(scalar ops are just vectors of length 1)\vspace{4pt}
\item Tightly coupled with the core (instruction issue)\\
could be disabled through MISA switch\vspace{4pt}
\item An extra pipeline phase almost certainly essential\\
for fast low-latency implementations\vspace{4pt}
\item With zeroing off, skipping non-predicated elements is hard:\\
it is however an optimisation (and could be left out).\vspace{4pt}
\item Setting up the Register/Predication tables (interpreting the\\
CSR key-value stores) might be a bit complex to optimise
(any change to a CSR key-value entry needs the table to be redone)
\end{itemize}
}


\frame{\frametitle{Summary}

\begin{itemize}
\item Actually about parallelism, not Vectors (or SIMD) per se\\
and NOT about adding new ALU/logic/functionality.
\item Only needs 2 actual instructions (plus the CSRs).\\
RVV - and "standard" SIMD - require ISA duplication
\item Designed for flexibility (graded levels of complexity)
\item Huge range of implementor freedom
\item Fits RISC-V ethos: achieve more with less
\item Reduces SIMD ISA proliferation by 3-4 orders of magnitude\\
(without the SIMD downsides or sacrificing the speed trade-off)
\item Covers 98\% of RVV, allows RVV to fit "on top"
\item Byproduct of SV is a reduction in code size, power usage
etc. (increased efficiency, just like Compressed)
\end{itemize}
}


\frame{
\begin{center}
{\Huge The end\vspace{20pt}\\
Thank you\vspace{20pt}\\
Questions?\vspace{20pt}
}
\end{center}

\begin{itemize}
\item Discussion: ISA-DEV mailing list
\item http://libre-riscv.org/simple\_v\_extension/
\end{itemize}
}


\end{document}