add placeholder pseudocode slides
[libreriscv.git] / simple_v_extension / simple_v_chennai_2018.tex
1 \documentclass[slidestop]{beamer}
2 \usepackage{beamerthemesplit}
3 \usepackage{graphicx}
4 \usepackage{pstricks}
5
6 \title{Simple-V RISC-V Extension for Vectorisation and SIMD}
7 \author{Luke Kenneth Casson Leighton}
8
9
10 \begin{document}
11
12 \frame{
13 \begin{center}
14 {\huge Simple-V RISC-V Extension for Vectors and SIMD}\\
15 \vspace{32pt}
16 {\Large Flexible Vectorisation}\\
17 {\Large (aka not so Simple-V?)}\\
18 \vspace{24pt}
19 {\Large [proposed for] Chennai 9th RISC-V Workshop}\\
20 \vspace{24pt}
21 {\large \today}
22 \end{center}
23 }
24
25
26 \frame{\frametitle{Credits and Acknowledgements}
27
28 \begin{itemize}
29 \item The Designers of RISC-V\vspace{15pt}
30 \item The RVV Working Group and contributors\vspace{15pt}
31 \item Allen Baum, Jacob Bachmeyer, Xan Phung, Chuanhua Chang,\\
32 Guy Lemurieux, Jonathan Neuschafer, Roger Brussee,
33 and others\vspace{15pt}
34 \item ISA-Dev Group Members\vspace{10pt}
35 \end{itemize}
36 }
37
38
39 \frame{\frametitle{Quick refresher on SIMD}
40
41 \begin{itemize}
42 \item SIMD very easy to implement (and very seductive)\vspace{10pt}
43 \item Parallelism is in the ALU\vspace{10pt}
44 \item Zero-to-Negligible impact for rest of core\vspace{10pt}
45 \end{itemize}
46 Where SIMD Goes Wrong:\vspace{10pt}
47 \begin{itemize}
48 \item See ``SIMD instructions considered harmful''
49 \url{https://www.sigarch.org/simd-instructions-considered-harmful}
50 \item Corner-cases alone are extremely complex.\\
51 Hardware is easy, but software is hell.
52 \item O($N^{6}$) ISA opcode proliferation!\\
53 opcode, elwidth, veclen, src1-src2-dest hi/lo
54 \end{itemize}
55 }
56
57 \frame{\frametitle{Quick refresher on RVV}
58
59 \begin{itemize}
60 \item Extremely powerful (extensible to 256 registers)\vspace{10pt}
61 \item Supports polymorphism, several datatypes (inc. FP16)\vspace{10pt}
62 \item Requires a separate Register File (32 w/ext to 256)\vspace{10pt}
63 \item Implemented as a separate pipeline (no impact on scalar)\vspace{10pt}
64 \end{itemize}
65 However...\vspace{10pt}
66 \begin{itemize}
67 \item 98\% opcode duplication with rest of RV (CLIP)
68 \item Extending RVV requires customisation not just of h/w:\\
69 gcc and s/w also need customisation (and maintenance)
70 \end{itemize}
71 }
72
73
74 \frame{\frametitle{The Simon Sinek lowdown (Why, How, What)}
75
76 \begin{itemize}
77 \item Why?
78 Implementors need flexibility in vectorisation to optimise for
79 area or performance depending on the scope:
80 embedded DSP, Mobile GPU's, Server CPU's and more.\vspace{4pt}\\
81 Compilers also need flexibility in vectorisation to optimise for cost
82 of pipeline setup, amount of state to context switch
83 and software portability\vspace{4pt}
84 \item How?
85 By implicitly marking INT/FP regs as ``Vectorised'',\\
86 SV expresses how existing instructions should act
87 on [contiguous] blocks of registers, in parallel.\vspace{4pt}
88 \item What?
89 Simple-V is an ``API'' that implicitly extends
90 existing (scalar) instructions with explicit parallelisation
91 (i.e. SV is actually about parallelism NOT vectors per se)
92 \end{itemize}
93 }
94
95
96 \frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}
97
98 \begin{itemize}
99 \item memcpy becomes much smaller (higher bang-per-buck)
100 \item context-switch (LOAD/STORE multiple): 1-2 instructions
101 \item Compressed instrs further reduces I-cache (etc.)
102 \item Greatly-reduced I-cache load (and less reads)
103 \item Amazingly, SIMD becomes (more) tolerable\\
104 (corner-cases for setup and teardown are gone)
105 \end{itemize}
106 Note:
107 \begin{itemize}
108 \item It's not just about Vectors: it's about instruction effectiveness
109 \item Anything that makes SIMD tolerable has to be a good thing
110 \item Anything implementor is not interested in HW-optimising,\\
111 let it fall through to exceptions (implement as a trap).
112 \end{itemize}
113 }
114
115
116 \frame{\frametitle{How does Simple-V relate to RVV? What's different?}
117
118 \begin{itemize}
119 \item RVV very heavy-duty (excellent for supercomputing)\vspace{10pt}
120 \item Simple-V abstracts parallelism (based on best of RVV)\vspace{10pt}
121 \item Graded levels: hardware, hybrid or traps (fit impl. need)\vspace{10pt}
122 \item Even Compressed become vectorised (RVV can't)\vspace{10pt}
123 \end{itemize}
124 What Simple-V is not:\vspace{10pt}
125 \begin{itemize}
126 \item A full supercomputer-level Vector Proposal
127 \item A replacement for RVV (SV is designed to be over-ridden\\
128 by - or augmented to become, or just be replaced by - RVV)
129 \end{itemize}
130 }
131
132
133 \frame{\frametitle{How is Parallelism abstracted in Simple-V?}
134
135 \begin{itemize}
136 \item Register ``typing'' turns any op into an implicit Vector op\vspace{10pt}
137 \item Primarily at the Instruction issue phase (except SIMD)\\
138 Note: it's ok to pass predication through to ALU (like SIMD)
139 \item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
140 \end{itemize}
141 Note: EVERYTHING is parallelised:
142 \begin{itemize}
143 \item All LOAD/STORE (inc. Compressed, Int/FP versions)
144 \item All ALU ops (soft / hybrid / full HW, on per-op basis)
145 \item All branches become predication targets (C.FNE added?)
146 \item C.MV of particular interest (s/v, v/v, v/s)
147 \item FCVT, FMV, FSGNJ etc. very similar to C.MV
148 \end{itemize}
149 }
150
151
152 \frame{\frametitle{Implementation Options}
153
154 \begin{itemize}
155 \item Absolute minimum: Exceptions (if CSRs indicate ``V'', trap)
156 \item Hardware loop, single-instruction issue\\
157 (Do / Don't send through predication to ALU)
158 \item Hardware loop, parallel (multi-instruction) issue\\
159 (Do / Don't send through predication to ALU)
160 \item Hardware loop, full parallel ALU (not recommended)
161 \end{itemize}
162 Notes:\vspace{6pt}
163 \begin{itemize}
164 \item 4 (or more?) options above may be deployed on per-op basis
165 \item SIMD always sends predication bits through to ALU
166 \item Minimum MVL MUST be sufficient to cover regfile LD/ST
167 \item Instr. FIFO may repeatedly split off N scalar ops at a time
168 \end{itemize}
169 }
170 % Instr. FIFO may need its own slide. Basically, the vectorised op
171 % gets pushed into the FIFO, where it is then "processed". Processing
172 % will remove the first set of ops from its vector numbering (taking
173 % predication into account) and shoving them **BACK** into the FIFO,
174 % but MODIFYING the remaining "vectorised" op, subtracting the now
175 % scalar ops from it.
176
177 \frame{\frametitle{Predicated 8-parallel ADD: 1-wide ALU}
178 \begin{center}
179 \includegraphics[height=2.5in]{padd9_alu1.png}\\
180 {\bf \red Predicated adds are shuffled down: 6 cycles in total}
181 \end{center}
182 }
183
184
185 \frame{\frametitle{Predicated 8-parallel ADD: 4-wide ALU}
186 \begin{center}
187 \includegraphics[height=2.5in]{padd9_alu4.png}\\
188 {\bf \red Predicated adds are shuffled down: 4 in 1st cycle, 2 in 2nd}
189 \end{center}
190 }
191
192
193 \frame{\frametitle{Predicated 8-parallel ADD: 3 phase FIFO expansion}
194 \begin{center}
195 \includegraphics[height=2.5in]{padd9_fifo.png}\\
196 {\bf \red First cycle takes first four 1s; second takes the rest}
197 \end{center}
198 }
199
200
201 \frame{\frametitle{How are SIMD Instructions Vectorised?}
202
203 \begin{itemize}
204 \item SIMD ALU(s) primarily unchanged\vspace{6pt}
205 \item Predication is added to each SIMD element\vspace{6pt}
206 \item Predication bits sent in groups to the ALU\vspace{6pt}
207 \item End of Vector enables (additional) predication\vspace{10pt}
208 \end{itemize}
209 Considerations:\vspace{4pt}
210 \begin{itemize}
211 \item Many SIMD ALUs possible (parallel execution)
212 \item Implementor free to choose (API remains the same)
213 \item Unused ALU units wasted, but s/w DRASTICALLY simpler
214 \item Very long SIMD ALUs could waste significant die area
215 \end{itemize}
216 }
217 % With multiple SIMD ALUs at for example 32-bit wide they can be used
218 % to either issue 64-bit or 128-bit or 256-bit wide SIMD operations
219 % or they can be used to cover several operations on totally different
220 % vectors / registers.
221
222 \frame{\frametitle{Predicated 9-parallel SIMD ADD}
223 \begin{center}
224 \includegraphics[height=2.5in]{padd9_simd.png}\\
225 {\bf \red 4-wide 8-bit SIMD, 4 bits of predicate passed to ALU}
226 \end{center}
227 }
228
229
230 \frame{\frametitle{What's the deal / juice / score?}
231
232 \begin{itemize}
233 \item Standard Register File(s) overloaded with CSR ``reg is vector''\\
234 (see pseudocode slides for examples)
235 \item Element width (and type?) concepts remain same as RVV\\
236 (CSRs are used to "interpret" elements in registers)
237 \item CSRs are key-value tables (overlaps allowed)\vspace{10pt}
238 \end{itemize}
239 Key differences from RVV:\vspace{10pt}
240 \begin{itemize}
241 \item Predication in INT regs as a BIT field (max VL=XLEN)
242 \item Minimum VL must be Num Regs - 1 (all regs single LD/ST)
243 \item SV may condense sparse Vecs: RVV lets ALU do predication
244 \item Choice to Zero or skip non-predicated elements
245 \end{itemize}
246 }
247
248
249 \begin{frame}[fragile]
250 \frametitle{ADD pseudocode (or trap, or actual hardware loop)}
251
252 \begin{semiverbatim}
253 function op_add(rd, rs1, rs2, predr) # add not VADD!
254  int i, id=0, irs1=0, irs2=0;
255  for (i = 0; i < VL; i++)
256   if (ireg[predr] & 1<<i) # predication uses intregs
257    ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
258   if (reg_is_vectorised[rd]) \{ id += 1; \}
259   if (reg_is_vectorised[rs1]) \{ irs1 += 1; \}
260   if (reg_is_vectorised[rs2]) \{ irs2 += 1; \}
261 \end{semiverbatim}
262
263 \begin{itemize}
264 \item SIMD slightly more complex (case above is elwidth = default)
265 \item Scalar-scalar and scalar-vector and vector-vector now all in one
266 \item OoO may choose to push ADDs into instr. queue (v. busy!)
267 \end{itemize}
268 \end{frame}
269
270 % yes it really *is* ADD not VADD. that's the entire point of
271 % this proposal, that *standard* operations are overloaded to
272 % become vectorised-on-demand
273
274
275 \begin{frame}[fragile]
276 \frametitle{Predication-Branch (or trap, or actual hardware loop)}
277
278 \begin{semiverbatim}
279 s1 = reg_is_vectorised(src1);
280 s2 = reg_is_vectorised(src2);
281 if (!s2 && !s1) goto branch;
282 for (int i = 0; i < VL; ++i)
283   if cmp(s1 ? reg[src1+i] : reg[src1],
284          s2 ? reg[src2+i] : reg[src2])
285     preg[rs3] |= 1 << i;
286 \end{semiverbatim}
287
288 \begin{itemize}
289 \item SIMD slightly more complex (case above is elwidth = default)
290 \item If s1 and s2 both scalars, Standard branch occurs
291 \item Predication stored in integer regfile as a bitfield
292 \item Scalar-vector and vector-vector supported
293 \end{itemize}
294 \end{frame}
295
296 \begin{frame}[fragile]
297 \frametitle{VLD/VLD.S/VLD.X (or trap, or actual hardware loop)}
298
299 \begin{semiverbatim}
300 if (unit-strided) stride = elsize;
301 else stride = areg[as2]; // constant-strided
302 for (int i = 0; i < VL; ++i)
303   if (preg_enabled[rd] && ([!]preg[rd] & 1<<i))
304     for (int j = 0; j < seglen+1; j++)
305       if (reg_is_vectorised[rs2]) offs = vreg[rs2+i]
306       else offs = i*(seglen+1)*stride;
307       vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
308 \end{semiverbatim}
309
310 \begin{itemize}
311 \item Again: elwidth != default slightly more complex
312 \item rs2 vectorised taken to implicitly indicate VLD.X
313 \end{itemize}
314 \end{frame}
315
316
317 \frame{\frametitle{Why are overlaps allowed in Regfiles?}
318
319 \begin{itemize}
320 \item Same register(s) can have multiple ``interpretations''
321 \item Set ``real'' register (scalar) without needing to set/unset CSRs.
322 \item xBitManip plus SIMD plus xBitManip = Hi/Lo bitops
323 \item (32-bit GREV plus 4x8-bit SIMD plus 32-bit GREV:\\
324 GREV @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
325 \item RGB 565 (video): BEXTW plus 4x8-bit SIMD plus BDEPW\\
326 (BEXT/BDEP @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
327 \item Same register(s) can be offset (no need for VSLIDE)\vspace{6pt}
328 \end{itemize}
329 Note:
330 \begin{itemize}
331 \item xBitManip reduces O($N^{6}$) SIMD down to O($N^{3}$)
332 \item Hi-Performance: Macro-op fusion (more pipeline stages?)
333 \end{itemize}
334 }
335
336
337 \frame{\frametitle{To Zero or not to place zeros in non-predicated elements?}
338
339 \begin{itemize}
340 \item Zeroing is an implementation optimisation favouring OoO
341 \item Simple implementations may skip non-predicated operations
342 \item Simple implementations explicitly have to destroy data
343 \item Complex implementations may use reg-renames to save power\\
344 Zeroing on predication chains makes optimisation harder
345 \item Compromise: REQUIRE both (specified in predication CSRs).
346 \end{itemize}
347 Considerations:
348 \begin{itemize}
349 \item Complex not really impacted, simple impacted a LOT\\
350 with Zeroing... however it's useful (memzero)
351 \item Non-zero'd overlapping ``Vectors'' may issue overlapping ops\\
352 (2nd op's predicated elements slot in 1st's non-predicated ops)
353 \item Please don't use Vectors for ``security'' (use Sec-Ext)
354 \end{itemize}
355 }
356 % with overlapping "vectors" - bearing in mind that "vectors" are
357 % just a remap onto the standard register file, if the top bits of
358 % predication are zero, and there happens to be a second vector
359 % that uses some of the same register file that happens to be
360 % predicated out, the second vector op may be issued *at the same time*
361 % if there are available parallel ALUs to do so.
362
363
364 \frame{\frametitle{Predication key-value CSR store}
365
366 \begin{itemize}
367 \item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
368 \item register to be predicated if referred to (5 bits, key)\vspace{6pt}
369 \item register to store actual predication in (5 bits, value)\vspace{6pt}
370 \item predication is inverted Y/N (1 bit)\vspace{6pt}
371 \item non-predicated elements are to be zero'd Y/N (1 bit)\vspace{6pt}
372 \end{itemize}
373 Notes:\vspace{10pt}
374 \begin{itemize}
375 \item Table should be expanded out for high-speed implementations
376 \item Multiple ``keys'' (and values) theoretically permitted
377 \item RVV rules about deleting higher-indexed CSRs followed
378 \end{itemize}
379 }
380
381
382 \frame{\frametitle{Predication key-value CSR pseudocode}
383
384 \begin{itemize}
385 \item TODO
386 \end{itemize}
387 }
388
389
390 \frame{\frametitle{Register key-value CSR store}
391
392 \begin{itemize}
393 \item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
394 \item treated as vector if referred to in op (5 bits, key)\vspace{6pt}
395 \item starting register to actually be used (5 bits, value)\vspace{6pt}
396 \item element bitwidth: default/8/16/32/64/rsvd (3 bits)\vspace{6pt}
397 \item element type: still under consideration\vspace{6pt}
398 \end{itemize}
399 Notes:\vspace{10pt}
400 \begin{itemize}
401 \item Same notes apply (previous slide) as for predication CSR table
402 \item Level of indirection has implications for pipeline latency
403 \end{itemize}
404 }
405
406
407 \frame{\frametitle{Register key-value CSR pseudocode}
408
409 \begin{itemize}
410 \item TODO
411 \end{itemize}
412 }
413
414
415 \frame{\frametitle{C.MV extremely flexible!}
416
417 \begin{itemize}
418 \item scalar-to-vector (w/ no pred): VSPLAT
419 \item scalar-to-vector (w/ dest-pred): Sparse VSPLAT
420 \item scalar-to-vector (w/ 1-bit dest-pred): VINSERT
421 \item vector-to-scalar (w/ [1-bit?] src-pred): VEXTRACT
422 \item vector-to-vector (w/ no pred): Vector Copy
423 \item vector-to-vector (w/ src pred): Vector Gather
424 \item vector-to-vector (w/ dest pred): Vector Scatter
425 \item vector-to-vector (w/ src \& dest pred): Vector Gather/Scatter
426 \end{itemize}
427 \vspace{4pt}
428 Notes:
429 \begin{itemize}
430 \item Surprisingly powerful!
431 \item Same arrangement for FCVT, FMV, FSGNJ etc.
432 \end{itemize}
433 }
434
435
436 \frame{\frametitle{Opcodes, compared to RVV}
437
438 \begin{itemize}
439 \item All integer and FP opcodes all removed (no CLIP!)\vspace{8pt}
440 \item VMPOP, VFIRST etc. all removed (use xBitManip)\vspace{8pt}
441 \item VSLIDE removed (use regfile overlaps)\vspace{8pt}
442 \item C.MV covers VEXTRACT, VINSERT and VSPLAT (and more)\vspace{8pt}
443 \item VSETVL, VGETVL, VSELECT stay\vspace{8pt}
444 \item Issue: VCLIP is not in RV* (add with custom ext?)\vspace{8pt}
445 \item Vector (or scalar-vector) use C.MV (MV is a pseudo-op)\vspace{8pt}
446 \item VMERGE: twin predicated C.MVs (one inverted. macro-op'd)\vspace{8pt}
447 \end{itemize}
448 }
449
450
451 \frame{\frametitle{Under consideration}
452
453 \begin{itemize}
454 \item Is C.FNE actually needed? Should it be added if it is?
455 \item Element type implies polymorphism. Should it be in SV?
456 \item Should use of registers be allowed to "wrap" (x30 x31 x1 x2)?
457 \item Is detection of all-scalar ops ok (without slowing pipeline)?
458 \item Can VSELECT be removed? (it's really complex)
459 \item Can CLIP be done as a CSR (mode, like elwidth)
460 \item SIMD saturation (etc.) also set as a mode?
461 \item For 8/16-bit ops, is it worthwhile adding a ``start offset''? \\
462 (a bit like misaligned addressing... for registers)\\
463 or just use predication to skip start?
464 \end{itemize}
465 }
466
467
468 \frame{\frametitle{What's the downside(s) of SV?}
469 \begin{itemize}
470 \item EVERY register operation is inherently parallelised\\
471 (scalar ops are just vectors of length 1)\vspace{8pt}
472 \item An extra pipeline phase is pretty much essential\\
473 for fast low-latency implementations\vspace{8pt}
474 \item Assuming an instruction FIFO, N ops could be taken off\\
475 of a parallel op per cycle (avoids filling entire FIFO;\\
476 also is less work per cycle: lower complexity / latency)\vspace{8pt}
477 \item With zeroing off, skipping non-predicated elements is hard:\\
478 it is however an optimisation (and could be skipped).
479 \end{itemize}
480 }
481
482
483 \frame{\frametitle{Is this OK (low latency)? Detect scalar-ops (only)}
484 \begin{center}
485 \includegraphics[height=2.5in]{scalardetect.png}\\
486 {\bf \red Detect when all registers are scalar for a given op}
487 \end{center}
488 }
489
490
491 \frame{\frametitle{TODO (break into separate slides)}
492
493 \begin{itemize}
494 \item Then explain why this proposal is a good way to \\
495 abstract parallelism\\
496 (hopefully also explaining how \\
497 a good compiler can make clever use of this increased parallelism)\\
498 Then explain how this can be implemented (at instruction\\
499 issue time???) with\\
500 implementation options, and what these "cost".\\
501 Finally give examples that show simple usage that compares\\
502 C code\\
503 RVIC\\
504 RVV\\
505 RVICXsimplev
506 \end{itemize}
507 }
508
509
510 \frame{\frametitle{Summary}
511
512 \begin{itemize}
513 \item Actually about parallelism, not Vectors (or SIMD) per se
514 \item Designed for flexibility (graded levels of complexity)
515 \item Huge range of implementor freedom
516 \item Fits RISC-V ethos: achieve more with less
517 \item Reduces SIMD ISA proliferation by 3-4 orders of magnitude \\
518 (without SIMD downsides or sacrificing speed trade-off)
519 \item Covers 98\% of RVV, allows RVV to fit ``on top''
520 \item Not designed for supercomputing (that's RVV), designed for
521 in between: DSPs, RV32E, Embedded 3D GPUs etc.
522 \item Not specifically designed for Vectorisation: designed to\\
523 reduce code size (increase efficiency, just
524 like Compressed)
525 \end{itemize}
526 }
527
528
529 \frame{\frametitle{slide}
530
531 \begin{itemize}
532 \item \vspace{10pt}
533 \end{itemize}
534 Considerations:\vspace{10pt}
535 \begin{itemize}
536 \item \vspace{10pt}
537 \end{itemize}
538 }
539
540
541 \frame{
542 \begin{center}
543 {\Huge \red The end\vspace{20pt}\\
544 Thank you}
545 \end{center}
546 }
547
548
549 \end{document}