update
[libreriscv.git] / simple_v_extension / simple_v_chennai_2018.tex
1 \documentclass[slidestop]{beamer}
2 \usepackage{beamerthemesplit}
3 \usepackage{graphics}
4 \usepackage{pstricks}
5
6 \title{Simple-V RISC-V Extension for Vectorisation and SIMD}
7 \author{Luke Kenneth Casson Leighton}
8
9
10 \begin{document}
11
12 \frame{
13 \begin{center}
14 \huge{Simple-V RISC-V Extension for Vectors and SIMD}\\
15 \vspace{32pt}
16 \Large{Flexible Vectorisation}\\
17 \Large{(aka not so Simple-V?)}\\
18 \vspace{24pt}
19 \Large{[proposed for] Chennai 9th RISC-V Workshop}\\
20 \vspace{24pt}
21 \large{\today}
22 \end{center}
23 }
24
25
26 \frame{\frametitle{Credits and Acknowledgements}
27
28 \begin{itemize}
29 \item The Designers of RISC-V\vspace{15pt}
30 \item The RVV Working Group and contributors\vspace{15pt}
31 \item Allen Baum, Jacob Bachmeyer, Xan Phung, Chuanhua Chang,\\
32 Guy Lemieux, Jonathan Neuschafer, Roger Brussee,
33 and others\vspace{15pt}
34 \item ISA-Dev Group Members\vspace{10pt}
35 \end{itemize}
36 }
37
38
39 \frame{\frametitle{Quick refresher on SIMD}
40
41 \begin{itemize}
42 \item SIMD very easy to implement (and very seductive)\vspace{10pt}
43 \item Parallelism is in the ALU\vspace{10pt}
44 \item Zero-to-Negligible impact for rest of core\vspace{10pt}
45 \end{itemize}
46 Where SIMD Goes Wrong:\vspace{10pt}
47 \begin{itemize}
48 \item See ``SIMD instructions considered harmful''
49 \url{https://www.sigarch.org/simd-instructions-considered-harmful}
50 \item Corner-cases alone are extremely complex.\\
51 Hardware is easy, but software is hell.
52 \item O($N^{6}$) ISA opcode proliferation!\\
53 opcode, elwidth, veclen, src1-src2-dest hi/lo
54 \end{itemize}
55 }
56
57 \frame{\frametitle{Quick refresher on RVV}
58
59 \begin{itemize}
60 \item Extremely powerful (extensible to 256 registers)\vspace{10pt}
61 \item Supports polymorphism, several datatypes (inc. FP16)\vspace{10pt}
62 \item Requires a separate Register File (32 w/ext to 256)\vspace{10pt}
63 \item Implemented as a separate pipeline (no impact on scalar)\vspace{10pt}
64 \end{itemize}
65 However...\vspace{10pt}
66 \begin{itemize}
67 \item 98\% opcode duplication with rest of RV (CLIP)
68 \item Extending RVV requires customisation not just of h/w:\\
69 gcc and s/w also need customisation (and maintenance)
70 \end{itemize}
71 }
72
73
74 \frame{\frametitle{The Simon Sinek lowdown (Why, How, What)}
75
76 \begin{itemize}
77 \item Why?
78 Implementors need flexibility in vectorisation to optimise for
79 area or performance depending on the scope:
80 embedded DSP, Mobile GPUs, Server CPUs and more.\vspace{4pt}\\
81 Compilers also need flexibility in vectorisation to optimise for cost
82 of pipeline setup, amount of state to context switch
83 and software portability\vspace{4pt}
84 \item How?
85 By implicitly marking INT/FP regs as ``Vectorised'',\\
86 SV expresses how existing instructions should act
87 on [contiguous] blocks of registers, in parallel.\vspace{4pt}
88 \item What?
89 Simple-V is an ``API'' that implicitly extends
90 existing (scalar) instructions with explicit parallelisation
91 (i.e. SV is actually about parallelism NOT vectors per se)
92 \end{itemize}
93 }
94
95
96 \frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}
97
98 \begin{itemize}
99 \item memcpy becomes much smaller (higher bang-per-buck)\vspace{10pt}
100 \item context-switch (LOAD/STORE multiple): 1-2 instructions\vspace{10pt}
101 \item Compressed instrs further reduces I-cache (etc.)\vspace{10pt}
102 \item greatly-reduced I-cache load (and less reads)\vspace{10pt}
103 \end{itemize}
104 Note:\vspace{10pt}
105 \begin{itemize}
106 \item It's not just about Vectors: it's about instruction effectiveness
107 \item Anything an implementor is not interested in HW-optimising,\\
108 let it fall through to exceptions (implement as a trap).
109 \end{itemize}
110 }
111
112
113 \frame{\frametitle{How does Simple-V relate to RVV? What's different?}
114
115 \begin{itemize}
116 \item RVV very heavy-duty (excellent for supercomputing)\vspace{10pt}
117 \item Simple-V abstracts parallelism (based on best of RVV)\vspace{10pt}
118 \item Graded levels: hardware, hybrid or traps (fit impl. need)\vspace{10pt}
119 \item Even Compressed instructions become vectorised (RVV can't)\vspace{10pt}
120 \end{itemize}
121 What Simple-V is not:\vspace{10pt}
122 \begin{itemize}
123 \item A full supercomputer-level Vector Proposal
124 \item A replacement for RVV (SV is designed to be over-ridden\\
125 by - or augmented to become, or just be replaced by - RVV)
126 \end{itemize}
127 }
128
129
130 \frame{\frametitle{How is Parallelism abstracted in Simple-V?}
131
132 \begin{itemize}
133 \item Register ``typing'' turns any op into an implicit Vector op\vspace{10pt}
134 \item Primarily at the Instruction issue phase (except SIMD)\\
135 Note: it's ok to pass predication through to ALU (like SIMD)
136 \item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
137 \end{itemize}
138 Notes:\vspace{6pt}
139 \begin{itemize}
140 \item All LOAD/STORE (inc. Compressed, Int/FP versions)
141 \item All ALU ops (soft / hybrid / full HW, on per-op basis)
142 \item All branches become predication targets (C.FNE added)
143 \item C.MV of particular interest (s/v, v/v, v/s)
144 \end{itemize}
145 }
146
147
148 \frame{\frametitle{Implementation Options}
149
150 \begin{itemize}
151 \item Absolute minimum: Exceptions (if CSRs indicate "V", trap)
152 \item Hardware loop, single-instruction issue\\
153 (Do / Don't send through predication to ALU)
154 \item Hardware loop, parallel (multi-instruction) issue\\
155 (Do / Don't send through predication to ALU)
156 \item Hardware loop, full parallel ALU (not recommended)
157 \end{itemize}
158 Notes:\vspace{6pt}
159 \begin{itemize}
160 \item 4 (or more?) options above may be deployed on per-op basis
161 \item SIMD always sends predication bits through to ALU
162 \item Minimum MVL MUST be sufficient to cover regfile LD/ST
163 \item Instr. FIFO may repeatedly split off N scalar ops at a time
164 \end{itemize}
165 }
166 % Instr. FIFO may need its own slide. Basically, the vectorised op
167 % gets pushed into the FIFO, where it is then "processed". Processing
168 % will remove the first set of ops from its vector numbering (taking
169 % predication into account) and shoving them **BACK** into the FIFO,
170 % but MODIFYING the remaining "vectorised" op, subtracting the now
171 % scalar ops from it.
172
173 \frame{\frametitle{How are SIMD Instructions Vectorised?}
174
175 \begin{itemize}
176 \item SIMD ALU(s) primarily unchanged\vspace{10pt}
177 \item Predication is added to each SIMD element (NO ZEROING!)\vspace{10pt}
178 \item End of Vector enables predication (NO ZEROING!)\vspace{10pt}
179 \end{itemize}
180 Considerations:\vspace{10pt}
181 \begin{itemize}
182 \item Many SIMD ALUs possible (parallel execution)\vspace{10pt}
183 \item Very long SIMD ALUs could waste die area (short vectors)\vspace{10pt}
184 \item Implementor free to choose (API remains the same)\vspace{10pt}
185 \end{itemize}
186 }
187 % With multiple SIMD ALUs at for example 32-bit wide they can be used
188 % to either issue 64-bit or 128-bit or 256-bit wide SIMD operations
189 % or they can be used to cover several operations on totally different
190 % vectors / registers.
191
192 \frame{\frametitle{What's the deal / juice / score?}
193
194 \begin{itemize}
195 \item Standard Register File(s) overloaded with CSR ``vector span''\\
196 (see pseudocode slides for examples)
197 \item Element width and type concepts remain same as RVV\\
198 (CSRs are used to ``interpret'' elements in registers)
199 \item CSRs are key-value tables (overlaps allowed)\vspace{10pt}
200 \end{itemize}
201 Key differences from RVV:\vspace{10pt}
202 \begin{itemize}
203 \item Predication in INT regs as a BIT field (max VL=XLEN)
204 \item Minimum VL must be Num Regs - 1 (all regs single LD/ST)
205 \item SV may condense sparse Vecs: RVV lets ALU do predication
206 \item NO ZEROING: non-predicated elements are skipped
207 \end{itemize}
208 }
209
210
211 \begin{frame}[fragile]
212 \frametitle{ADD pseudocode (or trap, or actual hardware loop)}
213
214 \begin{semiverbatim}
215 function op_add(rd, rs1, rs2, predr) # add not VADD!
216  int i, id=0, irs1=0, irs2=0;
217  for (i = 0; i < VL; i++)
218   if (ireg[predr] & 1<<i) # predication uses intregs
219    ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
220   if (reg_is_vectorised[rd]) \{ id += 1; \}
221   if (reg_is_vectorised[rs1]) \{ irs1 += 1; \}
222   if (reg_is_vectorised[rs2]) \{ irs2 += 1; \}
223 \end{semiverbatim}
224
225 \begin{itemize}
226 \item SIMD slightly more complex (case above is elwidth = default)
227 \item Scalar-scalar and scalar-vector and vector-vector now all in one
228 \item OoO may choose to push ADDs into instr. queue (v. busy!)
229 \end{itemize}
230 \end{frame}
231
232 % yes it really *is* ADD not VADD. that's the entire point of
233 % this proposal, that *standard* operations are overloaded to
234 % become vectorised-on-demand
235
236
237 \begin{frame}[fragile]
238 \frametitle{Predication-Branch (or trap, or actual hardware loop)}
239
240 \begin{semiverbatim}
241 s1 = reg_is_vectorised(src1);
242 s2 = reg_is_vectorised(src2);
243 if (!s2 && !s1) goto branch;
244 for (int i = 0; i < VL; ++i)
245    if cmp(s1 ? reg[src1+i] : reg[src1],
246           s2 ? reg[src2+i] : reg[src2])
247       preg[rs3] |= 1 << i;
248 \end{semiverbatim}
249
250 \begin{itemize}
251 \item SIMD slightly more complex (case above is elwidth = default)
252 \item If s1 and s2 both scalars, Standard branch occurs
253 \item Predication stored in integer regfile as a bitfield
254 \item Scalar-vector and vector-vector supported
255 \end{itemize}
256 \end{frame}
257
258 \begin{frame}[fragile]
259 \frametitle{VLD/VLD.S/VLD.X (or trap, or actual hardware loop)}
260
261 \begin{semiverbatim}
262 if (unit-strided) stride = elsize;
263 else stride = areg[as2]; // constant-strided
264 for (int i = 0; i < VL; ++i)
265   if (preg_enabled[rd] && ([!]preg[rd] & 1<<i))
266     for (int j = 0; j < seglen+1; j++)
267       if (reg_is_vectorised[rs2]) offs = vreg[rs2][i]
268       else offs = i*(seglen+1)*stride;
269       vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
270 \end{semiverbatim}
271
272 \begin{itemize}
273 \item Again: elwidth != default slightly more complex
274 \item rs2 vectorised taken to implicitly indicate VLD.X
275 \end{itemize}
276 \end{frame}
277
278
279 \frame{\frametitle{Why are overlaps allowed in Regfiles?}
280
281 \begin{itemize}
282 \item Same register(s) can have multiple ``interpretations''\vspace{6pt}
283 \item xBitManip plus SIMD plus xBitManip = Hi/Lo bitops\vspace{6pt}
284 \item (32-bit GREV plus 4x8-bit SIMD plus 32-bit GREV)\vspace{6pt}
285 \item RGB 565 (video): BEXTW plus 4x8-bit SIMD plus BDEPW\vspace{6pt}
286 \item Same register(s) can be offset (no need for VSLIDE)\vspace{6pt}
287 \end{itemize}
288 Note:\vspace{10pt}
289 \begin{itemize}
290 \item xBitManip reduces O($N^{6}$) SIMD down to O($N^{3}$)
291 \item Hi-Performance: Macro-op fusion (more pipeline stages?)
292 \end{itemize}
293 }
294
295
296 \frame{\frametitle{Why no Zeroing (place zeros in non-predicated elements)?}
297
298 \begin{itemize}
299 \item Zeroing is an implementation optimisation favouring OoO\vspace{8pt}
300 \item Simple implementations may skip non-predicated operations\vspace{8pt}
301 \item Simple implementations explicitly have to destroy data\vspace{8pt}
302 \item Complex implementations may use reg-renames to save power\\
303 Zeroing on predication chains makes optimisation harder
304 \end{itemize}
305 Considerations:\vspace{10pt}
306 \begin{itemize}
307 \item Complex not really impacted, Simple impacted a LOT
308 \item Overlapping ``Vectors'' may issue overlapping ops
309 \item Please don't use Vectors for ``security'' (use Sec-Ext)
310 \end{itemize}
311 }
312 % with overlapping "vectors" - bearing in mind that "vectors" are
313 % just a remap onto the standard register file, if the top bits of
314 % predication are zero, and there happens to be a second vector
315 % that uses some of the same register file that happens to be
316 % predicated out, the second vector op may be issued *at the same time*
317 % if there are available parallel ALUs to do so.
318
319
320 \frame{\frametitle{Predication key-value CSR store}
321
322 \begin{itemize}
323 \item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
324 \item register to be predicated if referred to (5 bits, key)\vspace{6pt}
325 \item register to store actual predication in (5 bits, value)\vspace{6pt}
326 \item predication is inverted (1 bit)\vspace{6pt}
327 \item non-predicated elements are to be zeroed (1 bit)\vspace{6pt}
328 \end{itemize}
329 Notes:\vspace{10pt}
330 \begin{itemize}
331 \item Table should be expanded out for high-speed implementations
332 \item Multiple ``keys'' (and values) theoretically permitted
333 \item RVV rules about deleting higher-indexed CSRs followed
334 \end{itemize}
335 }
336
337
338 \frame{\frametitle{Register key-value CSR store}
339
340 \begin{itemize}
341 \item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
342 \item treated as vector if referred to in op (5 bits, key)\vspace{6pt}
343 \item starting register to actually be used (5 bits, value)\vspace{6pt}
344 \item element bitwidth: default/8/16/32/64/rsvd (3 bits)\vspace{6pt}
345 \item element type: still under consideration\vspace{6pt}
346 \end{itemize}
347 Notes:\vspace{10pt}
348 \begin{itemize}
349 \item Same notes apply (previous slide) as for predication CSR table
350 \item Level of indirection has implications for pipeline latency
351 \end{itemize}
352 }
353
354
355 \frame{\frametitle{C.MV extremely flexible!}
356
357 \begin{itemize}
358 \item scalar-to-vector (w/no pred): VSPLAT
359 \item scalar-to-vector (w/dest-pred): Sparse VSPLAT
360 \item scalar-to-vector (w/single dest-pred): VINSERT
361 \item vector-to-scalar (w/src-pred): VEXTRACT
362 \item vector-to-vector (w/no pred): Vector Copy
363 \item vector-to-vector (w/src xor dest pred): Sparse Vector Copy
364 \item vector-to-vector (w/src and dest pred): Vector Gather/Scatter
365 \end{itemize}
366 \vspace{8pt}
367 Notes:\vspace{10pt}
368 \begin{itemize}
369 \item Really powerful!
370 \item Any other options?
371 \end{itemize}
372 }
373
374
375 \frame{\frametitle{Opcodes, compared to RVV}
376
377 \begin{itemize}
378 \item All integer and FP opcodes all removed (no CLIP!)\vspace{8pt}
379 \item VMPOP, VFIRST etc. all removed (use xBitManip)\vspace{8pt}
380 \item VSLIDE removed (use regfile overlaps)\vspace{8pt}
381 \item C.MV covers VEXTRACT VINSERT and VSPLAT (and more)\vspace{8pt}
382 \item VSETVL, VGETVL, VSELECT stay\vspace{8pt}
383 \item Issue: VCLIP is not in RV* (add with custom ext?)\vspace{8pt}
384 \item Vector (or scalar-vector) use C.MV (MV is a pseudo-op)\vspace{8pt}
385 \item VMERGE: twin predicated C.MVs (one inverted. macro-op'd)\vspace{8pt}
386 \end{itemize}
387 }
388
389
390 \frame{\frametitle{Under consideration}
391
392 \begin{itemize}
393 \item Is C.FNE actually needed? Should it be added if it is?
394 \item Element type implies polymorphism. Should it be in SV?
395 \item Should use of registers be allowed to "wrap" (x30 x31 x1 x2)?
396 \item Is detection of all-scalar ops ok (without slowing pipeline)?
397 \item Can VSELECT be removed? (it's really complex)
398 \item Can CLIP be done as a CSR (mode, like elwidth)
399 \item SIMD saturation (etc.) also set as a mode?
400 \item C.MV src predication no different from dest predication\\
401 What to do? Make one have different meaning?
402 \item For 8/16-bit ops, is it worthwhile adding a ``start offset''? \\
403 (a bit like misaligned addressing... for registers)\\
404 or just use predication to skip start?
405 \end{itemize}
406 }
407
408
409 \frame{\frametitle{What's the downside(s) of SV?}
410 \begin{itemize}
411 \item EVERY register operation is inherently parallelised\\
412 (scalar ops are just vectors of length 1)
413 \item An extra pipeline phase is pretty much essential\\
414 for fast low-latency implementations
415 \item Assuming an instruction FIFO, N ops could be taken off\\
416 of a parallel op per cycle (avoids filling entire FIFO;\\
417 also is less work per cycle: lower complexity / latency)
418 \item With zeroing off, skipping non-predicated elements is hard:\\
419 it is however an optimisation (and could be skipped).
420 \end{itemize}
421 }
422
423
424 \frame{\frametitle{Is this OK (low latency)? Detect scalar-ops (only)}
425 \begin{center}
426 \includegraphics[height=2.5in]{scalardetect.png}\\
427 {\bf \red Detect when all registers are scalar for a given op}
428 \end{center}
429 }
430
431
432 \frame{\frametitle{TODO (break into separate slides)}
433
434 \begin{itemize}
435 \item Then explain why this proposal is a good way to \\
436 abstract parallelism\\
437 (hopefully also explaining how \\
438 a good compiler can make clever use of this increased parallelism\\
439 Then explain how this can be implemented (at instruction\\
440 issue time???) with\\
441 implementation options, and what these "cost".\\
442 Finally give examples that show simple usage that compares\\
443 C code\\
444 RVIC\\
445 RVV\\
446 RVICXsimplev
447 \end{itemize}
448 }
449
450
451 \frame{\frametitle{Summary}
452
453 \begin{itemize}
454 \item Designed for flexibility (graded levels of complexity)\vspace{6pt}
455 \item Huge range of implementor freedom\vspace{6pt}
456 \item Fits RISC-V ethos: achieve more with less\vspace{6pt}
457 \item Reduces SIMD ISA proliferation by 3-4 orders of magnitude \\
458 (without SIMD downsides or sacrificing speed trade-off)\vspace{6pt}
459 \item Covers 98\% of RVV, allows RVV to fit "on top"\vspace{6pt}
460 \item Not designed for supercomputing (that's RVV), designed for
461 in between: DSPs, RV32E, Embedded 3D GPUs etc.\vspace{6pt}
462 \item Not specifically designed for Vectorisation: designed to\\
463 reduce code size (increase efficiency, just
464 like Compressed)\vspace{6pt}
465 \end{itemize}
466 }
467
468
469 \frame{\frametitle{slide}
470
471 \begin{itemize}
472 \item \vspace{10pt}
473 \end{itemize}
474 Considerations:\vspace{10pt}
475 \begin{itemize}
476 \item \vspace{10pt}
477 \end{itemize}
478 }
479
480
481 \frame{
482 \begin{center}
483 {\Huge \red The end\vspace{20pt}\\
484 Thank you}
485 \end{center}
486 }
487
488
489 \end{document}