a2c45ce6f334b6fb5d3ff757872f9608c2503f0d
[libreriscv.git] / simple_v_extension / simple_v_chennai_2018.tex
1 \documentclass[slidestop]{beamer}
2 \usepackage{beamerthemesplit}
3 \usepackage{graphicx}
4 \usepackage{pstricks}
5
6 \title{Simple-V RISC-V Extension for Vectorisation and SIMD}
7 \author{Luke Kenneth Casson Leighton}
8
9
10 \begin{document}
11
12 \frame{
13 \begin{center}
14 \huge{Simple-V RISC-V Extension for Vectors and SIMD}\\
15 \vspace{32pt}
16 \Large{Flexible Vectorisation}\\
17 \Large{(aka not so Simple-V?)}\\
18 \vspace{24pt}
19 \Large{[proposed for] Chennai 9th RISC-V Workshop}\\
20 \vspace{24pt}
21 \large{\today}
22 \end{center}
23 }
24
25
26 \frame{\frametitle{Credits and Acknowledgements}
27
28 \begin{itemize}
29 \item The Designers of RISC-V\vspace{15pt}
30 \item The RVV Working Group and contributors\vspace{15pt}
31 \item Allen Baum, Jacob Bachmeyer, Xan Phung, Chuanhua Chang,\\
32 Guy Lemurieux, Jonathan Neuschafer, Roger Brussee,
33 and others\vspace{15pt}
34 \item ISA-Dev Group Members\vspace{10pt}
35 \end{itemize}
36 }
37
38
39 \frame{\frametitle{Quick refresher on SIMD}
40
41 \begin{itemize}
42 \item SIMD very easy to implement (and very seductive)\vspace{10pt}
43 \item Parallelism is in the ALU\vspace{10pt}
44 \item Zero-to-negligible impact for rest of core\vspace{10pt}
45 \end{itemize}
46 Where SIMD Goes Wrong:\vspace{10pt}
47 \begin{itemize}
48 \item See ``SIMD instructions considered harmful''
49 \texttt{https://www.sigarch.org/simd-instructions-considered-harmful}
50 \item Corner-cases alone are extremely complex.\\
51 Hardware is easy, but software is hell.
52 \item O($N^{6}$) ISA opcode proliferation!\\
53 opcode, elwidth, veclen, src1-src2-dest hi/lo
54 \end{itemize}
55 }
56
57 \frame{\frametitle{Quick refresher on RVV}
58
59 \begin{itemize}
60 \item Extremely powerful (extensible to 256 registers)\vspace{10pt}
61 \item Supports polymorphism, several datatypes (inc. FP16)\vspace{10pt}
62 \item Requires a separate Register File (32 w/ext to 256)\vspace{10pt}
63 \item Implemented as a separate pipeline (no impact on scalar)\vspace{10pt}
64 \end{itemize}
65 However...\vspace{10pt}
66 \begin{itemize}
67 \item 98\% opcode duplication with rest of RV (CLIP)
68 \item Extending RVV requires customisation not just of h/w:\\
69 gcc and s/w also need customisation (and maintenance)
70 \end{itemize}
71 }
72
73
74 \frame{\frametitle{The Simon Sinek lowdown (Why, How, What)}
75
76 \begin{itemize}
77 \item Why?
78 Implementors need flexibility in vectorisation to optimise for
79 area or performance depending on the scope:
80 embedded DSP, Mobile GPU's, Server CPU's and more.\vspace{4pt}\\
81 Compilers also need flexibility in vectorisation to optimise for cost
82 of pipeline setup, amount of state to context switch
83 and software portability\vspace{4pt}
84 \item How?
85 By marking INT/FP regs as ``Vectorised'' and
86 adding a level of indirection,
87 SV expresses how existing instructions should act
88 on [contiguous] blocks of registers, in parallel.\vspace{4pt}
89 \item What?
90 Simple-V is an ``API'' that implicitly extends
91 existing (scalar) instructions with explicit parallelisation
92 (i.e. SV is actually about parallelism NOT vectors per se)
93 \end{itemize}
94 }
95
96
97 \frame{\frametitle{What's the value of SV? Why adopt it even in non-V?}
98
99 \begin{itemize}
100 \item memcpy becomes much smaller (higher bang-per-buck)
101 \item context-switch (LOAD/STORE multiple): 1-2 instructions
102 \item Compressed instrs further reduces I-cache (etc.)
103 \item Greatly-reduced I-cache load (and less reads)
104 \item Amazingly, SIMD becomes (more) tolerable\\
105 (corner-cases for setup and teardown are gone)
106 \end{itemize}
107 Note:
108 \begin{itemize}
109 \item It's not just about Vectors: it's about instruction effectiveness
110 \item Anything that makes SIMD tolerable has to be a good thing
111 \item Anything implementor is not interested in HW-optimising,\\
112 let it fall through to exceptions (implement as a trap).
113 \end{itemize}
114 }
115
116
117 \frame{\frametitle{How does Simple-V relate to RVV? What's different?}
118
119 \begin{itemize}
120 \item RVV very heavy-duty (excellent for supercomputing)\vspace{10pt}
121 \item Simple-V abstracts parallelism (based on best of RVV)\vspace{10pt}
122 \item Graded levels: hardware, hybrid or traps (fit impl. need)\vspace{10pt}
123 \item Even Compressed become vectorised (RVV can't)\vspace{10pt}
124 \end{itemize}
125 What Simple-V is not:\vspace{10pt}
126 \begin{itemize}
127 \item A full supercomputer-level Vector Proposal
128 \item A replacement for RVV (SV is designed to be over-ridden\\
129 by - or augmented to become, or just be replaced by - RVV)
130 \end{itemize}
131 }
132
133
134 \frame{\frametitle{How is Parallelism abstracted in Simple-V?}
135
136 \begin{itemize}
137 \item Register ``typing'' turns any op into an implicit Vector op:\\
138 registers are reinterpreted through a level of indirection
139 \item Primarily at the Instruction issue phase (except SIMD)\\
140 Note: it's ok to pass predication through to ALU (like SIMD)
141 \item Standard (and future, and custom) opcodes now parallel\vspace{10pt}
142 \end{itemize}
143 Note: EVERYTHING is parallelised:
144 \begin{itemize}
145 \item All LOAD/STORE (inc. Compressed, Int/FP versions)
146 \item All ALU ops (soft / hybrid / full HW, on per-op basis)
147 \item All branches become predication targets (C.FNE added?)
148 \item C.MV of particular interest (s/v, v/v, v/s)
149 \item FCVT, FMV, FSGNJ etc. very similar to C.MV
150 \end{itemize}
151 }
152
153
154 \frame{\frametitle{Implementation Options}
155
156 \begin{itemize}
157 \item Absolute minimum: Exceptions (if CSRs indicate "V", trap)
158 \item Hardware loop, single-instruction issue\\
159 (Do / Don't send through predication to ALU)
160 \item Hardware loop, parallel (multi-instruction) issue\\
161 (Do / Don't send through predication to ALU)
162 \item Hardware loop, full parallel ALU (not recommended)
163 \end{itemize}
164 Notes:\vspace{6pt}
165 \begin{itemize}
166 \item 4 (or more?) options above may be deployed on per-op basis
167 \item SIMD always sends predication bits through to ALU
168 \item Minimum MVL MUST be sufficient to cover regfile LD/ST
169 \item Instr. FIFO may repeatedly split off N scalar ops at a time
170 \end{itemize}
171 }
172 % Instr. FIFO may need its own slide. Basically, the vectorised op
173 % gets pushed into the FIFO, where it is then "processed". Processing
174 % will remove the first set of ops from its vector numbering (taking
175 % predication into account) and shoving them **BACK** into the FIFO,
176 % but MODIFYING the remaining "vectorised" op, subtracting the now
177 % scalar ops from it.
178
179 \frame{\frametitle{Predicated 8-parallel ADD: 1-wide ALU}
180 \begin{center}
181 \includegraphics[height=2.5in]{padd9_alu1.png}\\
182 {\bf \red Predicated adds are shuffled down: 6 cycles in total}
183 \end{center}
184 }
185
186
187 \frame{\frametitle{Predicated 8-parallel ADD: 4-wide ALU}
188 \begin{center}
189 \includegraphics[height=2.5in]{padd9_alu4.png}\\
190 {\bf \red Predicated adds are shuffled down: 4 in 1st cycle, 2 in 2nd}
191 \end{center}
192 }
193
194
195 \frame{\frametitle{Predicated 8-parallel ADD: 3 phase FIFO expansion}
196 \begin{center}
197 \includegraphics[height=2.5in]{padd9_fifo.png}\\
198 {\bf \red First cycle takes first four 1s; second takes the rest}
199 \end{center}
200 }
201
202
203 \frame{\frametitle{How are SIMD Instructions Vectorised?}
204
205 \begin{itemize}
206 \item SIMD ALU(s) primarily unchanged\vspace{6pt}
207 \item Predication is added to each SIMD element\vspace{6pt}
208 \item Predication bits sent in groups to the ALU\vspace{6pt}
209 \item End of Vector enables (additional) predication\vspace{10pt}
210 \end{itemize}
211 Considerations:\vspace{4pt}
212 \begin{itemize}
213 \item Many SIMD ALUs possible (parallel execution)
214 \item Implementor free to choose (API remains the same)
215 \item Unused ALU units wasted, but s/w DRASTICALLY simpler
216 \item Very long SIMD ALUs could waste significant die area
217 \end{itemize}
218 }
219 % With multiple SIMD ALUs at for example 32-bit wide they can be used
220 % to either issue 64-bit or 128-bit or 256-bit wide SIMD operations
221 % or they can be used to cover several operations on totally different
222 % vectors / registers.
223
224 \frame{\frametitle{Predicated 9-parallel SIMD ADD}
225 \begin{center}
226 \includegraphics[height=2.5in]{padd9_simd.png}\\
227 {\bf \red 4-wide 8-bit SIMD, 4 bits of predicate passed to ALU}
228 \end{center}
229 }
230
231
232 \frame{\frametitle{What's the deal / juice / score?}
233
234 \begin{itemize}
235 \item Standard Register File(s) overloaded with CSR "reg is vector"\\
236 (see pseudocode slides for examples)
237 \item Element width (and type?) concepts remain same as RVV\\
238 (CSRs are used to "interpret" elements in registers)
239 \item CSRs are key-value tables (overlaps allowed)\vspace{10pt}
240 \end{itemize}
241 Key differences from RVV:\vspace{10pt}
242 \begin{itemize}
243 \item Predication in INT regs as a BIT field (max VL=XLEN)
244 \item Minimum VL must be Num Regs - 1 (all regs single LD/ST)
245 \item SV may condense sparse Vecs: RVV lets ALU do predication
246 \item Choice to Zero or skip non-predicated elements
247 \end{itemize}
248 }
249
250
251 \begin{frame}[fragile]
252 \frametitle{ADD pseudocode (or trap, or actual hardware loop)}
253
254 \begin{semiverbatim}
255 function op\_add(rd, rs1, rs2, predr) # add not VADD!
256  int i, id=0, irs1=0, irs2=0;
257  for (i = 0; i < VL; i++)
258   if (ireg[predr] & 1<<i) # predication uses intregs
259    ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
260 if (reg\_is\_vectorised[rd]) \{ id += 1; \}
261 if (reg\_is\_vectorised[rs1]) \{ irs1 += 1; \}
262 if (reg\_is\_vectorised[rs2]) \{ irs2 += 1; \}
263 \end{semiverbatim}
264
265 \begin{itemize}
266 \item Above is oversimplified: Reg. indirection left out (for clarity).
267 \item SIMD slightly more complex (case above is elwidth = default)
268 \item Scalar-scalar and scalar-vector and vector-vector now all in one
269 \item OoO may choose to push ADDs into instr. queue (v. busy!)
270 \end{itemize}
271 \end{frame}
272
273 % yes it really *is* ADD not VADD. that's the entire point of
274 % this proposal, that *standard* operations are overloaded to
275 % become vectorised-on-demand
276
277
278 \begin{frame}[fragile]
279 \frametitle{Predication-Branch (or trap, or actual hardware loop)}
280
281 \begin{semiverbatim}
282 s1 = reg\_is\_vectorised(src1);
283 s2 = reg\_is\_vectorised(src2);
284 if (!s2 && !s1) goto branch;
285 for (int i = 0; i < VL; ++i)
286 if cmp(s1 ? reg[src1+i] : reg[src1],
287 s2 ? reg[src2+i] : reg[src2])
288 preg[rs3] |= 1 << i;
289 \end{semiverbatim}
290
291 \begin{itemize}
292 \item SIMD slightly more complex (case above is elwidth = default)
293 \item If s1 and s2 both scalars, Standard branch occurs
294 \item Predication stored in integer regfile as a bitfield
295 \item Scalar-vector and vector-vector supported
296 \end{itemize}
297 \end{frame}
298
299 \begin{frame}[fragile]
300 \frametitle{VLD/VLD.S/VLD.X (or trap, or actual hardware loop)}
301
302 \begin{semiverbatim}
303 if (unit-strided) stride = elsize;
304 else stride = areg[as2]; // constant-strided
305 for (int i = 0; i < VL; ++i)
306 if (preg\_enabled[rd] && ([!]preg[rd] & 1<<i))
307 for (int j = 0; j < seglen+1; j++)
308 if (reg\_is\_vectorised[rs2]) offs = vreg[rs2+i]
309 else offs = i*(seglen+1)*stride;
310 vreg[rd+j][i] = mem[sreg[base] + offs + j*stride]
311 \end{semiverbatim}
312
313 \begin{itemize}
314 \item Again: elwidth != default slightly more complex
315 \item rs2 vectorised taken to implicitly indicate VLD.X
316 \end{itemize}
317 \end{frame}
318
319
320 \frame{\frametitle{Why are overlaps allowed in Regfiles?}
321
322 \begin{itemize}
323 \item Same register(s) can have multiple "interpretations"
324 \item Set "real" register (scalar) without needing to set/unset CSRs.
325 \item xBitManip plus SIMD plus xBitManip = Hi/Lo bitops
326 \item (32-bit GREV plus 4x8-bit SIMD plus 32-bit GREV:\\
327 GREV @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
328 \item RGB 565 (video): BEXTW plus 4x8-bit SIMD plus BDEPW\\
329 (BEXT/BDEP @ VL=N,wid=32; SIMD @ VL=Nx4,wid=8)
330 \item Same register(s) can be offset (no need for VSLIDE)\vspace{6pt}
331 \end{itemize}
332 Note:
333 \begin{itemize}
334 \item xBitManip reduces O($N^{6}$) SIMD down to O($N^{3}$)
335 \item Hi-Performance: Macro-op fusion (more pipeline stages?)
336 \end{itemize}
337 }
338
339
340 \frame{\frametitle{To Zero or not to place zeros in non-predicated elements?}
341
342 \begin{itemize}
343 \item Zeroing is an implementation optimisation favouring OoO
344 \item Simple implementations may skip non-predicated operations
345 \item Simple implementations explicitly have to destroy data
346 \item Complex implementations may use reg-renames to save power\\
347 Zeroing on predication chains makes optimisation harder
348 \item Compromise: REQUIRE both (specified in predication CSRs).
349 \end{itemize}
350 Considerations:
351 \begin{itemize}
352 \item Complex not really impacted, simple impacted a LOT\\
353 with Zeroing... however it's useful (memzero)
354 \item Non-zero'd overlapping "Vectors" may issue overlapping ops\\
355 (2nd op's predicated elements slot in 1st's non-predicated ops)
356 \item Please don't use Vectors for "security" (use Sec-Ext)
357 \end{itemize}
358 }
359 % with overlapping "vectors" - bearing in mind that "vectors" are
360 % just a remap onto the standard register file, if the top bits of
361 % predication are zero, and there happens to be a second vector
362 % that uses some of the same register file that happens to be
363 % predicated out, the second vector op may be issued *at the same time*
364 % if there are available parallel ALUs to do so.
365
366
367 \frame{\frametitle{Predication key-value CSR store}
368
369 \begin{itemize}
370 \item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
371 \item register to be predicated if referred to (5 bits, key)\vspace{6pt}
372 \item register to store actual predication in (5 bits, value)\vspace{6pt}
373 \item predication is inverted Y/N (1 bit)\vspace{6pt}
374 \item non-predicated elements are to be zero'd Y/N (1 bit)\vspace{6pt}
375 \end{itemize}
376 Notes:\vspace{10pt}
377 \begin{itemize}
378 \item Table should be expanded out for high-speed implementations
379 \item Multiple "keys" (and values) theoretically permitted
380 \item RVV rules about deleting higher-indexed CSRs followed
381 \end{itemize}
382 }
383
384
385 \begin{frame}[fragile]
386 \frametitle{Predication key-value CSR table decoding pseudocode}
387
388 \begin{semiverbatim}
389 struct pred fp\_pred[32];
390 struct pred int\_pred[32];
391
392 for (i = 0; i < 16; i++) // 16 CSRs?
393 tb = int\_pred if CSRpred[i].type == 0 else fp\_pred
394 idx = CSRpred[i].regidx
395 tb[idx].zero = CSRpred[i].zero
396 tb[idx].inv = CSRpred[i].inv
397 tb[idx].predidx = CSRpred[i].predidx
398 tb[idx].enabled = true
399 \end{semiverbatim}
400
401 \begin{itemize}
402 \item All 64 (int and FP) Entries zero'd before setting
403 \item Might be a bit complex to set up (TBD)
404 \end{itemize}
405
406 \end{frame}
407
408
409 \begin{frame}[fragile]
410 \frametitle{Get Predication value pseudocode}
411
412 \begin{semiverbatim}
413 def get\_pred\_val(bool is\_fp\_op, int reg):
414 tb = fp\_pred if is\_fp\_op else int\_pred
415 if (!tb[reg].enabled):
416 return ~0x0 // all ops enabled
417 predidx = tb[reg].predidx // redirection occurs HERE
418 predicate = intreg[predidx] // actual predicate HERE
419 if (tb[reg].inv):
420 predicate = ~predicate
421 return predicate
422 \end{semiverbatim}
423
424 \begin{itemize}
425 \item References different (internal) mapping table for INT or FP
426 \item Actual predicate bitmask ALWAYS from the INT regfile
427 \end{itemize}
428
429 \end{frame}
430
431
432 \frame{\frametitle{Register key-value CSR store}
433
434 \begin{itemize}
435 \item key is int regfile number or FP regfile number (1 bit)\vspace{6pt}
436 \item treated as vector if referred to in op (5 bits, key)\vspace{6pt}
437 \item starting register to actually be used (5 bits, value)\vspace{6pt}
438 \item element bitwidth: default/8/16/32/64/rsvd (3 bits)\vspace{6pt}
439 \item element type: still under consideration\vspace{6pt}
440 \end{itemize}
441 Notes:\vspace{10pt}
442 \begin{itemize}
443 \item Same notes apply (previous slide) as for predication CSR table
444 \item Level of indirection has implications for pipeline latency
445 \end{itemize}
446 }
447
448
449 \begin{frame}[fragile]
450 \frametitle{Register key-value CSR table decoding pseudocode}
451
452 \begin{semiverbatim}
453 struct vectorised fp\_vec[32];
454 struct vectorised int\_vec[32];
455
456 for (i = 0; i < 16; i++) // 16 CSRs?
457 tb = int\_vec if CSRvectortb[i].type == 0 else fp\_vec
458 idx = CSRvectortb[i].regidx
459 tb[idx].elwidth = CSRvectortb[i].elwidth
460 tb[idx].regidx = CSRvectortb[i].regidx
461 tb[idx].isvector = true
462 \end{semiverbatim}
463
464 \begin{itemize}
465 \item All 64 (int and FP) Entries zero'd before setting
466 \item Might be a bit complex to set up (TBD)
467 \end{itemize}
468
469 \end{frame}
470
471
472 \begin{frame}[fragile]
473 \frametitle{ADD pseudocode with redirection, this time}
474
475 \begin{semiverbatim}
476 function op\_add(rd, rs1, rs2, predr) # add not VADD!
477  int i, id=0, irs1=0, irs2=0;
478  rd = int\_vec[rd ].isvector ? int\_vec[rd ].regidx : rd;
479  rs1 = int\_vec[rs1].isvector ? int\_vec[rs1].regidx : rs1;
480  rs2 = int\_vec[rs2].isvector ? int\_vec[rs2].regidx : rs2;
481  predval = get\_pred\_val(FALSE, rd);
482  for (i = 0; i < VL; i++)
483 if (predval \& 1<<i) # predication uses intregs
484    ireg[rd+id] <= ireg[rs1+irs1] + ireg[rs2+irs2];
485 if (int\_vec[rd ].isvector)  \{ id += 1; \}
486 if (int\_vec[rs1].isvector)  \{ irs1 += 1; \}
487 if (int\_vec[rs2].isvector)  \{ irs2 += 1; \}
488 \end{semiverbatim}
489
490 \begin{itemize}
491 \item SIMD (elwidth != default) not covered above
492 \end{itemize}
493 \end{frame}
494
495
496 \frame{\frametitle{C.MV extremely flexible!}
497
498 \begin{itemize}
499 \item scalar-to-vector (w/ no pred): VSPLAT
500 \item scalar-to-vector (w/ dest-pred): Sparse VSPLAT
501 \item scalar-to-vector (w/ 1-bit dest-pred): VINSERT
502 \item vector-to-scalar (w/ [1-bit?] src-pred): VEXTRACT
503 \item vector-to-vector (w/ no pred): Vector Copy
504 \item vector-to-vector (w/ src pred): Vector Gather
505 \item vector-to-vector (w/ dest pred): Vector Scatter
506 \item vector-to-vector (w/ src \& dest pred): Vector Gather/Scatter
507 \end{itemize}
508 \vspace{4pt}
509 Notes:
510 \begin{itemize}
511 \item Surprisingly powerful!
512 \item Same arrangement for FCVT, FMV, FSGNJ etc.
513 \end{itemize}
514 }
515
516
517 \frame{\frametitle{Opcodes, compared to RVV}
518
519 \begin{itemize}
520 \item All integer and FP opcodes all removed (no CLIP!)\vspace{8pt}
521 \item VMPOP, VFIRST etc. all removed (use xBitManip)\vspace{8pt}
522 \item VSLIDE removed (use regfile overlaps)\vspace{8pt}
523 \item C.MV covers VEXTRACT VINSERT and VSPLAT (and more)\vspace{8pt}
524 \item VSETVL, VGETVL, VSELECT stay\vspace{8pt}
525 \item Issue: VCLIP is not in RV* (add with custom ext?)\vspace{8pt}
526 \item Vector (or scalar-vector) use C.MV (MV is a pseudo-op)\vspace{8pt}
527 \item VMERGE: twin predicated C.MVs (one inverted. macro-op'd)\vspace{8pt}
528 \end{itemize}
529 }
530
531
532 \frame{\frametitle{Under consideration}
533
534 \begin{itemize}
535 \item Is C.FNE actually needed? Should it be added if it is?
536 \item Element type implies polymorphism. Should it be in SV?
537 \item Should use of registers be allowed to "wrap" (x30 x31 x1 x2)?
538 \item Is detection of all-scalar ops ok (without slowing pipeline)?
539 \item Can VSELECT be removed? (it's really complex)
540 \item Can CLIP be done as a CSR (mode, like elwidth)
541 \item SIMD saturation (etc.) also set as a mode?
542 \item 8/16-bit ops is it worthwhile adding a "start offset"? \\
543 (a bit like misaligned addressing... for registers)\\
544 or just use predication to skip start?
545 \end{itemize}
546 }
547
548
549 \frame{\frametitle{What's the downside(s) of SV?}
550 \begin{itemize}
551 \item EVERY register operation is inherently parallelised\\
552 (scalar ops are just vectors of length 1)\vspace{4pt}
553 \item An extra pipeline phase is pretty much essential\\
554 for fast low-latency implementations\vspace{4pt}
555 \item Assuming an instruction FIFO, N ops could be taken off\\
556 of a parallel op per cycle (avoids filling entire FIFO;\\
557 also is less work per cycle: lower complexity / latency)\vspace{4pt}
558 \item With zeroing off, skipping non-predicated elements is hard:\\
559 it is however an optimisation (and could be skipped).\vspace{4pt}
560 \item Setting up the Register/Predication tables (interpreting the\\
561 CSR key-value stores) might be a bit complex to optimise
562 (any change to a CSR key-value entry needs to redo the table)
563 \end{itemize}
564 }
565
566
567 \frame{\frametitle{Is this OK (low latency)? Detect scalar-ops (only)}
568 \begin{center}
569 \includegraphics[height=2.5in]{scalardetect.png}\\
570 {\bf \red Detect when all registers are scalar for a given op}
571 \end{center}
572 }
573
574
575 \frame{\frametitle{TODO (break into separate slides)}
576
577 \begin{itemize}
578 \item Then explain why this proposal is a good way to \\
579 abstract parallelism\\
580 (hopefully also explaining how \\
581 a good compiler can make clever use of this increased parallelism\\
582 Then explain how this can be implemented (at instruction\\
583 issue time???) with\\
584 implementation options, and what these "cost".\\
585 Finally give examples that show simple usage that compares\\
586 C code\\
587 RVIC\\
588 RVV\\
589 RVICXsimplev
590 \end{itemize}
591 }
592
593
594 \frame{\frametitle{Summary}
595
596 \begin{itemize}
597 \item Actually about parallelism, not Vectors (or SIMD) per se
598 \item Designed for flexibility (graded levels of complexity)
599 \item Huge range of implementor freedom
600 \item Fits RISC-V ethos: achieve more with less
601 \item Reduces SIMD ISA proliferation by 3-4 orders of magnitude \\
602 (without SIMD downsides or sacrificing speed trade-off)
603 \item Covers 98\% of RVV, allows RVV to fit "on top"
604 \item Not designed for supercomputing (that's RVV), designed for
605 in between: DSPs, RV32E, Embedded 3D GPUs etc.
606 \item Not specifically designed for Vectorisation: designed to\\
607 reduce code size (increase efficiency, just
608 like Compressed)
609 \end{itemize}
610 }
611
612
613 \frame{\frametitle{slide}
614
615 \begin{itemize}
616 \item \vspace{10pt}
617 \end{itemize}
618 Considerations:\vspace{10pt}
619 \begin{itemize}
620 \item \vspace{10pt}
621 \end{itemize}
622 }
623
624
625 \frame{
626 \begin{center}
627 {\Huge \red The end\vspace{20pt}\\
628 Thank you}
629 \end{center}
630 }
631
632
633 \end{document}